]> git.ipfire.org Git - thirdparty/grsecurity-scrape.git/blob - test/grsecurity-2.2.2-3.2.2-201201272014.patch
Auto commit, 1 new patch{es}.
[thirdparty/grsecurity-scrape.git] / test / grsecurity-2.2.2-3.2.2-201201272014.patch
1 diff --git a/Documentation/dontdiff b/Documentation/dontdiff
2 index dfa6fc6..0095943 100644
3 --- a/Documentation/dontdiff
4 +++ b/Documentation/dontdiff
5 @@ -5,6 +5,7 @@
6 *.cis
7 *.cpio
8 *.csp
9 +*.dbg
10 *.dsp
11 *.dvi
12 *.elf
13 @@ -14,6 +15,7 @@
14 *.gcov
15 *.gen.S
16 *.gif
17 +*.gmo
18 *.grep
19 *.grp
20 *.gz
21 @@ -48,9 +50,11 @@
22 *.tab.h
23 *.tex
24 *.ver
25 +*.vim
26 *.xml
27 *.xz
28 *_MODULES
29 +*_reg_safe.h
30 *_vga16.c
31 *~
32 \#*#
33 @@ -70,6 +74,7 @@ Kerntypes
34 Module.markers
35 Module.symvers
36 PENDING
37 +PERF*
38 SCCS
39 System.map*
40 TAGS
41 @@ -93,19 +98,24 @@ bounds.h
42 bsetup
43 btfixupprep
44 build
45 +builtin-policy.h
46 bvmlinux
47 bzImage*
48 capability_names.h
49 capflags.c
50 classlist.h*
51 +clut_vga16.c
52 +common-cmds.h
53 comp*.log
54 compile.h*
55 conf
56 config
57 config-*
58 config_data.h*
59 +config.c
60 config.mak
61 config.mak.autogen
62 +config.tmp
63 conmakehash
64 consolemap_deftbl.c*
65 cpustr.h
66 @@ -119,6 +129,7 @@ dslm
67 elf2ecoff
68 elfconfig.h*
69 evergreen_reg_safe.h
70 +exception_policy.conf
71 fixdep
72 flask.h
73 fore200e_mkfirm
74 @@ -126,12 +137,15 @@ fore200e_pca_fw.c*
75 gconf
76 gconf.glade.h
77 gen-devlist
78 +gen-kdb_cmds.c
79 gen_crc32table
80 gen_init_cpio
81 generated
82 genheaders
83 genksyms
84 *_gray256.c
85 +hash
86 +hid-example
87 hpet_example
88 hugepage-mmap
89 hugepage-shm
90 @@ -146,7 +160,7 @@ int32.c
91 int4.c
92 int8.c
93 kallsyms
94 -kconfig
95 +kern_constants.h
96 keywords.c
97 ksym.c*
98 ksym.h*
99 @@ -154,7 +168,6 @@ kxgettext
100 lkc_defs.h
101 lex.c
102 lex.*.c
103 -linux
104 logo_*.c
105 logo_*_clut224.c
106 logo_*_mono.c
107 @@ -166,14 +179,15 @@ machtypes.h
108 map
109 map_hugetlb
110 maui_boot.h
111 -media
112 mconf
113 +mdp
114 miboot*
115 mk_elfconfig
116 mkboot
117 mkbugboot
118 mkcpustr
119 mkdep
120 +mkpiggy
121 mkprep
122 mkregtable
123 mktables
124 @@ -209,6 +223,7 @@ r300_reg_safe.h
125 r420_reg_safe.h
126 r600_reg_safe.h
127 recordmcount
128 +regdb.c
129 relocs
130 rlim_names.h
131 rn50_reg_safe.h
132 @@ -219,6 +234,7 @@ setup
133 setup.bin
134 setup.elf
135 sImage
136 +slabinfo
137 sm_tbl*
138 split-include
139 syscalltab.h
140 @@ -229,6 +245,7 @@ tftpboot.img
141 timeconst.h
142 times.h*
143 trix_boot.h
144 +user_constants.h
145 utsrelease.h*
146 vdso-syms.lds
147 vdso.lds
148 @@ -246,7 +263,9 @@ vmlinux
149 vmlinux-*
150 vmlinux.aout
151 vmlinux.bin.all
152 +vmlinux.bin.bz2
153 vmlinux.lds
154 +vmlinux.relocs
155 vmlinuz
156 voffset.h
157 vsyscall.lds
158 @@ -254,9 +273,11 @@ vsyscall_32.lds
159 wanxlfw.inc
160 uImage
161 unifdef
162 +utsrelease.h
163 wakeup.bin
164 wakeup.elf
165 wakeup.lds
166 zImage*
167 zconf.hash.c
168 +zconf.lex.c
169 zoffset.h
170 diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
171 index 81c287f..d456d02 100644
172 --- a/Documentation/kernel-parameters.txt
173 +++ b/Documentation/kernel-parameters.txt
174 @@ -1935,6 +1935,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
175 the specified number of seconds. This is to be used if
176 your oopses keep scrolling off the screen.
177
178 + pax_nouderef [X86] disables UDEREF. Most likely needed under certain
179 + virtualization environments that don't cope well with the
180 + expand down segment used by UDEREF on X86-32 or the frequent
181 + page table updates on X86-64.
182 +
183 + pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
184 +
185 pcbit= [HW,ISDN]
186
187 pcd. [PARIDE]
188 diff --git a/Makefile b/Makefile
189 index 2f684da..bf21f8d 100644
190 --- a/Makefile
191 +++ b/Makefile
192 @@ -245,8 +245,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
193
194 HOSTCC = gcc
195 HOSTCXX = g++
196 -HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
197 -HOSTCXXFLAGS = -O2
198 +HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
199 +HOSTCLFAGS += $(call cc-option, -Wno-empty-body)
200 +HOSTCXXFLAGS = -O2 -Wall -W -fno-delete-null-pointer-checks
201
202 # Decide whether to build built-in, modular, or both.
203 # Normally, just do built-in.
204 @@ -407,8 +408,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn --exc
205 # Rules shared between *config targets and build targets
206
207 # Basic helpers built in scripts/
208 -PHONY += scripts_basic
209 -scripts_basic:
210 +PHONY += scripts_basic gcc-plugins
211 +scripts_basic: gcc-plugins
212 $(Q)$(MAKE) $(build)=scripts/basic
213 $(Q)rm -f .tmp_quiet_recordmcount
214
215 @@ -564,6 +565,46 @@ else
216 KBUILD_CFLAGS += -O2
217 endif
218
219 +ifndef DISABLE_PAX_PLUGINS
220 +ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(CC)"), y)
221 +ifndef DISABLE_PAX_CONSTIFY_PLUGIN
222 +CONSTIFY_PLUGIN := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
223 +endif
224 +ifdef CONFIG_PAX_MEMORY_STACKLEAK
225 +STACKLEAK_PLUGIN := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN
226 +STACKLEAK_PLUGIN += -fplugin-arg-stackleak_plugin-track-lowest-sp=100
227 +endif
228 +ifdef CONFIG_KALLOCSTAT_PLUGIN
229 +KALLOCSTAT_PLUGIN := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
230 +endif
231 +ifdef CONFIG_PAX_KERNEXEC_PLUGIN
232 +KERNEXEC_PLUGIN := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
233 +KERNEXEC_PLUGIN += -fplugin-arg-kernexec_plugin-method=$(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD)
234 +endif
235 +ifdef CONFIG_CHECKER_PLUGIN
236 +ifeq ($(call cc-ifversion, -ge, 0406, y), y)
237 +CHECKER_PLUGIN := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
238 +endif
239 +endif
240 +GCC_PLUGINS := $(CONSTIFY_PLUGIN) $(STACKLEAK_PLUGIN) $(KALLOCSTAT_PLUGIN) $(KERNEXEC_PLUGIN) $(CHECKER_PLUGIN)
241 +export CONSTIFY_PLUGIN STACKLEAK_PLUGIN KERNEXEC_PLUGIN CHECKER_PLUGIN
242 +ifeq ($(KBUILD_EXTMOD),)
243 +gcc-plugins:
244 + $(Q)$(MAKE) $(build)=tools/gcc
245 +else
246 +gcc-plugins: ;
247 +endif
248 +else
249 +gcc-plugins:
250 +ifeq ($(call cc-ifversion, -ge, 0405, y), y)
251 + $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev. If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.))
252 +else
253 + $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
254 +endif
255 + $(Q)echo "PAX_MEMORY_STACKLEAK and other features will be less secure"
256 +endif
257 +endif
258 +
259 include $(srctree)/arch/$(SRCARCH)/Makefile
260
261 ifneq ($(CONFIG_FRAME_WARN),0)
262 @@ -708,7 +749,7 @@ export mod_strip_cmd
263
264
265 ifeq ($(KBUILD_EXTMOD),)
266 -core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
267 +core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
268
269 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
270 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
271 @@ -932,6 +973,7 @@ vmlinux.o: $(modpost-init) $(vmlinux-main) FORCE
272
273 # The actual objects are generated when descending,
274 # make sure no implicit rule kicks in
275 +$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): KBUILD_CFLAGS += $(GCC_PLUGINS)
276 $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
277
278 # Handle descending into subdirectories listed in $(vmlinux-dirs)
279 @@ -941,7 +983,7 @@ $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
280 # Error messages still appears in the original language
281
282 PHONY += $(vmlinux-dirs)
283 -$(vmlinux-dirs): prepare scripts
284 +$(vmlinux-dirs): gcc-plugins prepare scripts
285 $(Q)$(MAKE) $(build)=$@
286
287 # Store (new) KERNELRELASE string in include/config/kernel.release
288 @@ -985,6 +1027,7 @@ prepare0: archprepare FORCE
289 $(Q)$(MAKE) $(build)=.
290
291 # All the preparing..
292 +prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS),$(KBUILD_CFLAGS))
293 prepare: prepare0
294
295 # Generate some files
296 @@ -1086,6 +1129,7 @@ all: modules
297 # using awk while concatenating to the final file.
298
299 PHONY += modules
300 +modules: KBUILD_CFLAGS += $(GCC_PLUGINS)
301 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
302 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
303 @$(kecho) ' Building modules, stage 2.';
304 @@ -1101,7 +1145,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modules.builtin)
305
306 # Target to prepare building external modules
307 PHONY += modules_prepare
308 -modules_prepare: prepare scripts
309 +modules_prepare: gcc-plugins prepare scripts
310
311 # Target to install modules
312 PHONY += modules_install
313 @@ -1198,6 +1242,7 @@ distclean: mrproper
314 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
315 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
316 -o -name '.*.rej' \
317 + -o -name '.*.rej' -o -name '*.so' \
318 -o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
319 -type f -print | xargs rm -f
320
321 @@ -1358,6 +1403,7 @@ PHONY += $(module-dirs) modules
322 $(module-dirs): crmodverdir $(objtree)/Module.symvers
323 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
324
325 +modules: KBUILD_CFLAGS += $(GCC_PLUGINS)
326 modules: $(module-dirs)
327 @$(kecho) ' Building modules, stage 2.';
328 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
329 @@ -1484,17 +1530,19 @@ else
330 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
331 endif
332
333 -%.s: %.c prepare scripts FORCE
334 +%.s: KBUILD_CFLAGS += $(GCC_PLUGINS)
335 +%.s: %.c gcc-plugins prepare scripts FORCE
336 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
337 %.i: %.c prepare scripts FORCE
338 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
339 -%.o: %.c prepare scripts FORCE
340 +%.o: KBUILD_CFLAGS += $(GCC_PLUGINS)
341 +%.o: %.c gcc-plugins prepare scripts FORCE
342 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
343 %.lst: %.c prepare scripts FORCE
344 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
345 -%.s: %.S prepare scripts FORCE
346 +%.s: %.S gcc-plugins prepare scripts FORCE
347 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
348 -%.o: %.S prepare scripts FORCE
349 +%.o: %.S gcc-plugins prepare scripts FORCE
350 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
351 %.symtypes: %.c prepare scripts FORCE
352 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
353 @@ -1504,11 +1552,13 @@ endif
354 $(cmd_crmodverdir)
355 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
356 $(build)=$(build-dir)
357 -%/: prepare scripts FORCE
358 +%/: KBUILD_CFLAGS += $(GCC_PLUGINS)
359 +%/: gcc-plugins prepare scripts FORCE
360 $(cmd_crmodverdir)
361 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
362 $(build)=$(build-dir)
363 -%.ko: prepare scripts FORCE
364 +%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS)
365 +%.ko: gcc-plugins prepare scripts FORCE
366 $(cmd_crmodverdir)
367 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
368 $(build)=$(build-dir) $(@:.ko=.o)
369 diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
370 index da5449e..7418343 100644
371 --- a/arch/alpha/include/asm/elf.h
372 +++ b/arch/alpha/include/asm/elf.h
373 @@ -90,6 +90,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
374
375 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
376
377 +#ifdef CONFIG_PAX_ASLR
378 +#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
379 +
380 +#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
381 +#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
382 +#endif
383 +
384 /* $0 is set by ld.so to a pointer to a function which might be
385 registered using atexit. This provides a mean for the dynamic
386 linker to call DT_FINI functions for shared libraries that have
387 diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
388 index de98a73..bd4f1f8 100644
389 --- a/arch/alpha/include/asm/pgtable.h
390 +++ b/arch/alpha/include/asm/pgtable.h
391 @@ -101,6 +101,17 @@ struct vm_area_struct;
392 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
393 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
394 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
395 +
396 +#ifdef CONFIG_PAX_PAGEEXEC
397 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
398 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
399 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
400 +#else
401 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
402 +# define PAGE_COPY_NOEXEC PAGE_COPY
403 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
404 +#endif
405 +
406 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
407
408 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
409 diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
410 index 2fd00b7..cfd5069 100644
411 --- a/arch/alpha/kernel/module.c
412 +++ b/arch/alpha/kernel/module.c
413 @@ -160,7 +160,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
414
415 /* The small sections were sorted to the end of the segment.
416 The following should definitely cover them. */
417 - gp = (u64)me->module_core + me->core_size - 0x8000;
418 + gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
419 got = sechdrs[me->arch.gotsecindex].sh_addr;
420
421 for (i = 0; i < n; i++) {
422 diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
423 index 01e8715..be0e80f 100644
424 --- a/arch/alpha/kernel/osf_sys.c
425 +++ b/arch/alpha/kernel/osf_sys.c
426 @@ -1147,7 +1147,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
427 /* At this point: (!vma || addr < vma->vm_end). */
428 if (limit - len < addr)
429 return -ENOMEM;
430 - if (!vma || addr + len <= vma->vm_start)
431 + if (check_heap_stack_gap(vma, addr, len))
432 return addr;
433 addr = vma->vm_end;
434 vma = vma->vm_next;
435 @@ -1183,6 +1183,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
436 merely specific addresses, but regions of memory -- perhaps
437 this feature should be incorporated into all ports? */
438
439 +#ifdef CONFIG_PAX_RANDMMAP
440 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
441 +#endif
442 +
443 if (addr) {
444 addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
445 if (addr != (unsigned long) -ENOMEM)
446 @@ -1190,8 +1194,8 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
447 }
448
449 /* Next, try allocating at TASK_UNMAPPED_BASE. */
450 - addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
451 - len, limit);
452 + addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit);
453 +
454 if (addr != (unsigned long) -ENOMEM)
455 return addr;
456
457 diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
458 index fadd5f8..904e73a 100644
459 --- a/arch/alpha/mm/fault.c
460 +++ b/arch/alpha/mm/fault.c
461 @@ -54,6 +54,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
462 __reload_thread(pcb);
463 }
464
465 +#ifdef CONFIG_PAX_PAGEEXEC
466 +/*
467 + * PaX: decide what to do with offenders (regs->pc = fault address)
468 + *
469 + * returns 1 when task should be killed
470 + * 2 when patched PLT trampoline was detected
471 + * 3 when unpatched PLT trampoline was detected
472 + */
473 +static int pax_handle_fetch_fault(struct pt_regs *regs)
474 +{
475 +
476 +#ifdef CONFIG_PAX_EMUPLT
477 + int err;
478 +
479 + do { /* PaX: patched PLT emulation #1 */
480 + unsigned int ldah, ldq, jmp;
481 +
482 + err = get_user(ldah, (unsigned int *)regs->pc);
483 + err |= get_user(ldq, (unsigned int *)(regs->pc+4));
484 + err |= get_user(jmp, (unsigned int *)(regs->pc+8));
485 +
486 + if (err)
487 + break;
488 +
489 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
490 + (ldq & 0xFFFF0000U) == 0xA77B0000U &&
491 + jmp == 0x6BFB0000U)
492 + {
493 + unsigned long r27, addr;
494 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
495 + unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
496 +
497 + addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
498 + err = get_user(r27, (unsigned long *)addr);
499 + if (err)
500 + break;
501 +
502 + regs->r27 = r27;
503 + regs->pc = r27;
504 + return 2;
505 + }
506 + } while (0);
507 +
508 + do { /* PaX: patched PLT emulation #2 */
509 + unsigned int ldah, lda, br;
510 +
511 + err = get_user(ldah, (unsigned int *)regs->pc);
512 + err |= get_user(lda, (unsigned int *)(regs->pc+4));
513 + err |= get_user(br, (unsigned int *)(regs->pc+8));
514 +
515 + if (err)
516 + break;
517 +
518 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
519 + (lda & 0xFFFF0000U) == 0xA77B0000U &&
520 + (br & 0xFFE00000U) == 0xC3E00000U)
521 + {
522 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
523 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
524 + unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
525 +
526 + regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
527 + regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
528 + return 2;
529 + }
530 + } while (0);
531 +
532 + do { /* PaX: unpatched PLT emulation */
533 + unsigned int br;
534 +
535 + err = get_user(br, (unsigned int *)regs->pc);
536 +
537 + if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
538 + unsigned int br2, ldq, nop, jmp;
539 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
540 +
541 + addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
542 + err = get_user(br2, (unsigned int *)addr);
543 + err |= get_user(ldq, (unsigned int *)(addr+4));
544 + err |= get_user(nop, (unsigned int *)(addr+8));
545 + err |= get_user(jmp, (unsigned int *)(addr+12));
546 + err |= get_user(resolver, (unsigned long *)(addr+16));
547 +
548 + if (err)
549 + break;
550 +
551 + if (br2 == 0xC3600000U &&
552 + ldq == 0xA77B000CU &&
553 + nop == 0x47FF041FU &&
554 + jmp == 0x6B7B0000U)
555 + {
556 + regs->r28 = regs->pc+4;
557 + regs->r27 = addr+16;
558 + regs->pc = resolver;
559 + return 3;
560 + }
561 + }
562 + } while (0);
563 +#endif
564 +
565 + return 1;
566 +}
567 +
568 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
569 +{
570 + unsigned long i;
571 +
572 + printk(KERN_ERR "PAX: bytes at PC: ");
573 + for (i = 0; i < 5; i++) {
574 + unsigned int c;
575 + if (get_user(c, (unsigned int *)pc+i))
576 + printk(KERN_CONT "???????? ");
577 + else
578 + printk(KERN_CONT "%08x ", c);
579 + }
580 + printk("\n");
581 +}
582 +#endif
583
584 /*
585 * This routine handles page faults. It determines the address,
586 @@ -131,8 +249,29 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
587 good_area:
588 si_code = SEGV_ACCERR;
589 if (cause < 0) {
590 - if (!(vma->vm_flags & VM_EXEC))
591 + if (!(vma->vm_flags & VM_EXEC)) {
592 +
593 +#ifdef CONFIG_PAX_PAGEEXEC
594 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
595 + goto bad_area;
596 +
597 + up_read(&mm->mmap_sem);
598 + switch (pax_handle_fetch_fault(regs)) {
599 +
600 +#ifdef CONFIG_PAX_EMUPLT
601 + case 2:
602 + case 3:
603 + return;
604 +#endif
605 +
606 + }
607 + pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
608 + do_group_exit(SIGKILL);
609 +#else
610 goto bad_area;
611 +#endif
612 +
613 + }
614 } else if (!cause) {
615 /* Allow reads even for write-only mappings */
616 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
617 diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
618 index 86976d0..8a57797 100644
619 --- a/arch/arm/include/asm/atomic.h
620 +++ b/arch/arm/include/asm/atomic.h
621 @@ -239,6 +239,14 @@ typedef struct {
622 u64 __aligned(8) counter;
623 } atomic64_t;
624
625 +#ifdef CONFIG_PAX_REFCOUNT
626 +typedef struct {
627 + u64 __aligned(8) counter;
628 +} atomic64_unchecked_t;
629 +#else
630 +typedef atomic64_t atomic64_unchecked_t;
631 +#endif
632 +
633 #define ATOMIC64_INIT(i) { (i) }
634
635 static inline u64 atomic64_read(atomic64_t *v)
636 diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
637 index 0e9ce8d..6ef1e03 100644
638 --- a/arch/arm/include/asm/elf.h
639 +++ b/arch/arm/include/asm/elf.h
640 @@ -116,7 +116,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
641 the loader. We need to make sure that it is out of the way of the program
642 that it will "exec", and that there is sufficient room for the brk. */
643
644 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
645 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
646 +
647 +#ifdef CONFIG_PAX_ASLR
648 +#define PAX_ELF_ET_DYN_BASE 0x00008000UL
649 +
650 +#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
651 +#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
652 +#endif
653
654 /* When the program starts, a1 contains a pointer to a function to be
655 registered with atexit, as per the SVR4 ABI. A value of 0 means we
656 @@ -126,10 +133,6 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
657 extern void elf_set_personality(const struct elf32_hdr *);
658 #define SET_PERSONALITY(ex) elf_set_personality(&(ex))
659
660 -struct mm_struct;
661 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
662 -#define arch_randomize_brk arch_randomize_brk
663 -
664 extern int vectors_user_mapping(void);
665 #define arch_setup_additional_pages(bprm, uses_interp) vectors_user_mapping()
666 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES
667 diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
668 index e51b1e8..32a3113 100644
669 --- a/arch/arm/include/asm/kmap_types.h
670 +++ b/arch/arm/include/asm/kmap_types.h
671 @@ -21,6 +21,7 @@ enum km_type {
672 KM_L1_CACHE,
673 KM_L2_CACHE,
674 KM_KDB,
675 + KM_CLEARPAGE,
676 KM_TYPE_NR
677 };
678
679 diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
680 index b293616..96310e5 100644
681 --- a/arch/arm/include/asm/uaccess.h
682 +++ b/arch/arm/include/asm/uaccess.h
683 @@ -22,6 +22,8 @@
684 #define VERIFY_READ 0
685 #define VERIFY_WRITE 1
686
687 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
688 +
689 /*
690 * The exception table consists of pairs of addresses: the first is the
691 * address of an instruction that is allowed to fault, and the second is
692 @@ -387,8 +389,23 @@ do { \
693
694
695 #ifdef CONFIG_MMU
696 -extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
697 -extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
698 +extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
699 +extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
700 +
701 +static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
702 +{
703 + if (!__builtin_constant_p(n))
704 + check_object_size(to, n, false);
705 + return ___copy_from_user(to, from, n);
706 +}
707 +
708 +static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
709 +{
710 + if (!__builtin_constant_p(n))
711 + check_object_size(from, n, true);
712 + return ___copy_to_user(to, from, n);
713 +}
714 +
715 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
716 extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
717 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
718 @@ -403,6 +420,9 @@ extern unsigned long __must_check __strnlen_user(const char __user *s, long n);
719
720 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
721 {
722 + if ((long)n < 0)
723 + return n;
724 +
725 if (access_ok(VERIFY_READ, from, n))
726 n = __copy_from_user(to, from, n);
727 else /* security hole - plug it */
728 @@ -412,6 +432,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
729
730 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
731 {
732 + if ((long)n < 0)
733 + return n;
734 +
735 if (access_ok(VERIFY_WRITE, to, n))
736 n = __copy_to_user(to, from, n);
737 return n;
738 diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
739 index 5b0bce6..becd81c 100644
740 --- a/arch/arm/kernel/armksyms.c
741 +++ b/arch/arm/kernel/armksyms.c
742 @@ -95,8 +95,8 @@ EXPORT_SYMBOL(__strncpy_from_user);
743 #ifdef CONFIG_MMU
744 EXPORT_SYMBOL(copy_page);
745
746 -EXPORT_SYMBOL(__copy_from_user);
747 -EXPORT_SYMBOL(__copy_to_user);
748 +EXPORT_SYMBOL(___copy_from_user);
749 +EXPORT_SYMBOL(___copy_to_user);
750 EXPORT_SYMBOL(__clear_user);
751
752 EXPORT_SYMBOL(__get_user_1);
753 diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
754 index 3d0c6fb..3dcae52 100644
755 --- a/arch/arm/kernel/process.c
756 +++ b/arch/arm/kernel/process.c
757 @@ -28,7 +28,6 @@
758 #include <linux/tick.h>
759 #include <linux/utsname.h>
760 #include <linux/uaccess.h>
761 -#include <linux/random.h>
762 #include <linux/hw_breakpoint.h>
763 #include <linux/cpuidle.h>
764
765 @@ -484,12 +483,6 @@ unsigned long get_wchan(struct task_struct *p)
766 return 0;
767 }
768
769 -unsigned long arch_randomize_brk(struct mm_struct *mm)
770 -{
771 - unsigned long range_end = mm->brk + 0x02000000;
772 - return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
773 -}
774 -
775 #ifdef CONFIG_MMU
776 /*
777 * The vectors page is always readable from user space for the
778 diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
779 index 99a5727..a3d5bb1 100644
780 --- a/arch/arm/kernel/traps.c
781 +++ b/arch/arm/kernel/traps.c
782 @@ -259,6 +259,8 @@ static int __die(const char *str, int err, struct thread_info *thread, struct pt
783
784 static DEFINE_RAW_SPINLOCK(die_lock);
785
786 +extern void gr_handle_kernel_exploit(void);
787 +
788 /*
789 * This function is protected against re-entrancy.
790 */
791 @@ -288,6 +290,9 @@ void die(const char *str, struct pt_regs *regs, int err)
792 panic("Fatal exception in interrupt");
793 if (panic_on_oops)
794 panic("Fatal exception");
795 +
796 + gr_handle_kernel_exploit();
797 +
798 if (ret != NOTIFY_STOP)
799 do_exit(SIGSEGV);
800 }
801 diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
802 index 66a477a..bee61d3 100644
803 --- a/arch/arm/lib/copy_from_user.S
804 +++ b/arch/arm/lib/copy_from_user.S
805 @@ -16,7 +16,7 @@
806 /*
807 * Prototype:
808 *
809 - * size_t __copy_from_user(void *to, const void *from, size_t n)
810 + * size_t ___copy_from_user(void *to, const void *from, size_t n)
811 *
812 * Purpose:
813 *
814 @@ -84,11 +84,11 @@
815
816 .text
817
818 -ENTRY(__copy_from_user)
819 +ENTRY(___copy_from_user)
820
821 #include "copy_template.S"
822
823 -ENDPROC(__copy_from_user)
824 +ENDPROC(___copy_from_user)
825
826 .pushsection .fixup,"ax"
827 .align 0
828 diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
829 index d066df6..df28194 100644
830 --- a/arch/arm/lib/copy_to_user.S
831 +++ b/arch/arm/lib/copy_to_user.S
832 @@ -16,7 +16,7 @@
833 /*
834 * Prototype:
835 *
836 - * size_t __copy_to_user(void *to, const void *from, size_t n)
837 + * size_t ___copy_to_user(void *to, const void *from, size_t n)
838 *
839 * Purpose:
840 *
841 @@ -88,11 +88,11 @@
842 .text
843
844 ENTRY(__copy_to_user_std)
845 -WEAK(__copy_to_user)
846 +WEAK(___copy_to_user)
847
848 #include "copy_template.S"
849
850 -ENDPROC(__copy_to_user)
851 +ENDPROC(___copy_to_user)
852 ENDPROC(__copy_to_user_std)
853
854 .pushsection .fixup,"ax"
855 diff --git a/arch/arm/lib/uaccess.S b/arch/arm/lib/uaccess.S
856 index d0ece2a..5ae2f39 100644
857 --- a/arch/arm/lib/uaccess.S
858 +++ b/arch/arm/lib/uaccess.S
859 @@ -20,7 +20,7 @@
860
861 #define PAGE_SHIFT 12
862
863 -/* Prototype: int __copy_to_user(void *to, const char *from, size_t n)
864 +/* Prototype: int ___copy_to_user(void *to, const char *from, size_t n)
865 * Purpose : copy a block to user memory from kernel memory
866 * Params : to - user memory
867 * : from - kernel memory
868 @@ -40,7 +40,7 @@ USER( T(strgtb) r3, [r0], #1) @ May fault
869 sub r2, r2, ip
870 b .Lc2u_dest_aligned
871
872 -ENTRY(__copy_to_user)
873 +ENTRY(___copy_to_user)
874 stmfd sp!, {r2, r4 - r7, lr}
875 cmp r2, #4
876 blt .Lc2u_not_enough
877 @@ -278,14 +278,14 @@ USER( T(strgeb) r3, [r0], #1) @ May fault
878 ldrgtb r3, [r1], #0
879 USER( T(strgtb) r3, [r0], #1) @ May fault
880 b .Lc2u_finished
881 -ENDPROC(__copy_to_user)
882 +ENDPROC(___copy_to_user)
883
884 .pushsection .fixup,"ax"
885 .align 0
886 9001: ldmfd sp!, {r0, r4 - r7, pc}
887 .popsection
888
889 -/* Prototype: unsigned long __copy_from_user(void *to,const void *from,unsigned long n);
890 +/* Prototype: unsigned long ___copy_from_user(void *to,const void *from,unsigned long n);
891 * Purpose : copy a block from user memory to kernel memory
892 * Params : to - kernel memory
893 * : from - user memory
894 @@ -304,7 +304,7 @@ USER( T(ldrgtb) r3, [r1], #1) @ May fault
895 sub r2, r2, ip
896 b .Lcfu_dest_aligned
897
898 -ENTRY(__copy_from_user)
899 +ENTRY(___copy_from_user)
900 stmfd sp!, {r0, r2, r4 - r7, lr}
901 cmp r2, #4
902 blt .Lcfu_not_enough
903 @@ -544,7 +544,7 @@ USER( T(ldrgeb) r3, [r1], #1) @ May fault
904 USER( T(ldrgtb) r3, [r1], #1) @ May fault
905 strgtb r3, [r0], #1
906 b .Lcfu_finished
907 -ENDPROC(__copy_from_user)
908 +ENDPROC(___copy_from_user)
909
910 .pushsection .fixup,"ax"
911 .align 0
912 diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
913 index 025f742..8432b08 100644
914 --- a/arch/arm/lib/uaccess_with_memcpy.c
915 +++ b/arch/arm/lib/uaccess_with_memcpy.c
916 @@ -104,7 +104,7 @@ out:
917 }
918
919 unsigned long
920 -__copy_to_user(void __user *to, const void *from, unsigned long n)
921 +___copy_to_user(void __user *to, const void *from, unsigned long n)
922 {
923 /*
924 * This test is stubbed out of the main function above to keep
925 diff --git a/arch/arm/mach-ux500/mbox-db5500.c b/arch/arm/mach-ux500/mbox-db5500.c
926 index 2b2d51c..0127490 100644
927 --- a/arch/arm/mach-ux500/mbox-db5500.c
928 +++ b/arch/arm/mach-ux500/mbox-db5500.c
929 @@ -168,7 +168,7 @@ static ssize_t mbox_read_fifo(struct device *dev,
930 return sprintf(buf, "0x%X\n", mbox_value);
931 }
932
933 -static DEVICE_ATTR(fifo, S_IWUGO | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
934 +static DEVICE_ATTR(fifo, S_IWUSR | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
935
936 static int mbox_show(struct seq_file *s, void *data)
937 {
938 diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
939 index aa33949..b242a2f 100644
940 --- a/arch/arm/mm/fault.c
941 +++ b/arch/arm/mm/fault.c
942 @@ -183,6 +183,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
943 }
944 #endif
945
946 +#ifdef CONFIG_PAX_PAGEEXEC
947 + if (fsr & FSR_LNX_PF) {
948 + pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
949 + do_group_exit(SIGKILL);
950 + }
951 +#endif
952 +
953 tsk->thread.address = addr;
954 tsk->thread.error_code = fsr;
955 tsk->thread.trap_no = 14;
956 @@ -384,6 +391,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
957 }
958 #endif /* CONFIG_MMU */
959
960 +#ifdef CONFIG_PAX_PAGEEXEC
961 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
962 +{
963 + long i;
964 +
965 + printk(KERN_ERR "PAX: bytes at PC: ");
966 + for (i = 0; i < 20; i++) {
967 + unsigned char c;
968 + if (get_user(c, (__force unsigned char __user *)pc+i))
969 + printk(KERN_CONT "?? ");
970 + else
971 + printk(KERN_CONT "%02x ", c);
972 + }
973 + printk("\n");
974 +
975 + printk(KERN_ERR "PAX: bytes at SP-4: ");
976 + for (i = -1; i < 20; i++) {
977 + unsigned long c;
978 + if (get_user(c, (__force unsigned long __user *)sp+i))
979 + printk(KERN_CONT "???????? ");
980 + else
981 + printk(KERN_CONT "%08lx ", c);
982 + }
983 + printk("\n");
984 +}
985 +#endif
986 +
987 /*
988 * First Level Translation Fault Handler
989 *
990 diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
991 index 44b628e..623ee2a 100644
992 --- a/arch/arm/mm/mmap.c
993 +++ b/arch/arm/mm/mmap.c
994 @@ -54,6 +54,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
995 if (len > TASK_SIZE)
996 return -ENOMEM;
997
998 +#ifdef CONFIG_PAX_RANDMMAP
999 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
1000 +#endif
1001 +
1002 if (addr) {
1003 if (do_align)
1004 addr = COLOUR_ALIGN(addr, pgoff);
1005 @@ -61,15 +65,14 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
1006 addr = PAGE_ALIGN(addr);
1007
1008 vma = find_vma(mm, addr);
1009 - if (TASK_SIZE - len >= addr &&
1010 - (!vma || addr + len <= vma->vm_start))
1011 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
1012 return addr;
1013 }
1014 if (len > mm->cached_hole_size) {
1015 - start_addr = addr = mm->free_area_cache;
1016 + start_addr = addr = mm->free_area_cache;
1017 } else {
1018 - start_addr = addr = TASK_UNMAPPED_BASE;
1019 - mm->cached_hole_size = 0;
1020 + start_addr = addr = mm->mmap_base;
1021 + mm->cached_hole_size = 0;
1022 }
1023 /* 8 bits of randomness in 20 address space bits */
1024 if ((current->flags & PF_RANDOMIZE) &&
1025 @@ -89,14 +92,14 @@ full_search:
1026 * Start a new search - just in case we missed
1027 * some holes.
1028 */
1029 - if (start_addr != TASK_UNMAPPED_BASE) {
1030 - start_addr = addr = TASK_UNMAPPED_BASE;
1031 + if (start_addr != mm->mmap_base) {
1032 + start_addr = addr = mm->mmap_base;
1033 mm->cached_hole_size = 0;
1034 goto full_search;
1035 }
1036 return -ENOMEM;
1037 }
1038 - if (!vma || addr + len <= vma->vm_start) {
1039 + if (check_heap_stack_gap(vma, addr, len)) {
1040 /*
1041 * Remember the place where we stopped the search:
1042 */
1043 diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
1044 index 3b3159b..425ea94 100644
1045 --- a/arch/avr32/include/asm/elf.h
1046 +++ b/arch/avr32/include/asm/elf.h
1047 @@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpregset_t;
1048 the loader. We need to make sure that it is out of the way of the program
1049 that it will "exec", and that there is sufficient room for the brk. */
1050
1051 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
1052 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1053
1054 +#ifdef CONFIG_PAX_ASLR
1055 +#define PAX_ELF_ET_DYN_BASE 0x00001000UL
1056 +
1057 +#define PAX_DELTA_MMAP_LEN 15
1058 +#define PAX_DELTA_STACK_LEN 15
1059 +#endif
1060
1061 /* This yields a mask that user programs can use to figure out what
1062 instruction set this CPU supports. This could be done in user space,
1063 diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h
1064 index b7f5c68..556135c 100644
1065 --- a/arch/avr32/include/asm/kmap_types.h
1066 +++ b/arch/avr32/include/asm/kmap_types.h
1067 @@ -22,7 +22,8 @@ D(10) KM_IRQ0,
1068 D(11) KM_IRQ1,
1069 D(12) KM_SOFTIRQ0,
1070 D(13) KM_SOFTIRQ1,
1071 -D(14) KM_TYPE_NR
1072 +D(14) KM_CLEARPAGE,
1073 +D(15) KM_TYPE_NR
1074 };
1075
1076 #undef D
1077 diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
1078 index f7040a1..db9f300 100644
1079 --- a/arch/avr32/mm/fault.c
1080 +++ b/arch/avr32/mm/fault.c
1081 @@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
1082
1083 int exception_trace = 1;
1084
1085 +#ifdef CONFIG_PAX_PAGEEXEC
1086 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1087 +{
1088 + unsigned long i;
1089 +
1090 + printk(KERN_ERR "PAX: bytes at PC: ");
1091 + for (i = 0; i < 20; i++) {
1092 + unsigned char c;
1093 + if (get_user(c, (unsigned char *)pc+i))
1094 + printk(KERN_CONT "???????? ");
1095 + else
1096 + printk(KERN_CONT "%02x ", c);
1097 + }
1098 + printk("\n");
1099 +}
1100 +#endif
1101 +
1102 /*
1103 * This routine handles page faults. It determines the address and the
1104 * problem, and then passes it off to one of the appropriate routines.
1105 @@ -156,6 +173,16 @@ bad_area:
1106 up_read(&mm->mmap_sem);
1107
1108 if (user_mode(regs)) {
1109 +
1110 +#ifdef CONFIG_PAX_PAGEEXEC
1111 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
1112 + if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
1113 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
1114 + do_group_exit(SIGKILL);
1115 + }
1116 + }
1117 +#endif
1118 +
1119 if (exception_trace && printk_ratelimit())
1120 printk("%s%s[%d]: segfault at %08lx pc %08lx "
1121 "sp %08lx ecr %lu\n",
1122 diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h
1123 index f8e16b2..c73ff79 100644
1124 --- a/arch/frv/include/asm/kmap_types.h
1125 +++ b/arch/frv/include/asm/kmap_types.h
1126 @@ -23,6 +23,7 @@ enum km_type {
1127 KM_IRQ1,
1128 KM_SOFTIRQ0,
1129 KM_SOFTIRQ1,
1130 + KM_CLEARPAGE,
1131 KM_TYPE_NR
1132 };
1133
1134 diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
1135 index 385fd30..6c3d97e 100644
1136 --- a/arch/frv/mm/elf-fdpic.c
1137 +++ b/arch/frv/mm/elf-fdpic.c
1138 @@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
1139 if (addr) {
1140 addr = PAGE_ALIGN(addr);
1141 vma = find_vma(current->mm, addr);
1142 - if (TASK_SIZE - len >= addr &&
1143 - (!vma || addr + len <= vma->vm_start))
1144 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
1145 goto success;
1146 }
1147
1148 @@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
1149 for (; vma; vma = vma->vm_next) {
1150 if (addr > limit)
1151 break;
1152 - if (addr + len <= vma->vm_start)
1153 + if (check_heap_stack_gap(vma, addr, len))
1154 goto success;
1155 addr = vma->vm_end;
1156 }
1157 @@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
1158 for (; vma; vma = vma->vm_next) {
1159 if (addr > limit)
1160 break;
1161 - if (addr + len <= vma->vm_start)
1162 + if (check_heap_stack_gap(vma, addr, len))
1163 goto success;
1164 addr = vma->vm_end;
1165 }
1166 diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
1167 index b5298eb..67c6e62 100644
1168 --- a/arch/ia64/include/asm/elf.h
1169 +++ b/arch/ia64/include/asm/elf.h
1170 @@ -42,6 +42,13 @@
1171 */
1172 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
1173
1174 +#ifdef CONFIG_PAX_ASLR
1175 +#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
1176 +
1177 +#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
1178 +#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
1179 +#endif
1180 +
1181 #define PT_IA_64_UNWIND 0x70000001
1182
1183 /* IA-64 relocations: */
1184 diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
1185 index 1a97af3..7529d31 100644
1186 --- a/arch/ia64/include/asm/pgtable.h
1187 +++ b/arch/ia64/include/asm/pgtable.h
1188 @@ -12,7 +12,7 @@
1189 * David Mosberger-Tang <davidm@hpl.hp.com>
1190 */
1191
1192 -
1193 +#include <linux/const.h>
1194 #include <asm/mman.h>
1195 #include <asm/page.h>
1196 #include <asm/processor.h>
1197 @@ -143,6 +143,17 @@
1198 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1199 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1200 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
1201 +
1202 +#ifdef CONFIG_PAX_PAGEEXEC
1203 +# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
1204 +# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1205 +# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1206 +#else
1207 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
1208 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
1209 +# define PAGE_COPY_NOEXEC PAGE_COPY
1210 +#endif
1211 +
1212 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
1213 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
1214 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
1215 diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
1216 index b77768d..e0795eb 100644
1217 --- a/arch/ia64/include/asm/spinlock.h
1218 +++ b/arch/ia64/include/asm/spinlock.h
1219 @@ -72,7 +72,7 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
1220 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
1221
1222 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
1223 - ACCESS_ONCE(*p) = (tmp + 2) & ~1;
1224 + ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
1225 }
1226
1227 static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
1228 diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
1229 index 449c8c0..432a3d2 100644
1230 --- a/arch/ia64/include/asm/uaccess.h
1231 +++ b/arch/ia64/include/asm/uaccess.h
1232 @@ -257,7 +257,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
1233 const void *__cu_from = (from); \
1234 long __cu_len = (n); \
1235 \
1236 - if (__access_ok(__cu_to, __cu_len, get_fs())) \
1237 + if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) \
1238 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
1239 __cu_len; \
1240 })
1241 @@ -269,7 +269,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
1242 long __cu_len = (n); \
1243 \
1244 __chk_user_ptr(__cu_from); \
1245 - if (__access_ok(__cu_from, __cu_len, get_fs())) \
1246 + if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) \
1247 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
1248 __cu_len; \
1249 })
1250 diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
1251 index 24603be..948052d 100644
1252 --- a/arch/ia64/kernel/module.c
1253 +++ b/arch/ia64/kernel/module.c
1254 @@ -307,8 +307,7 @@ plt_target (struct plt_entry *plt)
1255 void
1256 module_free (struct module *mod, void *module_region)
1257 {
1258 - if (mod && mod->arch.init_unw_table &&
1259 - module_region == mod->module_init) {
1260 + if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
1261 unw_remove_unwind_table(mod->arch.init_unw_table);
1262 mod->arch.init_unw_table = NULL;
1263 }
1264 @@ -494,15 +493,39 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
1265 }
1266
1267 static inline int
1268 +in_init_rx (const struct module *mod, uint64_t addr)
1269 +{
1270 + return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
1271 +}
1272 +
1273 +static inline int
1274 +in_init_rw (const struct module *mod, uint64_t addr)
1275 +{
1276 + return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
1277 +}
1278 +
1279 +static inline int
1280 in_init (const struct module *mod, uint64_t addr)
1281 {
1282 - return addr - (uint64_t) mod->module_init < mod->init_size;
1283 + return in_init_rx(mod, addr) || in_init_rw(mod, addr);
1284 +}
1285 +
1286 +static inline int
1287 +in_core_rx (const struct module *mod, uint64_t addr)
1288 +{
1289 + return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
1290 +}
1291 +
1292 +static inline int
1293 +in_core_rw (const struct module *mod, uint64_t addr)
1294 +{
1295 + return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
1296 }
1297
1298 static inline int
1299 in_core (const struct module *mod, uint64_t addr)
1300 {
1301 - return addr - (uint64_t) mod->module_core < mod->core_size;
1302 + return in_core_rx(mod, addr) || in_core_rw(mod, addr);
1303 }
1304
1305 static inline int
1306 @@ -685,7 +708,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
1307 break;
1308
1309 case RV_BDREL:
1310 - val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
1311 + if (in_init_rx(mod, val))
1312 + val -= (uint64_t) mod->module_init_rx;
1313 + else if (in_init_rw(mod, val))
1314 + val -= (uint64_t) mod->module_init_rw;
1315 + else if (in_core_rx(mod, val))
1316 + val -= (uint64_t) mod->module_core_rx;
1317 + else if (in_core_rw(mod, val))
1318 + val -= (uint64_t) mod->module_core_rw;
1319 break;
1320
1321 case RV_LTV:
1322 @@ -820,15 +850,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
1323 * addresses have been selected...
1324 */
1325 uint64_t gp;
1326 - if (mod->core_size > MAX_LTOFF)
1327 + if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
1328 /*
1329 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
1330 * at the end of the module.
1331 */
1332 - gp = mod->core_size - MAX_LTOFF / 2;
1333 + gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
1334 else
1335 - gp = mod->core_size / 2;
1336 - gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
1337 + gp = (mod->core_size_rx + mod->core_size_rw) / 2;
1338 + gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
1339 mod->arch.gp = gp;
1340 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
1341 }
1342 diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
1343 index 609d500..7dde2a8 100644
1344 --- a/arch/ia64/kernel/sys_ia64.c
1345 +++ b/arch/ia64/kernel/sys_ia64.c
1346 @@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
1347 if (REGION_NUMBER(addr) == RGN_HPAGE)
1348 addr = 0;
1349 #endif
1350 +
1351 +#ifdef CONFIG_PAX_RANDMMAP
1352 + if (mm->pax_flags & MF_PAX_RANDMMAP)
1353 + addr = mm->free_area_cache;
1354 + else
1355 +#endif
1356 +
1357 if (!addr)
1358 addr = mm->free_area_cache;
1359
1360 @@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
1361 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
1362 /* At this point: (!vma || addr < vma->vm_end). */
1363 if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
1364 - if (start_addr != TASK_UNMAPPED_BASE) {
1365 + if (start_addr != mm->mmap_base) {
1366 /* Start a new search --- just in case we missed some holes. */
1367 - addr = TASK_UNMAPPED_BASE;
1368 + addr = mm->mmap_base;
1369 goto full_search;
1370 }
1371 return -ENOMEM;
1372 }
1373 - if (!vma || addr + len <= vma->vm_start) {
1374 + if (check_heap_stack_gap(vma, addr, len)) {
1375 /* Remember the address where we stopped this search: */
1376 mm->free_area_cache = addr + len;
1377 return addr;
1378 diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
1379 index 53c0ba0..2accdde 100644
1380 --- a/arch/ia64/kernel/vmlinux.lds.S
1381 +++ b/arch/ia64/kernel/vmlinux.lds.S
1382 @@ -199,7 +199,7 @@ SECTIONS {
1383 /* Per-cpu data: */
1384 . = ALIGN(PERCPU_PAGE_SIZE);
1385 PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
1386 - __phys_per_cpu_start = __per_cpu_load;
1387 + __phys_per_cpu_start = per_cpu_load;
1388 /*
1389 * ensure percpu data fits
1390 * into percpu page size
1391 diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
1392 index 20b3593..1ce77f0 100644
1393 --- a/arch/ia64/mm/fault.c
1394 +++ b/arch/ia64/mm/fault.c
1395 @@ -73,6 +73,23 @@ mapped_kernel_page_is_present (unsigned long address)
1396 return pte_present(pte);
1397 }
1398
1399 +#ifdef CONFIG_PAX_PAGEEXEC
1400 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1401 +{
1402 + unsigned long i;
1403 +
1404 + printk(KERN_ERR "PAX: bytes at PC: ");
1405 + for (i = 0; i < 8; i++) {
1406 + unsigned int c;
1407 + if (get_user(c, (unsigned int *)pc+i))
1408 + printk(KERN_CONT "???????? ");
1409 + else
1410 + printk(KERN_CONT "%08x ", c);
1411 + }
1412 + printk("\n");
1413 +}
1414 +#endif
1415 +
1416 void __kprobes
1417 ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
1418 {
1419 @@ -146,9 +163,23 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
1420 mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
1421 | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
1422
1423 - if ((vma->vm_flags & mask) != mask)
1424 + if ((vma->vm_flags & mask) != mask) {
1425 +
1426 +#ifdef CONFIG_PAX_PAGEEXEC
1427 + if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
1428 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
1429 + goto bad_area;
1430 +
1431 + up_read(&mm->mmap_sem);
1432 + pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
1433 + do_group_exit(SIGKILL);
1434 + }
1435 +#endif
1436 +
1437 goto bad_area;
1438
1439 + }
1440 +
1441 /*
1442 * If for any reason at all we couldn't handle the fault, make
1443 * sure we exit gracefully rather than endlessly redo the
1444 diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
1445 index 5ca674b..e0e1b70 100644
1446 --- a/arch/ia64/mm/hugetlbpage.c
1447 +++ b/arch/ia64/mm/hugetlbpage.c
1448 @@ -171,7 +171,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
1449 /* At this point: (!vmm || addr < vmm->vm_end). */
1450 if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
1451 return -ENOMEM;
1452 - if (!vmm || (addr + len) <= vmm->vm_start)
1453 + if (check_heap_stack_gap(vmm, addr, len))
1454 return addr;
1455 addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
1456 }
1457 diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
1458 index 00cb0e2..2ad8024 100644
1459 --- a/arch/ia64/mm/init.c
1460 +++ b/arch/ia64/mm/init.c
1461 @@ -120,6 +120,19 @@ ia64_init_addr_space (void)
1462 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
1463 vma->vm_end = vma->vm_start + PAGE_SIZE;
1464 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
1465 +
1466 +#ifdef CONFIG_PAX_PAGEEXEC
1467 + if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
1468 + vma->vm_flags &= ~VM_EXEC;
1469 +
1470 +#ifdef CONFIG_PAX_MPROTECT
1471 + if (current->mm->pax_flags & MF_PAX_MPROTECT)
1472 + vma->vm_flags &= ~VM_MAYEXEC;
1473 +#endif
1474 +
1475 + }
1476 +#endif
1477 +
1478 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
1479 down_write(&current->mm->mmap_sem);
1480 if (insert_vm_struct(current->mm, vma)) {
1481 diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c
1482 index 82abd15..d95ae5d 100644
1483 --- a/arch/m32r/lib/usercopy.c
1484 +++ b/arch/m32r/lib/usercopy.c
1485 @@ -14,6 +14,9 @@
1486 unsigned long
1487 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
1488 {
1489 + if ((long)n < 0)
1490 + return n;
1491 +
1492 prefetch(from);
1493 if (access_ok(VERIFY_WRITE, to, n))
1494 __copy_user(to,from,n);
1495 @@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
1496 unsigned long
1497 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
1498 {
1499 + if ((long)n < 0)
1500 + return n;
1501 +
1502 prefetchw(to);
1503 if (access_ok(VERIFY_READ, from, n))
1504 __copy_user_zeroing(to,from,n);
1505 diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
1506 index 455c0ac..ad65fbe 100644
1507 --- a/arch/mips/include/asm/elf.h
1508 +++ b/arch/mips/include/asm/elf.h
1509 @@ -372,13 +372,16 @@ extern const char *__elf_platform;
1510 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1511 #endif
1512
1513 +#ifdef CONFIG_PAX_ASLR
1514 +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1515 +
1516 +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1517 +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1518 +#endif
1519 +
1520 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
1521 struct linux_binprm;
1522 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
1523 int uses_interp);
1524
1525 -struct mm_struct;
1526 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1527 -#define arch_randomize_brk arch_randomize_brk
1528 -
1529 #endif /* _ASM_ELF_H */
1530 diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
1531 index e59cd1a..8e329d6 100644
1532 --- a/arch/mips/include/asm/page.h
1533 +++ b/arch/mips/include/asm/page.h
1534 @@ -93,7 +93,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
1535 #ifdef CONFIG_CPU_MIPS32
1536 typedef struct { unsigned long pte_low, pte_high; } pte_t;
1537 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
1538 - #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
1539 + #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
1540 #else
1541 typedef struct { unsigned long long pte; } pte_t;
1542 #define pte_val(x) ((x).pte)
1543 diff --git a/arch/mips/include/asm/system.h b/arch/mips/include/asm/system.h
1544 index 6018c80..7c37203 100644
1545 --- a/arch/mips/include/asm/system.h
1546 +++ b/arch/mips/include/asm/system.h
1547 @@ -230,6 +230,6 @@ extern void per_cpu_trap_init(void);
1548 */
1549 #define __ARCH_WANT_UNLOCKED_CTXSW
1550
1551 -extern unsigned long arch_align_stack(unsigned long sp);
1552 +#define arch_align_stack(x) ((x) & ~0xfUL)
1553
1554 #endif /* _ASM_SYSTEM_H */
1555 diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
1556 index 9fdd8bc..4bd7f1a 100644
1557 --- a/arch/mips/kernel/binfmt_elfn32.c
1558 +++ b/arch/mips/kernel/binfmt_elfn32.c
1559 @@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
1560 #undef ELF_ET_DYN_BASE
1561 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1562
1563 +#ifdef CONFIG_PAX_ASLR
1564 +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1565 +
1566 +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1567 +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1568 +#endif
1569 +
1570 #include <asm/processor.h>
1571 #include <linux/module.h>
1572 #include <linux/elfcore.h>
1573 diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
1574 index ff44823..97f8906 100644
1575 --- a/arch/mips/kernel/binfmt_elfo32.c
1576 +++ b/arch/mips/kernel/binfmt_elfo32.c
1577 @@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
1578 #undef ELF_ET_DYN_BASE
1579 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1580
1581 +#ifdef CONFIG_PAX_ASLR
1582 +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1583 +
1584 +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1585 +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1586 +#endif
1587 +
1588 #include <asm/processor.h>
1589
1590 /*
1591 diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
1592 index c47f96e..661d418 100644
1593 --- a/arch/mips/kernel/process.c
1594 +++ b/arch/mips/kernel/process.c
1595 @@ -481,15 +481,3 @@ unsigned long get_wchan(struct task_struct *task)
1596 out:
1597 return pc;
1598 }
1599 -
1600 -/*
1601 - * Don't forget that the stack pointer must be aligned on a 8 bytes
1602 - * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
1603 - */
1604 -unsigned long arch_align_stack(unsigned long sp)
1605 -{
1606 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
1607 - sp -= get_random_int() & ~PAGE_MASK;
1608 -
1609 - return sp & ALMASK;
1610 -}
1611 diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
1612 index 937cf33..adb39bb 100644
1613 --- a/arch/mips/mm/fault.c
1614 +++ b/arch/mips/mm/fault.c
1615 @@ -28,6 +28,23 @@
1616 #include <asm/highmem.h> /* For VMALLOC_END */
1617 #include <linux/kdebug.h>
1618
1619 +#ifdef CONFIG_PAX_PAGEEXEC
1620 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1621 +{
1622 + unsigned long i;
1623 +
1624 + printk(KERN_ERR "PAX: bytes at PC: ");
1625 + for (i = 0; i < 5; i++) {
1626 + unsigned int c;
1627 + if (get_user(c, (unsigned int *)pc+i))
1628 + printk(KERN_CONT "???????? ");
1629 + else
1630 + printk(KERN_CONT "%08x ", c);
1631 + }
1632 + printk("\n");
1633 +}
1634 +#endif
1635 +
1636 /*
1637 * This routine handles page faults. It determines the address,
1638 * and the problem, and then passes it off to one of the appropriate
1639 diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
1640 index 302d779..7d35bf8 100644
1641 --- a/arch/mips/mm/mmap.c
1642 +++ b/arch/mips/mm/mmap.c
1643 @@ -95,6 +95,11 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
1644 do_color_align = 1;
1645
1646 /* requesting a specific address */
1647 +
1648 +#ifdef CONFIG_PAX_RANDMMAP
1649 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
1650 +#endif
1651 +
1652 if (addr) {
1653 if (do_color_align)
1654 addr = COLOUR_ALIGN(addr, pgoff);
1655 @@ -102,8 +107,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
1656 addr = PAGE_ALIGN(addr);
1657
1658 vma = find_vma(mm, addr);
1659 - if (TASK_SIZE - len >= addr &&
1660 - (!vma || addr + len <= vma->vm_start))
1661 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vmm, addr, len))
1662 return addr;
1663 }
1664
1665 @@ -118,7 +122,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
1666 /* At this point: (!vma || addr < vma->vm_end). */
1667 if (TASK_SIZE - len < addr)
1668 return -ENOMEM;
1669 - if (!vma || addr + len <= vma->vm_start)
1670 + if (check_heap_stack_gap(vmm, addr, len))
1671 return addr;
1672 addr = vma->vm_end;
1673 if (do_color_align)
1674 @@ -145,7 +149,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
1675 /* make sure it can fit in the remaining address space */
1676 if (likely(addr > len)) {
1677 vma = find_vma(mm, addr - len);
1678 - if (!vma || addr <= vma->vm_start) {
1679 + if (check_heap_stack_gap(vmm, addr - len, len))
1680 /* cache the address as a hint for next time */
1681 return mm->free_area_cache = addr - len;
1682 }
1683 @@ -165,7 +169,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
1684 * return with success:
1685 */
1686 vma = find_vma(mm, addr);
1687 - if (likely(!vma || addr + len <= vma->vm_start)) {
1688 + if (check_heap_stack_gap(vmm, addr, len)) {
1689 /* cache the address as a hint for next time */
1690 return mm->free_area_cache = addr;
1691 }
1692 @@ -242,30 +246,3 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
1693 mm->unmap_area = arch_unmap_area_topdown;
1694 }
1695 }
1696 -
1697 -static inline unsigned long brk_rnd(void)
1698 -{
1699 - unsigned long rnd = get_random_int();
1700 -
1701 - rnd = rnd << PAGE_SHIFT;
1702 - /* 8MB for 32bit, 256MB for 64bit */
1703 - if (TASK_IS_32BIT_ADDR)
1704 - rnd = rnd & 0x7ffffful;
1705 - else
1706 - rnd = rnd & 0xffffffful;
1707 -
1708 - return rnd;
1709 -}
1710 -
1711 -unsigned long arch_randomize_brk(struct mm_struct *mm)
1712 -{
1713 - unsigned long base = mm->brk;
1714 - unsigned long ret;
1715 -
1716 - ret = PAGE_ALIGN(base + brk_rnd());
1717 -
1718 - if (ret < mm->brk)
1719 - return mm->brk;
1720 -
1721 - return ret;
1722 -}
1723 diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
1724 index 19f6cb1..6c78cf2 100644
1725 --- a/arch/parisc/include/asm/elf.h
1726 +++ b/arch/parisc/include/asm/elf.h
1727 @@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration... */
1728
1729 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
1730
1731 +#ifdef CONFIG_PAX_ASLR
1732 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
1733 +
1734 +#define PAX_DELTA_MMAP_LEN 16
1735 +#define PAX_DELTA_STACK_LEN 16
1736 +#endif
1737 +
1738 /* This yields a mask that user programs can use to figure out what
1739 instruction set this CPU supports. This could be done in user space,
1740 but it's not easy, and we've already done it here. */
1741 diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
1742 index 22dadeb..f6c2be4 100644
1743 --- a/arch/parisc/include/asm/pgtable.h
1744 +++ b/arch/parisc/include/asm/pgtable.h
1745 @@ -210,6 +210,17 @@ struct vm_area_struct;
1746 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
1747 #define PAGE_COPY PAGE_EXECREAD
1748 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
1749 +
1750 +#ifdef CONFIG_PAX_PAGEEXEC
1751 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
1752 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1753 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1754 +#else
1755 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
1756 +# define PAGE_COPY_NOEXEC PAGE_COPY
1757 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
1758 +#endif
1759 +
1760 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
1761 #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC)
1762 #define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX)
1763 diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
1764 index 5e34ccf..672bc9c 100644
1765 --- a/arch/parisc/kernel/module.c
1766 +++ b/arch/parisc/kernel/module.c
1767 @@ -98,16 +98,38 @@
1768
1769 /* three functions to determine where in the module core
1770 * or init pieces the location is */
1771 +static inline int in_init_rx(struct module *me, void *loc)
1772 +{
1773 + return (loc >= me->module_init_rx &&
1774 + loc < (me->module_init_rx + me->init_size_rx));
1775 +}
1776 +
1777 +static inline int in_init_rw(struct module *me, void *loc)
1778 +{
1779 + return (loc >= me->module_init_rw &&
1780 + loc < (me->module_init_rw + me->init_size_rw));
1781 +}
1782 +
1783 static inline int in_init(struct module *me, void *loc)
1784 {
1785 - return (loc >= me->module_init &&
1786 - loc <= (me->module_init + me->init_size));
1787 + return in_init_rx(me, loc) || in_init_rw(me, loc);
1788 +}
1789 +
1790 +static inline int in_core_rx(struct module *me, void *loc)
1791 +{
1792 + return (loc >= me->module_core_rx &&
1793 + loc < (me->module_core_rx + me->core_size_rx));
1794 +}
1795 +
1796 +static inline int in_core_rw(struct module *me, void *loc)
1797 +{
1798 + return (loc >= me->module_core_rw &&
1799 + loc < (me->module_core_rw + me->core_size_rw));
1800 }
1801
1802 static inline int in_core(struct module *me, void *loc)
1803 {
1804 - return (loc >= me->module_core &&
1805 - loc <= (me->module_core + me->core_size));
1806 + return in_core_rx(me, loc) || in_core_rw(me, loc);
1807 }
1808
1809 static inline int in_local(struct module *me, void *loc)
1810 @@ -373,13 +395,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
1811 }
1812
1813 /* align things a bit */
1814 - me->core_size = ALIGN(me->core_size, 16);
1815 - me->arch.got_offset = me->core_size;
1816 - me->core_size += gots * sizeof(struct got_entry);
1817 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
1818 + me->arch.got_offset = me->core_size_rw;
1819 + me->core_size_rw += gots * sizeof(struct got_entry);
1820
1821 - me->core_size = ALIGN(me->core_size, 16);
1822 - me->arch.fdesc_offset = me->core_size;
1823 - me->core_size += fdescs * sizeof(Elf_Fdesc);
1824 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
1825 + me->arch.fdesc_offset = me->core_size_rw;
1826 + me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
1827
1828 me->arch.got_max = gots;
1829 me->arch.fdesc_max = fdescs;
1830 @@ -397,7 +419,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
1831
1832 BUG_ON(value == 0);
1833
1834 - got = me->module_core + me->arch.got_offset;
1835 + got = me->module_core_rw + me->arch.got_offset;
1836 for (i = 0; got[i].addr; i++)
1837 if (got[i].addr == value)
1838 goto out;
1839 @@ -415,7 +437,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
1840 #ifdef CONFIG_64BIT
1841 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
1842 {
1843 - Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
1844 + Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
1845
1846 if (!value) {
1847 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
1848 @@ -433,7 +455,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
1849
1850 /* Create new one */
1851 fdesc->addr = value;
1852 - fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1853 + fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1854 return (Elf_Addr)fdesc;
1855 }
1856 #endif /* CONFIG_64BIT */
1857 @@ -845,7 +867,7 @@ register_unwind_table(struct module *me,
1858
1859 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
1860 end = table + sechdrs[me->arch.unwind_section].sh_size;
1861 - gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1862 + gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1863
1864 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
1865 me->arch.unwind_section, table, end, gp);
1866 diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
1867 index c9b9322..02d8940 100644
1868 --- a/arch/parisc/kernel/sys_parisc.c
1869 +++ b/arch/parisc/kernel/sys_parisc.c
1870 @@ -43,7 +43,7 @@ static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
1871 /* At this point: (!vma || addr < vma->vm_end). */
1872 if (TASK_SIZE - len < addr)
1873 return -ENOMEM;
1874 - if (!vma || addr + len <= vma->vm_start)
1875 + if (check_heap_stack_gap(vma, addr, len))
1876 return addr;
1877 addr = vma->vm_end;
1878 }
1879 @@ -79,7 +79,7 @@ static unsigned long get_shared_area(struct address_space *mapping,
1880 /* At this point: (!vma || addr < vma->vm_end). */
1881 if (TASK_SIZE - len < addr)
1882 return -ENOMEM;
1883 - if (!vma || addr + len <= vma->vm_start)
1884 + if (check_heap_stack_gap(vma, addr, len))
1885 return addr;
1886 addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
1887 if (addr < vma->vm_end) /* handle wraparound */
1888 @@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
1889 if (flags & MAP_FIXED)
1890 return addr;
1891 if (!addr)
1892 - addr = TASK_UNMAPPED_BASE;
1893 + addr = current->mm->mmap_base;
1894
1895 if (filp) {
1896 addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
1897 diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
1898 index f19e660..414fe24 100644
1899 --- a/arch/parisc/kernel/traps.c
1900 +++ b/arch/parisc/kernel/traps.c
1901 @@ -733,9 +733,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
1902
1903 down_read(&current->mm->mmap_sem);
1904 vma = find_vma(current->mm,regs->iaoq[0]);
1905 - if (vma && (regs->iaoq[0] >= vma->vm_start)
1906 - && (vma->vm_flags & VM_EXEC)) {
1907 -
1908 + if (vma && (regs->iaoq[0] >= vma->vm_start)) {
1909 fault_address = regs->iaoq[0];
1910 fault_space = regs->iasq[0];
1911
1912 diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
1913 index 18162ce..94de376 100644
1914 --- a/arch/parisc/mm/fault.c
1915 +++ b/arch/parisc/mm/fault.c
1916 @@ -15,6 +15,7 @@
1917 #include <linux/sched.h>
1918 #include <linux/interrupt.h>
1919 #include <linux/module.h>
1920 +#include <linux/unistd.h>
1921
1922 #include <asm/uaccess.h>
1923 #include <asm/traps.h>
1924 @@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, exception_data);
1925 static unsigned long
1926 parisc_acctyp(unsigned long code, unsigned int inst)
1927 {
1928 - if (code == 6 || code == 16)
1929 + if (code == 6 || code == 7 || code == 16)
1930 return VM_EXEC;
1931
1932 switch (inst & 0xf0000000) {
1933 @@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsigned int inst)
1934 }
1935 #endif
1936
1937 +#ifdef CONFIG_PAX_PAGEEXEC
1938 +/*
1939 + * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
1940 + *
1941 + * returns 1 when task should be killed
1942 + * 2 when rt_sigreturn trampoline was detected
1943 + * 3 when unpatched PLT trampoline was detected
1944 + */
1945 +static int pax_handle_fetch_fault(struct pt_regs *regs)
1946 +{
1947 +
1948 +#ifdef CONFIG_PAX_EMUPLT
1949 + int err;
1950 +
1951 + do { /* PaX: unpatched PLT emulation */
1952 + unsigned int bl, depwi;
1953 +
1954 + err = get_user(bl, (unsigned int *)instruction_pointer(regs));
1955 + err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
1956 +
1957 + if (err)
1958 + break;
1959 +
1960 + if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
1961 + unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
1962 +
1963 + err = get_user(ldw, (unsigned int *)addr);
1964 + err |= get_user(bv, (unsigned int *)(addr+4));
1965 + err |= get_user(ldw2, (unsigned int *)(addr+8));
1966 +
1967 + if (err)
1968 + break;
1969 +
1970 + if (ldw == 0x0E801096U &&
1971 + bv == 0xEAC0C000U &&
1972 + ldw2 == 0x0E881095U)
1973 + {
1974 + unsigned int resolver, map;
1975 +
1976 + err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
1977 + err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
1978 + if (err)
1979 + break;
1980 +
1981 + regs->gr[20] = instruction_pointer(regs)+8;
1982 + regs->gr[21] = map;
1983 + regs->gr[22] = resolver;
1984 + regs->iaoq[0] = resolver | 3UL;
1985 + regs->iaoq[1] = regs->iaoq[0] + 4;
1986 + return 3;
1987 + }
1988 + }
1989 + } while (0);
1990 +#endif
1991 +
1992 +#ifdef CONFIG_PAX_EMUTRAMP
1993 +
1994 +#ifndef CONFIG_PAX_EMUSIGRT
1995 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
1996 + return 1;
1997 +#endif
1998 +
1999 + do { /* PaX: rt_sigreturn emulation */
2000 + unsigned int ldi1, ldi2, bel, nop;
2001 +
2002 + err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
2003 + err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
2004 + err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
2005 + err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
2006 +
2007 + if (err)
2008 + break;
2009 +
2010 + if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
2011 + ldi2 == 0x3414015AU &&
2012 + bel == 0xE4008200U &&
2013 + nop == 0x08000240U)
2014 + {
2015 + regs->gr[25] = (ldi1 & 2) >> 1;
2016 + regs->gr[20] = __NR_rt_sigreturn;
2017 + regs->gr[31] = regs->iaoq[1] + 16;
2018 + regs->sr[0] = regs->iasq[1];
2019 + regs->iaoq[0] = 0x100UL;
2020 + regs->iaoq[1] = regs->iaoq[0] + 4;
2021 + regs->iasq[0] = regs->sr[2];
2022 + regs->iasq[1] = regs->sr[2];
2023 + return 2;
2024 + }
2025 + } while (0);
2026 +#endif
2027 +
2028 + return 1;
2029 +}
2030 +
2031 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2032 +{
2033 + unsigned long i;
2034 +
2035 + printk(KERN_ERR "PAX: bytes at PC: ");
2036 + for (i = 0; i < 5; i++) {
2037 + unsigned int c;
2038 + if (get_user(c, (unsigned int *)pc+i))
2039 + printk(KERN_CONT "???????? ");
2040 + else
2041 + printk(KERN_CONT "%08x ", c);
2042 + }
2043 + printk("\n");
2044 +}
2045 +#endif
2046 +
2047 int fixup_exception(struct pt_regs *regs)
2048 {
2049 const struct exception_table_entry *fix;
2050 @@ -192,8 +303,33 @@ good_area:
2051
2052 acc_type = parisc_acctyp(code,regs->iir);
2053
2054 - if ((vma->vm_flags & acc_type) != acc_type)
2055 + if ((vma->vm_flags & acc_type) != acc_type) {
2056 +
2057 +#ifdef CONFIG_PAX_PAGEEXEC
2058 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
2059 + (address & ~3UL) == instruction_pointer(regs))
2060 + {
2061 + up_read(&mm->mmap_sem);
2062 + switch (pax_handle_fetch_fault(regs)) {
2063 +
2064 +#ifdef CONFIG_PAX_EMUPLT
2065 + case 3:
2066 + return;
2067 +#endif
2068 +
2069 +#ifdef CONFIG_PAX_EMUTRAMP
2070 + case 2:
2071 + return;
2072 +#endif
2073 +
2074 + }
2075 + pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
2076 + do_group_exit(SIGKILL);
2077 + }
2078 +#endif
2079 +
2080 goto bad_area;
2081 + }
2082
2083 /*
2084 * If for any reason at all we couldn't handle the fault, make
2085 diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
2086 index 3bf9cca..e7457d0 100644
2087 --- a/arch/powerpc/include/asm/elf.h
2088 +++ b/arch/powerpc/include/asm/elf.h
2089 @@ -178,8 +178,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[ELF_NVSRHALFREG];
2090 the loader. We need to make sure that it is out of the way of the program
2091 that it will "exec", and that there is sufficient room for the brk. */
2092
2093 -extern unsigned long randomize_et_dyn(unsigned long base);
2094 -#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
2095 +#define ELF_ET_DYN_BASE (0x20000000)
2096 +
2097 +#ifdef CONFIG_PAX_ASLR
2098 +#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
2099 +
2100 +#ifdef __powerpc64__
2101 +#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28)
2102 +#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28)
2103 +#else
2104 +#define PAX_DELTA_MMAP_LEN 15
2105 +#define PAX_DELTA_STACK_LEN 15
2106 +#endif
2107 +#endif
2108
2109 /*
2110 * Our registers are always unsigned longs, whether we're a 32 bit
2111 @@ -274,9 +285,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
2112 (0x7ff >> (PAGE_SHIFT - 12)) : \
2113 (0x3ffff >> (PAGE_SHIFT - 12)))
2114
2115 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
2116 -#define arch_randomize_brk arch_randomize_brk
2117 -
2118 #endif /* __KERNEL__ */
2119
2120 /*
2121 diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
2122 index bca8fdc..61e9580 100644
2123 --- a/arch/powerpc/include/asm/kmap_types.h
2124 +++ b/arch/powerpc/include/asm/kmap_types.h
2125 @@ -27,6 +27,7 @@ enum km_type {
2126 KM_PPC_SYNC_PAGE,
2127 KM_PPC_SYNC_ICACHE,
2128 KM_KDB,
2129 + KM_CLEARPAGE,
2130 KM_TYPE_NR
2131 };
2132
2133 diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h
2134 index d4a7f64..451de1c 100644
2135 --- a/arch/powerpc/include/asm/mman.h
2136 +++ b/arch/powerpc/include/asm/mman.h
2137 @@ -44,7 +44,7 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot)
2138 }
2139 #define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot)
2140
2141 -static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
2142 +static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags)
2143 {
2144 return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
2145 }
2146 diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
2147 index dd9c4fd..a2ced87 100644
2148 --- a/arch/powerpc/include/asm/page.h
2149 +++ b/arch/powerpc/include/asm/page.h
2150 @@ -141,8 +141,9 @@ extern phys_addr_t kernstart_addr;
2151 * and needs to be executable. This means the whole heap ends
2152 * up being executable.
2153 */
2154 -#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
2155 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2156 +#define VM_DATA_DEFAULT_FLAGS32 \
2157 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
2158 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2159
2160 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
2161 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2162 @@ -170,6 +171,9 @@ extern phys_addr_t kernstart_addr;
2163 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
2164 #endif
2165
2166 +#define ktla_ktva(addr) (addr)
2167 +#define ktva_ktla(addr) (addr)
2168 +
2169 /*
2170 * Use the top bit of the higher-level page table entries to indicate whether
2171 * the entries we point to contain hugepages. This works because we know that
2172 diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
2173 index fb40ede..d3ce956 100644
2174 --- a/arch/powerpc/include/asm/page_64.h
2175 +++ b/arch/powerpc/include/asm/page_64.h
2176 @@ -144,15 +144,18 @@ do { \
2177 * stack by default, so in the absence of a PT_GNU_STACK program header
2178 * we turn execute permission off.
2179 */
2180 -#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
2181 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2182 +#define VM_STACK_DEFAULT_FLAGS32 \
2183 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
2184 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2185
2186 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
2187 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2188
2189 +#ifndef CONFIG_PAX_PAGEEXEC
2190 #define VM_STACK_DEFAULT_FLAGS \
2191 (is_32bit_task() ? \
2192 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
2193 +#endif
2194
2195 #include <asm-generic/getorder.h>
2196
2197 diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
2198 index 88b0bd9..e32bc67 100644
2199 --- a/arch/powerpc/include/asm/pgtable.h
2200 +++ b/arch/powerpc/include/asm/pgtable.h
2201 @@ -2,6 +2,7 @@
2202 #define _ASM_POWERPC_PGTABLE_H
2203 #ifdef __KERNEL__
2204
2205 +#include <linux/const.h>
2206 #ifndef __ASSEMBLY__
2207 #include <asm/processor.h> /* For TASK_SIZE */
2208 #include <asm/mmu.h>
2209 diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h
2210 index 4aad413..85d86bf 100644
2211 --- a/arch/powerpc/include/asm/pte-hash32.h
2212 +++ b/arch/powerpc/include/asm/pte-hash32.h
2213 @@ -21,6 +21,7 @@
2214 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
2215 #define _PAGE_USER 0x004 /* usermode access allowed */
2216 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
2217 +#define _PAGE_EXEC _PAGE_GUARDED
2218 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
2219 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
2220 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
2221 diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
2222 index 559da19..7e5835c 100644
2223 --- a/arch/powerpc/include/asm/reg.h
2224 +++ b/arch/powerpc/include/asm/reg.h
2225 @@ -212,6 +212,7 @@
2226 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
2227 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
2228 #define DSISR_NOHPTE 0x40000000 /* no translation found */
2229 +#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
2230 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
2231 #define DSISR_ISSTORE 0x02000000 /* access was a store */
2232 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
2233 diff --git a/arch/powerpc/include/asm/system.h b/arch/powerpc/include/asm/system.h
2234 index e30a13d..2b7d994 100644
2235 --- a/arch/powerpc/include/asm/system.h
2236 +++ b/arch/powerpc/include/asm/system.h
2237 @@ -530,7 +530,7 @@ __cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new,
2238 #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
2239 #endif
2240
2241 -extern unsigned long arch_align_stack(unsigned long sp);
2242 +#define arch_align_stack(x) ((x) & ~0xfUL)
2243
2244 /* Used in very early kernel initialization. */
2245 extern unsigned long reloc_offset(void);
2246 diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
2247 index bd0fb84..a42a14b 100644
2248 --- a/arch/powerpc/include/asm/uaccess.h
2249 +++ b/arch/powerpc/include/asm/uaccess.h
2250 @@ -13,6 +13,8 @@
2251 #define VERIFY_READ 0
2252 #define VERIFY_WRITE 1
2253
2254 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
2255 +
2256 /*
2257 * The fs value determines whether argument validity checking should be
2258 * performed or not. If get_fs() == USER_DS, checking is performed, with
2259 @@ -327,52 +329,6 @@ do { \
2260 extern unsigned long __copy_tofrom_user(void __user *to,
2261 const void __user *from, unsigned long size);
2262
2263 -#ifndef __powerpc64__
2264 -
2265 -static inline unsigned long copy_from_user(void *to,
2266 - const void __user *from, unsigned long n)
2267 -{
2268 - unsigned long over;
2269 -
2270 - if (access_ok(VERIFY_READ, from, n))
2271 - return __copy_tofrom_user((__force void __user *)to, from, n);
2272 - if ((unsigned long)from < TASK_SIZE) {
2273 - over = (unsigned long)from + n - TASK_SIZE;
2274 - return __copy_tofrom_user((__force void __user *)to, from,
2275 - n - over) + over;
2276 - }
2277 - return n;
2278 -}
2279 -
2280 -static inline unsigned long copy_to_user(void __user *to,
2281 - const void *from, unsigned long n)
2282 -{
2283 - unsigned long over;
2284 -
2285 - if (access_ok(VERIFY_WRITE, to, n))
2286 - return __copy_tofrom_user(to, (__force void __user *)from, n);
2287 - if ((unsigned long)to < TASK_SIZE) {
2288 - over = (unsigned long)to + n - TASK_SIZE;
2289 - return __copy_tofrom_user(to, (__force void __user *)from,
2290 - n - over) + over;
2291 - }
2292 - return n;
2293 -}
2294 -
2295 -#else /* __powerpc64__ */
2296 -
2297 -#define __copy_in_user(to, from, size) \
2298 - __copy_tofrom_user((to), (from), (size))
2299 -
2300 -extern unsigned long copy_from_user(void *to, const void __user *from,
2301 - unsigned long n);
2302 -extern unsigned long copy_to_user(void __user *to, const void *from,
2303 - unsigned long n);
2304 -extern unsigned long copy_in_user(void __user *to, const void __user *from,
2305 - unsigned long n);
2306 -
2307 -#endif /* __powerpc64__ */
2308 -
2309 static inline unsigned long __copy_from_user_inatomic(void *to,
2310 const void __user *from, unsigned long n)
2311 {
2312 @@ -396,6 +352,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
2313 if (ret == 0)
2314 return 0;
2315 }
2316 +
2317 + if (!__builtin_constant_p(n))
2318 + check_object_size(to, n, false);
2319 +
2320 return __copy_tofrom_user((__force void __user *)to, from, n);
2321 }
2322
2323 @@ -422,6 +382,10 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
2324 if (ret == 0)
2325 return 0;
2326 }
2327 +
2328 + if (!__builtin_constant_p(n))
2329 + check_object_size(from, n, true);
2330 +
2331 return __copy_tofrom_user(to, (__force const void __user *)from, n);
2332 }
2333
2334 @@ -439,6 +403,92 @@ static inline unsigned long __copy_to_user(void __user *to,
2335 return __copy_to_user_inatomic(to, from, size);
2336 }
2337
2338 +#ifndef __powerpc64__
2339 +
2340 +static inline unsigned long __must_check copy_from_user(void *to,
2341 + const void __user *from, unsigned long n)
2342 +{
2343 + unsigned long over;
2344 +
2345 + if ((long)n < 0)
2346 + return n;
2347 +
2348 + if (access_ok(VERIFY_READ, from, n)) {
2349 + if (!__builtin_constant_p(n))
2350 + check_object_size(to, n, false);
2351 + return __copy_tofrom_user((__force void __user *)to, from, n);
2352 + }
2353 + if ((unsigned long)from < TASK_SIZE) {
2354 + over = (unsigned long)from + n - TASK_SIZE;
2355 + if (!__builtin_constant_p(n - over))
2356 + check_object_size(to, n - over, false);
2357 + return __copy_tofrom_user((__force void __user *)to, from,
2358 + n - over) + over;
2359 + }
2360 + return n;
2361 +}
2362 +
2363 +static inline unsigned long __must_check copy_to_user(void __user *to,
2364 + const void *from, unsigned long n)
2365 +{
2366 + unsigned long over;
2367 +
2368 + if ((long)n < 0)
2369 + return n;
2370 +
2371 + if (access_ok(VERIFY_WRITE, to, n)) {
2372 + if (!__builtin_constant_p(n))
2373 + check_object_size(from, n, true);
2374 + return __copy_tofrom_user(to, (__force void __user *)from, n);
2375 + }
2376 + if ((unsigned long)to < TASK_SIZE) {
2377 + over = (unsigned long)to + n - TASK_SIZE;
2378 + if (!__builtin_constant_p(n))
2379 + check_object_size(from, n - over, true);
2380 + return __copy_tofrom_user(to, (__force void __user *)from,
2381 + n - over) + over;
2382 + }
2383 + return n;
2384 +}
2385 +
2386 +#else /* __powerpc64__ */
2387 +
2388 +#define __copy_in_user(to, from, size) \
2389 + __copy_tofrom_user((to), (from), (size))
2390 +
2391 +static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
2392 +{
2393 + if ((long)n < 0 || n > INT_MAX)
2394 + return n;
2395 +
2396 + if (!__builtin_constant_p(n))
2397 + check_object_size(to, n, false);
2398 +
2399 + if (likely(access_ok(VERIFY_READ, from, n)))
2400 + n = __copy_from_user(to, from, n);
2401 + else
2402 + memset(to, 0, n);
2403 + return n;
2404 +}
2405 +
2406 +static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
2407 +{
2408 + if ((long)n < 0 || n > INT_MAX)
2409 + return n;
2410 +
2411 + if (likely(access_ok(VERIFY_WRITE, to, n))) {
2412 + if (!__builtin_constant_p(n))
2413 + check_object_size(from, n, true);
2414 + n = __copy_to_user(to, from, n);
2415 + }
2416 + return n;
2417 +}
2418 +
2419 +extern unsigned long copy_in_user(void __user *to, const void __user *from,
2420 + unsigned long n);
2421 +
2422 +#endif /* __powerpc64__ */
2423 +
2424 extern unsigned long __clear_user(void __user *addr, unsigned long size);
2425
2426 static inline unsigned long clear_user(void __user *addr, unsigned long size)
2427 diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
2428 index 429983c..7af363b 100644
2429 --- a/arch/powerpc/kernel/exceptions-64e.S
2430 +++ b/arch/powerpc/kernel/exceptions-64e.S
2431 @@ -587,6 +587,7 @@ storage_fault_common:
2432 std r14,_DAR(r1)
2433 std r15,_DSISR(r1)
2434 addi r3,r1,STACK_FRAME_OVERHEAD
2435 + bl .save_nvgprs
2436 mr r4,r14
2437 mr r5,r15
2438 ld r14,PACA_EXGEN+EX_R14(r13)
2439 @@ -596,8 +597,7 @@ storage_fault_common:
2440 cmpdi r3,0
2441 bne- 1f
2442 b .ret_from_except_lite
2443 -1: bl .save_nvgprs
2444 - mr r5,r3
2445 +1: mr r5,r3
2446 addi r3,r1,STACK_FRAME_OVERHEAD
2447 ld r4,_DAR(r1)
2448 bl .bad_page_fault
2449 diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
2450 index cf9c69b..ebc9640 100644
2451 --- a/arch/powerpc/kernel/exceptions-64s.S
2452 +++ b/arch/powerpc/kernel/exceptions-64s.S
2453 @@ -1004,10 +1004,10 @@ handle_page_fault:
2454 11: ld r4,_DAR(r1)
2455 ld r5,_DSISR(r1)
2456 addi r3,r1,STACK_FRAME_OVERHEAD
2457 + bl .save_nvgprs
2458 bl .do_page_fault
2459 cmpdi r3,0
2460 beq+ 13f
2461 - bl .save_nvgprs
2462 mr r5,r3
2463 addi r3,r1,STACK_FRAME_OVERHEAD
2464 lwz r4,_DAR(r1)
2465 diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
2466 index 0b6d796..d760ddb 100644
2467 --- a/arch/powerpc/kernel/module_32.c
2468 +++ b/arch/powerpc/kernel/module_32.c
2469 @@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
2470 me->arch.core_plt_section = i;
2471 }
2472 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
2473 - printk("Module doesn't contain .plt or .init.plt sections.\n");
2474 + printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
2475 return -ENOEXEC;
2476 }
2477
2478 @@ -192,11 +192,16 @@ static uint32_t do_plt_call(void *location,
2479
2480 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
2481 /* Init, or core PLT? */
2482 - if (location >= mod->module_core
2483 - && location < mod->module_core + mod->core_size)
2484 + if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
2485 + (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
2486 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
2487 - else
2488 + else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
2489 + (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
2490 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
2491 + else {
2492 + printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
2493 + return ~0UL;
2494 + }
2495
2496 /* Find this entry, or if that fails, the next avail. entry */
2497 while (entry->jump[0]) {
2498 diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
2499 index 6457574..08b28d3 100644
2500 --- a/arch/powerpc/kernel/process.c
2501 +++ b/arch/powerpc/kernel/process.c
2502 @@ -660,8 +660,8 @@ void show_regs(struct pt_regs * regs)
2503 * Lookup NIP late so we have the best change of getting the
2504 * above info out without failing
2505 */
2506 - printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
2507 - printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
2508 + printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
2509 + printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
2510 #endif
2511 show_stack(current, (unsigned long *) regs->gpr[1]);
2512 if (!user_mode(regs))
2513 @@ -1165,10 +1165,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
2514 newsp = stack[0];
2515 ip = stack[STACK_FRAME_LR_SAVE];
2516 if (!firstframe || ip != lr) {
2517 - printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
2518 + printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
2519 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2520 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
2521 - printk(" (%pS)",
2522 + printk(" (%pA)",
2523 (void *)current->ret_stack[curr_frame].ret);
2524 curr_frame--;
2525 }
2526 @@ -1188,7 +1188,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
2527 struct pt_regs *regs = (struct pt_regs *)
2528 (sp + STACK_FRAME_OVERHEAD);
2529 lr = regs->link;
2530 - printk("--- Exception: %lx at %pS\n LR = %pS\n",
2531 + printk("--- Exception: %lx at %pA\n LR = %pA\n",
2532 regs->trap, (void *)regs->nip, (void *)lr);
2533 firstframe = 1;
2534 }
2535 @@ -1263,58 +1263,3 @@ void thread_info_cache_init(void)
2536 }
2537
2538 #endif /* THREAD_SHIFT < PAGE_SHIFT */
2539 -
2540 -unsigned long arch_align_stack(unsigned long sp)
2541 -{
2542 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2543 - sp -= get_random_int() & ~PAGE_MASK;
2544 - return sp & ~0xf;
2545 -}
2546 -
2547 -static inline unsigned long brk_rnd(void)
2548 -{
2549 - unsigned long rnd = 0;
2550 -
2551 - /* 8MB for 32bit, 1GB for 64bit */
2552 - if (is_32bit_task())
2553 - rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
2554 - else
2555 - rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
2556 -
2557 - return rnd << PAGE_SHIFT;
2558 -}
2559 -
2560 -unsigned long arch_randomize_brk(struct mm_struct *mm)
2561 -{
2562 - unsigned long base = mm->brk;
2563 - unsigned long ret;
2564 -
2565 -#ifdef CONFIG_PPC_STD_MMU_64
2566 - /*
2567 - * If we are using 1TB segments and we are allowed to randomise
2568 - * the heap, we can put it above 1TB so it is backed by a 1TB
2569 - * segment. Otherwise the heap will be in the bottom 1TB
2570 - * which always uses 256MB segments and this may result in a
2571 - * performance penalty.
2572 - */
2573 - if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
2574 - base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
2575 -#endif
2576 -
2577 - ret = PAGE_ALIGN(base + brk_rnd());
2578 -
2579 - if (ret < mm->brk)
2580 - return mm->brk;
2581 -
2582 - return ret;
2583 -}
2584 -
2585 -unsigned long randomize_et_dyn(unsigned long base)
2586 -{
2587 - unsigned long ret = PAGE_ALIGN(base + brk_rnd());
2588 -
2589 - if (ret < base)
2590 - return base;
2591 -
2592 - return ret;
2593 -}
2594 diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
2595 index 836a5a1..27289a3 100644
2596 --- a/arch/powerpc/kernel/signal_32.c
2597 +++ b/arch/powerpc/kernel/signal_32.c
2598 @@ -859,7 +859,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
2599 /* Save user registers on the stack */
2600 frame = &rt_sf->uc.uc_mcontext;
2601 addr = frame;
2602 - if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
2603 + if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2604 if (save_user_regs(regs, frame, 0, 1))
2605 goto badframe;
2606 regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
2607 diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
2608 index a50b5ec..547078a 100644
2609 --- a/arch/powerpc/kernel/signal_64.c
2610 +++ b/arch/powerpc/kernel/signal_64.c
2611 @@ -429,7 +429,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
2612 current->thread.fpscr.val = 0;
2613
2614 /* Set up to return from userspace. */
2615 - if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
2616 + if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2617 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
2618 } else {
2619 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
2620 diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
2621 index 5459d14..10f8070 100644
2622 --- a/arch/powerpc/kernel/traps.c
2623 +++ b/arch/powerpc/kernel/traps.c
2624 @@ -98,6 +98,8 @@ static void pmac_backlight_unblank(void)
2625 static inline void pmac_backlight_unblank(void) { }
2626 #endif
2627
2628 +extern void gr_handle_kernel_exploit(void);
2629 +
2630 int die(const char *str, struct pt_regs *regs, long err)
2631 {
2632 static struct {
2633 @@ -171,6 +173,8 @@ int die(const char *str, struct pt_regs *regs, long err)
2634 if (panic_on_oops)
2635 panic("Fatal exception");
2636
2637 + gr_handle_kernel_exploit();
2638 +
2639 oops_exit();
2640 do_exit(err);
2641
2642 diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
2643 index 7d14bb6..1305601 100644
2644 --- a/arch/powerpc/kernel/vdso.c
2645 +++ b/arch/powerpc/kernel/vdso.c
2646 @@ -35,6 +35,7 @@
2647 #include <asm/firmware.h>
2648 #include <asm/vdso.h>
2649 #include <asm/vdso_datapage.h>
2650 +#include <asm/mman.h>
2651
2652 #include "setup.h"
2653
2654 @@ -219,7 +220,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
2655 vdso_base = VDSO32_MBASE;
2656 #endif
2657
2658 - current->mm->context.vdso_base = 0;
2659 + current->mm->context.vdso_base = ~0UL;
2660
2661 /* vDSO has a problem and was disabled, just don't "enable" it for the
2662 * process
2663 @@ -239,7 +240,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
2664 vdso_base = get_unmapped_area(NULL, vdso_base,
2665 (vdso_pages << PAGE_SHIFT) +
2666 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
2667 - 0, 0);
2668 + 0, MAP_PRIVATE | MAP_EXECUTABLE);
2669 if (IS_ERR_VALUE(vdso_base)) {
2670 rc = vdso_base;
2671 goto fail_mmapsem;
2672 diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c
2673 index 5eea6f3..5d10396 100644
2674 --- a/arch/powerpc/lib/usercopy_64.c
2675 +++ b/arch/powerpc/lib/usercopy_64.c
2676 @@ -9,22 +9,6 @@
2677 #include <linux/module.h>
2678 #include <asm/uaccess.h>
2679
2680 -unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
2681 -{
2682 - if (likely(access_ok(VERIFY_READ, from, n)))
2683 - n = __copy_from_user(to, from, n);
2684 - else
2685 - memset(to, 0, n);
2686 - return n;
2687 -}
2688 -
2689 -unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
2690 -{
2691 - if (likely(access_ok(VERIFY_WRITE, to, n)))
2692 - n = __copy_to_user(to, from, n);
2693 - return n;
2694 -}
2695 -
2696 unsigned long copy_in_user(void __user *to, const void __user *from,
2697 unsigned long n)
2698 {
2699 @@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from,
2700 return n;
2701 }
2702
2703 -EXPORT_SYMBOL(copy_from_user);
2704 -EXPORT_SYMBOL(copy_to_user);
2705 EXPORT_SYMBOL(copy_in_user);
2706
2707 diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
2708 index 5efe8c9..db9ceef 100644
2709 --- a/arch/powerpc/mm/fault.c
2710 +++ b/arch/powerpc/mm/fault.c
2711 @@ -32,6 +32,10 @@
2712 #include <linux/perf_event.h>
2713 #include <linux/magic.h>
2714 #include <linux/ratelimit.h>
2715 +#include <linux/slab.h>
2716 +#include <linux/pagemap.h>
2717 +#include <linux/compiler.h>
2718 +#include <linux/unistd.h>
2719
2720 #include <asm/firmware.h>
2721 #include <asm/page.h>
2722 @@ -43,6 +47,7 @@
2723 #include <asm/tlbflush.h>
2724 #include <asm/siginfo.h>
2725 #include <mm/mmu_decl.h>
2726 +#include <asm/ptrace.h>
2727
2728 #ifdef CONFIG_KPROBES
2729 static inline int notify_page_fault(struct pt_regs *regs)
2730 @@ -66,6 +71,33 @@ static inline int notify_page_fault(struct pt_regs *regs)
2731 }
2732 #endif
2733
2734 +#ifdef CONFIG_PAX_PAGEEXEC
2735 +/*
2736 + * PaX: decide what to do with offenders (regs->nip = fault address)
2737 + *
2738 + * returns 1 when task should be killed
2739 + */
2740 +static int pax_handle_fetch_fault(struct pt_regs *regs)
2741 +{
2742 + return 1;
2743 +}
2744 +
2745 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2746 +{
2747 + unsigned long i;
2748 +
2749 + printk(KERN_ERR "PAX: bytes at PC: ");
2750 + for (i = 0; i < 5; i++) {
2751 + unsigned int c;
2752 + if (get_user(c, (unsigned int __user *)pc+i))
2753 + printk(KERN_CONT "???????? ");
2754 + else
2755 + printk(KERN_CONT "%08x ", c);
2756 + }
2757 + printk("\n");
2758 +}
2759 +#endif
2760 +
2761 /*
2762 * Check whether the instruction at regs->nip is a store using
2763 * an update addressing form which will update r1.
2764 @@ -136,7 +168,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
2765 * indicate errors in DSISR but can validly be set in SRR1.
2766 */
2767 if (trap == 0x400)
2768 - error_code &= 0x48200000;
2769 + error_code &= 0x58200000;
2770 else
2771 is_write = error_code & DSISR_ISSTORE;
2772 #else
2773 @@ -259,7 +291,7 @@ good_area:
2774 * "undefined". Of those that can be set, this is the only
2775 * one which seems bad.
2776 */
2777 - if (error_code & 0x10000000)
2778 + if (error_code & DSISR_GUARDED)
2779 /* Guarded storage error. */
2780 goto bad_area;
2781 #endif /* CONFIG_8xx */
2782 @@ -274,7 +306,7 @@ good_area:
2783 * processors use the same I/D cache coherency mechanism
2784 * as embedded.
2785 */
2786 - if (error_code & DSISR_PROTFAULT)
2787 + if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
2788 goto bad_area;
2789 #endif /* CONFIG_PPC_STD_MMU */
2790
2791 @@ -343,6 +375,23 @@ bad_area:
2792 bad_area_nosemaphore:
2793 /* User mode accesses cause a SIGSEGV */
2794 if (user_mode(regs)) {
2795 +
2796 +#ifdef CONFIG_PAX_PAGEEXEC
2797 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
2798 +#ifdef CONFIG_PPC_STD_MMU
2799 + if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
2800 +#else
2801 + if (is_exec && regs->nip == address) {
2802 +#endif
2803 + switch (pax_handle_fetch_fault(regs)) {
2804 + }
2805 +
2806 + pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
2807 + do_group_exit(SIGKILL);
2808 + }
2809 + }
2810 +#endif
2811 +
2812 _exception(SIGSEGV, regs, code, address);
2813 return 0;
2814 }
2815 diff --git a/arch/powerpc/mm/mmap_64.c b/arch/powerpc/mm/mmap_64.c
2816 index 5a783d8..c23e14b 100644
2817 --- a/arch/powerpc/mm/mmap_64.c
2818 +++ b/arch/powerpc/mm/mmap_64.c
2819 @@ -99,10 +99,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
2820 */
2821 if (mmap_is_legacy()) {
2822 mm->mmap_base = TASK_UNMAPPED_BASE;
2823 +
2824 +#ifdef CONFIG_PAX_RANDMMAP
2825 + if (mm->pax_flags & MF_PAX_RANDMMAP)
2826 + mm->mmap_base += mm->delta_mmap;
2827 +#endif
2828 +
2829 mm->get_unmapped_area = arch_get_unmapped_area;
2830 mm->unmap_area = arch_unmap_area;
2831 } else {
2832 mm->mmap_base = mmap_base();
2833 +
2834 +#ifdef CONFIG_PAX_RANDMMAP
2835 + if (mm->pax_flags & MF_PAX_RANDMMAP)
2836 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
2837 +#endif
2838 +
2839 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
2840 mm->unmap_area = arch_unmap_area_topdown;
2841 }
2842 diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
2843 index 73709f7..6b90313 100644
2844 --- a/arch/powerpc/mm/slice.c
2845 +++ b/arch/powerpc/mm/slice.c
2846 @@ -98,7 +98,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
2847 if ((mm->task_size - len) < addr)
2848 return 0;
2849 vma = find_vma(mm, addr);
2850 - return (!vma || (addr + len) <= vma->vm_start);
2851 + return check_heap_stack_gap(vma, addr, len);
2852 }
2853
2854 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
2855 @@ -256,7 +256,7 @@ full_search:
2856 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
2857 continue;
2858 }
2859 - if (!vma || addr + len <= vma->vm_start) {
2860 + if (check_heap_stack_gap(vma, addr, len)) {
2861 /*
2862 * Remember the place where we stopped the search:
2863 */
2864 @@ -313,10 +313,14 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
2865 }
2866 }
2867
2868 - addr = mm->mmap_base;
2869 - while (addr > len) {
2870 + if (mm->mmap_base < len)
2871 + addr = -ENOMEM;
2872 + else
2873 + addr = mm->mmap_base - len;
2874 +
2875 + while (!IS_ERR_VALUE(addr)) {
2876 /* Go down by chunk size */
2877 - addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
2878 + addr = _ALIGN_DOWN(addr, 1ul << pshift);
2879
2880 /* Check for hit with different page size */
2881 mask = slice_range_to_mask(addr, len);
2882 @@ -336,7 +340,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
2883 * return with success:
2884 */
2885 vma = find_vma(mm, addr);
2886 - if (!vma || (addr + len) <= vma->vm_start) {
2887 + if (check_heap_stack_gap(vma, addr, len)) {
2888 /* remember the address as a hint for next time */
2889 if (use_cache)
2890 mm->free_area_cache = addr;
2891 @@ -348,7 +352,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
2892 mm->cached_hole_size = vma->vm_start - addr;
2893
2894 /* try just below the current vma->vm_start */
2895 - addr = vma->vm_start;
2896 + addr = skip_heap_stack_gap(vma, len);
2897 }
2898
2899 /*
2900 @@ -426,6 +430,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
2901 if (fixed && addr > (mm->task_size - len))
2902 return -EINVAL;
2903
2904 +#ifdef CONFIG_PAX_RANDMMAP
2905 + if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
2906 + addr = 0;
2907 +#endif
2908 +
2909 /* If hint, make sure it matches our alignment restrictions */
2910 if (!fixed && addr) {
2911 addr = _ALIGN_UP(addr, 1ul << pshift);
2912 diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
2913 index 547f1a6..3fff354 100644
2914 --- a/arch/s390/include/asm/elf.h
2915 +++ b/arch/s390/include/asm/elf.h
2916 @@ -162,8 +162,14 @@ extern unsigned int vdso_enabled;
2917 the loader. We need to make sure that it is out of the way of the program
2918 that it will "exec", and that there is sufficient room for the brk. */
2919
2920 -extern unsigned long randomize_et_dyn(unsigned long base);
2921 -#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2))
2922 +#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
2923 +
2924 +#ifdef CONFIG_PAX_ASLR
2925 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
2926 +
2927 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
2928 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
2929 +#endif
2930
2931 /* This yields a mask that user programs can use to figure out what
2932 instruction set this CPU supports. */
2933 @@ -211,7 +217,4 @@ struct linux_binprm;
2934 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
2935 int arch_setup_additional_pages(struct linux_binprm *, int);
2936
2937 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
2938 -#define arch_randomize_brk arch_randomize_brk
2939 -
2940 #endif
2941 diff --git a/arch/s390/include/asm/system.h b/arch/s390/include/asm/system.h
2942 index ef573c1..75a1ce6 100644
2943 --- a/arch/s390/include/asm/system.h
2944 +++ b/arch/s390/include/asm/system.h
2945 @@ -262,7 +262,7 @@ extern void (*_machine_restart)(char *command);
2946 extern void (*_machine_halt)(void);
2947 extern void (*_machine_power_off)(void);
2948
2949 -extern unsigned long arch_align_stack(unsigned long sp);
2950 +#define arch_align_stack(x) ((x) & ~0xfUL)
2951
2952 static inline int tprot(unsigned long addr)
2953 {
2954 diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
2955 index 2b23885..e136e31 100644
2956 --- a/arch/s390/include/asm/uaccess.h
2957 +++ b/arch/s390/include/asm/uaccess.h
2958 @@ -235,6 +235,10 @@ static inline unsigned long __must_check
2959 copy_to_user(void __user *to, const void *from, unsigned long n)
2960 {
2961 might_fault();
2962 +
2963 + if ((long)n < 0)
2964 + return n;
2965 +
2966 if (access_ok(VERIFY_WRITE, to, n))
2967 n = __copy_to_user(to, from, n);
2968 return n;
2969 @@ -260,6 +264,9 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
2970 static inline unsigned long __must_check
2971 __copy_from_user(void *to, const void __user *from, unsigned long n)
2972 {
2973 + if ((long)n < 0)
2974 + return n;
2975 +
2976 if (__builtin_constant_p(n) && (n <= 256))
2977 return uaccess.copy_from_user_small(n, from, to);
2978 else
2979 @@ -294,6 +301,10 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
2980 unsigned int sz = __compiletime_object_size(to);
2981
2982 might_fault();
2983 +
2984 + if ((long)n < 0)
2985 + return n;
2986 +
2987 if (unlikely(sz != -1 && sz < n)) {
2988 copy_from_user_overflow();
2989 return n;
2990 diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
2991 index dfcb343..eda788a 100644
2992 --- a/arch/s390/kernel/module.c
2993 +++ b/arch/s390/kernel/module.c
2994 @@ -161,11 +161,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
2995
2996 /* Increase core size by size of got & plt and set start
2997 offsets for got and plt. */
2998 - me->core_size = ALIGN(me->core_size, 4);
2999 - me->arch.got_offset = me->core_size;
3000 - me->core_size += me->arch.got_size;
3001 - me->arch.plt_offset = me->core_size;
3002 - me->core_size += me->arch.plt_size;
3003 + me->core_size_rw = ALIGN(me->core_size_rw, 4);
3004 + me->arch.got_offset = me->core_size_rw;
3005 + me->core_size_rw += me->arch.got_size;
3006 + me->arch.plt_offset = me->core_size_rx;
3007 + me->core_size_rx += me->arch.plt_size;
3008 return 0;
3009 }
3010
3011 @@ -242,7 +242,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3012 if (info->got_initialized == 0) {
3013 Elf_Addr *gotent;
3014
3015 - gotent = me->module_core + me->arch.got_offset +
3016 + gotent = me->module_core_rw + me->arch.got_offset +
3017 info->got_offset;
3018 *gotent = val;
3019 info->got_initialized = 1;
3020 @@ -266,7 +266,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3021 else if (r_type == R_390_GOTENT ||
3022 r_type == R_390_GOTPLTENT)
3023 *(unsigned int *) loc =
3024 - (val + (Elf_Addr) me->module_core - loc) >> 1;
3025 + (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
3026 else if (r_type == R_390_GOT64 ||
3027 r_type == R_390_GOTPLT64)
3028 *(unsigned long *) loc = val;
3029 @@ -280,7 +280,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3030 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
3031 if (info->plt_initialized == 0) {
3032 unsigned int *ip;
3033 - ip = me->module_core + me->arch.plt_offset +
3034 + ip = me->module_core_rx + me->arch.plt_offset +
3035 info->plt_offset;
3036 #ifndef CONFIG_64BIT
3037 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
3038 @@ -305,7 +305,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3039 val - loc + 0xffffUL < 0x1ffffeUL) ||
3040 (r_type == R_390_PLT32DBL &&
3041 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
3042 - val = (Elf_Addr) me->module_core +
3043 + val = (Elf_Addr) me->module_core_rx +
3044 me->arch.plt_offset +
3045 info->plt_offset;
3046 val += rela->r_addend - loc;
3047 @@ -327,7 +327,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3048 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
3049 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
3050 val = val + rela->r_addend -
3051 - ((Elf_Addr) me->module_core + me->arch.got_offset);
3052 + ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
3053 if (r_type == R_390_GOTOFF16)
3054 *(unsigned short *) loc = val;
3055 else if (r_type == R_390_GOTOFF32)
3056 @@ -337,7 +337,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3057 break;
3058 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
3059 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
3060 - val = (Elf_Addr) me->module_core + me->arch.got_offset +
3061 + val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
3062 rela->r_addend - loc;
3063 if (r_type == R_390_GOTPC)
3064 *(unsigned int *) loc = val;
3065 diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
3066 index 9451b21..ed8956f 100644
3067 --- a/arch/s390/kernel/process.c
3068 +++ b/arch/s390/kernel/process.c
3069 @@ -321,39 +321,3 @@ unsigned long get_wchan(struct task_struct *p)
3070 }
3071 return 0;
3072 }
3073 -
3074 -unsigned long arch_align_stack(unsigned long sp)
3075 -{
3076 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
3077 - sp -= get_random_int() & ~PAGE_MASK;
3078 - return sp & ~0xf;
3079 -}
3080 -
3081 -static inline unsigned long brk_rnd(void)
3082 -{
3083 - /* 8MB for 32bit, 1GB for 64bit */
3084 - if (is_32bit_task())
3085 - return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
3086 - else
3087 - return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
3088 -}
3089 -
3090 -unsigned long arch_randomize_brk(struct mm_struct *mm)
3091 -{
3092 - unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());
3093 -
3094 - if (ret < mm->brk)
3095 - return mm->brk;
3096 - return ret;
3097 -}
3098 -
3099 -unsigned long randomize_et_dyn(unsigned long base)
3100 -{
3101 - unsigned long ret = PAGE_ALIGN(base + brk_rnd());
3102 -
3103 - if (!(current->flags & PF_RANDOMIZE))
3104 - return base;
3105 - if (ret < base)
3106 - return base;
3107 - return ret;
3108 -}
3109 diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
3110 index f09c748..cf9ec1d 100644
3111 --- a/arch/s390/mm/mmap.c
3112 +++ b/arch/s390/mm/mmap.c
3113 @@ -92,10 +92,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
3114 */
3115 if (mmap_is_legacy()) {
3116 mm->mmap_base = TASK_UNMAPPED_BASE;
3117 +
3118 +#ifdef CONFIG_PAX_RANDMMAP
3119 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3120 + mm->mmap_base += mm->delta_mmap;
3121 +#endif
3122 +
3123 mm->get_unmapped_area = arch_get_unmapped_area;
3124 mm->unmap_area = arch_unmap_area;
3125 } else {
3126 mm->mmap_base = mmap_base();
3127 +
3128 +#ifdef CONFIG_PAX_RANDMMAP
3129 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3130 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3131 +#endif
3132 +
3133 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
3134 mm->unmap_area = arch_unmap_area_topdown;
3135 }
3136 @@ -167,10 +179,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
3137 */
3138 if (mmap_is_legacy()) {
3139 mm->mmap_base = TASK_UNMAPPED_BASE;
3140 +
3141 +#ifdef CONFIG_PAX_RANDMMAP
3142 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3143 + mm->mmap_base += mm->delta_mmap;
3144 +#endif
3145 +
3146 mm->get_unmapped_area = s390_get_unmapped_area;
3147 mm->unmap_area = arch_unmap_area;
3148 } else {
3149 mm->mmap_base = mmap_base();
3150 +
3151 +#ifdef CONFIG_PAX_RANDMMAP
3152 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3153 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3154 +#endif
3155 +
3156 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
3157 mm->unmap_area = arch_unmap_area_topdown;
3158 }
3159 diff --git a/arch/score/include/asm/system.h b/arch/score/include/asm/system.h
3160 index 589d5c7..669e274 100644
3161 --- a/arch/score/include/asm/system.h
3162 +++ b/arch/score/include/asm/system.h
3163 @@ -17,7 +17,7 @@ do { \
3164 #define finish_arch_switch(prev) do {} while (0)
3165
3166 typedef void (*vi_handler_t)(void);
3167 -extern unsigned long arch_align_stack(unsigned long sp);
3168 +#define arch_align_stack(x) (x)
3169
3170 #define mb() barrier()
3171 #define rmb() barrier()
3172 diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
3173 index 25d0803..d6c8e36 100644
3174 --- a/arch/score/kernel/process.c
3175 +++ b/arch/score/kernel/process.c
3176 @@ -161,8 +161,3 @@ unsigned long get_wchan(struct task_struct *task)
3177
3178 return task_pt_regs(task)->cp0_epc;
3179 }
3180 -
3181 -unsigned long arch_align_stack(unsigned long sp)
3182 -{
3183 - return sp;
3184 -}
3185 diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
3186 index afeb710..d1d1289 100644
3187 --- a/arch/sh/mm/mmap.c
3188 +++ b/arch/sh/mm/mmap.c
3189 @@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
3190 addr = PAGE_ALIGN(addr);
3191
3192 vma = find_vma(mm, addr);
3193 - if (TASK_SIZE - len >= addr &&
3194 - (!vma || addr + len <= vma->vm_start))
3195 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
3196 return addr;
3197 }
3198
3199 @@ -106,7 +105,7 @@ full_search:
3200 }
3201 return -ENOMEM;
3202 }
3203 - if (likely(!vma || addr + len <= vma->vm_start)) {
3204 + if (likely(check_heap_stack_gap(vma, addr, len))) {
3205 /*
3206 * Remember the place where we stopped the search:
3207 */
3208 @@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
3209 addr = PAGE_ALIGN(addr);
3210
3211 vma = find_vma(mm, addr);
3212 - if (TASK_SIZE - len >= addr &&
3213 - (!vma || addr + len <= vma->vm_start))
3214 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
3215 return addr;
3216 }
3217
3218 @@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
3219 /* make sure it can fit in the remaining address space */
3220 if (likely(addr > len)) {
3221 vma = find_vma(mm, addr-len);
3222 - if (!vma || addr <= vma->vm_start) {
3223 + if (check_heap_stack_gap(vma, addr - len, len)) {
3224 /* remember the address as a hint for next time */
3225 return (mm->free_area_cache = addr-len);
3226 }
3227 @@ -188,18 +186,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
3228 if (unlikely(mm->mmap_base < len))
3229 goto bottomup;
3230
3231 - addr = mm->mmap_base-len;
3232 - if (do_colour_align)
3233 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3234 + addr = mm->mmap_base - len;
3235
3236 do {
3237 + if (do_colour_align)
3238 + addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3239 /*
3240 * Lookup failure means no vma is above this address,
3241 * else if new region fits below vma->vm_start,
3242 * return with success:
3243 */
3244 vma = find_vma(mm, addr);
3245 - if (likely(!vma || addr+len <= vma->vm_start)) {
3246 + if (likely(check_heap_stack_gap(vma, addr, len))) {
3247 /* remember the address as a hint for next time */
3248 return (mm->free_area_cache = addr);
3249 }
3250 @@ -209,10 +207,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
3251 mm->cached_hole_size = vma->vm_start - addr;
3252
3253 /* try just below the current vma->vm_start */
3254 - addr = vma->vm_start-len;
3255 - if (do_colour_align)
3256 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3257 - } while (likely(len < vma->vm_start));
3258 + addr = skip_heap_stack_gap(vma, len);
3259 + } while (!IS_ERR_VALUE(addr));
3260
3261 bottomup:
3262 /*
3263 diff --git a/arch/sparc/Makefile b/arch/sparc/Makefile
3264 index ad1fb5d..fc5315b 100644
3265 --- a/arch/sparc/Makefile
3266 +++ b/arch/sparc/Makefile
3267 @@ -75,7 +75,7 @@ drivers-$(CONFIG_OPROFILE) += arch/sparc/oprofile/
3268 # Export what is needed by arch/sparc/boot/Makefile
3269 export VMLINUX_INIT VMLINUX_MAIN
3270 VMLINUX_INIT := $(head-y) $(init-y)
3271 -VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/
3272 +VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
3273 VMLINUX_MAIN += $(patsubst %/, %/lib.a, $(libs-y)) $(libs-y)
3274 VMLINUX_MAIN += $(drivers-y) $(net-y)
3275
3276 diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
3277 index 9f421df..b81fc12 100644
3278 --- a/arch/sparc/include/asm/atomic_64.h
3279 +++ b/arch/sparc/include/asm/atomic_64.h
3280 @@ -14,18 +14,40 @@
3281 #define ATOMIC64_INIT(i) { (i) }
3282
3283 #define atomic_read(v) (*(volatile int *)&(v)->counter)
3284 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
3285 +{
3286 + return v->counter;
3287 +}
3288 #define atomic64_read(v) (*(volatile long *)&(v)->counter)
3289 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
3290 +{
3291 + return v->counter;
3292 +}
3293
3294 #define atomic_set(v, i) (((v)->counter) = i)
3295 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
3296 +{
3297 + v->counter = i;
3298 +}
3299 #define atomic64_set(v, i) (((v)->counter) = i)
3300 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
3301 +{
3302 + v->counter = i;
3303 +}
3304
3305 extern void atomic_add(int, atomic_t *);
3306 +extern void atomic_add_unchecked(int, atomic_unchecked_t *);
3307 extern void atomic64_add(long, atomic64_t *);
3308 +extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
3309 extern void atomic_sub(int, atomic_t *);
3310 +extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
3311 extern void atomic64_sub(long, atomic64_t *);
3312 +extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
3313
3314 extern int atomic_add_ret(int, atomic_t *);
3315 +extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
3316 extern long atomic64_add_ret(long, atomic64_t *);
3317 +extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
3318 extern int atomic_sub_ret(int, atomic_t *);
3319 extern long atomic64_sub_ret(long, atomic64_t *);
3320
3321 @@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomic64_t *);
3322 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
3323
3324 #define atomic_inc_return(v) atomic_add_ret(1, v)
3325 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
3326 +{
3327 + return atomic_add_ret_unchecked(1, v);
3328 +}
3329 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
3330 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
3331 +{
3332 + return atomic64_add_ret_unchecked(1, v);
3333 +}
3334
3335 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
3336 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
3337
3338 #define atomic_add_return(i, v) atomic_add_ret(i, v)
3339 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
3340 +{
3341 + return atomic_add_ret_unchecked(i, v);
3342 +}
3343 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
3344 +static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
3345 +{
3346 + return atomic64_add_ret_unchecked(i, v);
3347 +}
3348
3349 /*
3350 * atomic_inc_and_test - increment and test
3351 @@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomic64_t *);
3352 * other cases.
3353 */
3354 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
3355 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
3356 +{
3357 + return atomic_inc_return_unchecked(v) == 0;
3358 +}
3359 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
3360
3361 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
3362 @@ -59,25 +101,60 @@ extern long atomic64_sub_ret(long, atomic64_t *);
3363 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
3364
3365 #define atomic_inc(v) atomic_add(1, v)
3366 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
3367 +{
3368 + atomic_add_unchecked(1, v);
3369 +}
3370 #define atomic64_inc(v) atomic64_add(1, v)
3371 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
3372 +{
3373 + atomic64_add_unchecked(1, v);
3374 +}
3375
3376 #define atomic_dec(v) atomic_sub(1, v)
3377 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
3378 +{
3379 + atomic_sub_unchecked(1, v);
3380 +}
3381 #define atomic64_dec(v) atomic64_sub(1, v)
3382 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
3383 +{
3384 + atomic64_sub_unchecked(1, v);
3385 +}
3386
3387 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
3388 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
3389
3390 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
3391 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
3392 +{
3393 + return cmpxchg(&v->counter, old, new);
3394 +}
3395 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
3396 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
3397 +{
3398 + return xchg(&v->counter, new);
3399 +}
3400
3401 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
3402 {
3403 - int c, old;
3404 + int c, old, new;
3405 c = atomic_read(v);
3406 for (;;) {
3407 - if (unlikely(c == (u)))
3408 + if (unlikely(c == u))
3409 break;
3410 - old = atomic_cmpxchg((v), c, c + (a));
3411 +
3412 + asm volatile("addcc %2, %0, %0\n"
3413 +
3414 +#ifdef CONFIG_PAX_REFCOUNT
3415 + "tvs %%icc, 6\n"
3416 +#endif
3417 +
3418 + : "=r" (new)
3419 + : "0" (c), "ir" (a)
3420 + : "cc");
3421 +
3422 + old = atomic_cmpxchg(v, c, new);
3423 if (likely(old == c))
3424 break;
3425 c = old;
3426 @@ -89,20 +166,35 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
3427 #define atomic64_cmpxchg(v, o, n) \
3428 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
3429 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
3430 +static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
3431 +{
3432 + return xchg(&v->counter, new);
3433 +}
3434
3435 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
3436 {
3437 - long c, old;
3438 + long c, old, new;
3439 c = atomic64_read(v);
3440 for (;;) {
3441 - if (unlikely(c == (u)))
3442 + if (unlikely(c == u))
3443 break;
3444 - old = atomic64_cmpxchg((v), c, c + (a));
3445 +
3446 + asm volatile("addcc %2, %0, %0\n"
3447 +
3448 +#ifdef CONFIG_PAX_REFCOUNT
3449 + "tvs %%xcc, 6\n"
3450 +#endif
3451 +
3452 + : "=r" (new)
3453 + : "0" (c), "ir" (a)
3454 + : "cc");
3455 +
3456 + old = atomic64_cmpxchg(v, c, new);
3457 if (likely(old == c))
3458 break;
3459 c = old;
3460 }
3461 - return c != (u);
3462 + return c != u;
3463 }
3464
3465 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
3466 diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h
3467 index 69358b5..17b4745 100644
3468 --- a/arch/sparc/include/asm/cache.h
3469 +++ b/arch/sparc/include/asm/cache.h
3470 @@ -10,7 +10,7 @@
3471 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
3472
3473 #define L1_CACHE_SHIFT 5
3474 -#define L1_CACHE_BYTES 32
3475 +#define L1_CACHE_BYTES 32UL
3476
3477 #ifdef CONFIG_SPARC32
3478 #define SMP_CACHE_BYTES_SHIFT 5
3479 diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h
3480 index 4269ca6..e3da77f 100644
3481 --- a/arch/sparc/include/asm/elf_32.h
3482 +++ b/arch/sparc/include/asm/elf_32.h
3483 @@ -114,6 +114,13 @@ typedef struct {
3484
3485 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
3486
3487 +#ifdef CONFIG_PAX_ASLR
3488 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
3489 +
3490 +#define PAX_DELTA_MMAP_LEN 16
3491 +#define PAX_DELTA_STACK_LEN 16
3492 +#endif
3493 +
3494 /* This yields a mask that user programs can use to figure out what
3495 instruction set this cpu supports. This can NOT be done in userspace
3496 on Sparc. */
3497 diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
3498 index 7df8b7f..4946269 100644
3499 --- a/arch/sparc/include/asm/elf_64.h
3500 +++ b/arch/sparc/include/asm/elf_64.h
3501 @@ -180,6 +180,13 @@ typedef struct {
3502 #define ELF_ET_DYN_BASE 0x0000010000000000UL
3503 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
3504
3505 +#ifdef CONFIG_PAX_ASLR
3506 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
3507 +
3508 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
3509 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
3510 +#endif
3511 +
3512 extern unsigned long sparc64_elf_hwcap;
3513 #define ELF_HWCAP sparc64_elf_hwcap
3514
3515 diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
3516 index a790cc6..091ed94 100644
3517 --- a/arch/sparc/include/asm/pgtable_32.h
3518 +++ b/arch/sparc/include/asm/pgtable_32.h
3519 @@ -45,6 +45,13 @@ BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
3520 BTFIXUPDEF_INT(page_none)
3521 BTFIXUPDEF_INT(page_copy)
3522 BTFIXUPDEF_INT(page_readonly)
3523 +
3524 +#ifdef CONFIG_PAX_PAGEEXEC
3525 +BTFIXUPDEF_INT(page_shared_noexec)
3526 +BTFIXUPDEF_INT(page_copy_noexec)
3527 +BTFIXUPDEF_INT(page_readonly_noexec)
3528 +#endif
3529 +
3530 BTFIXUPDEF_INT(page_kernel)
3531
3532 #define PMD_SHIFT SUN4C_PMD_SHIFT
3533 @@ -66,6 +73,16 @@ extern pgprot_t PAGE_SHARED;
3534 #define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
3535 #define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
3536
3537 +#ifdef CONFIG_PAX_PAGEEXEC
3538 +extern pgprot_t PAGE_SHARED_NOEXEC;
3539 +# define PAGE_COPY_NOEXEC __pgprot(BTFIXUP_INT(page_copy_noexec))
3540 +# define PAGE_READONLY_NOEXEC __pgprot(BTFIXUP_INT(page_readonly_noexec))
3541 +#else
3542 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
3543 +# define PAGE_COPY_NOEXEC PAGE_COPY
3544 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
3545 +#endif
3546 +
3547 extern unsigned long page_kernel;
3548
3549 #ifdef MODULE
3550 diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
3551 index f6ae2b2..b03ffc7 100644
3552 --- a/arch/sparc/include/asm/pgtsrmmu.h
3553 +++ b/arch/sparc/include/asm/pgtsrmmu.h
3554 @@ -115,6 +115,13 @@
3555 SRMMU_EXEC | SRMMU_REF)
3556 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
3557 SRMMU_EXEC | SRMMU_REF)
3558 +
3559 +#ifdef CONFIG_PAX_PAGEEXEC
3560 +#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
3561 +#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3562 +#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3563 +#endif
3564 +
3565 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
3566 SRMMU_DIRTY | SRMMU_REF)
3567
3568 diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
3569 index 9689176..63c18ea 100644
3570 --- a/arch/sparc/include/asm/spinlock_64.h
3571 +++ b/arch/sparc/include/asm/spinlock_64.h
3572 @@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long fla
3573
3574 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
3575
3576 -static void inline arch_read_lock(arch_rwlock_t *lock)
3577 +static inline void arch_read_lock(arch_rwlock_t *lock)
3578 {
3579 unsigned long tmp1, tmp2;
3580
3581 __asm__ __volatile__ (
3582 "1: ldsw [%2], %0\n"
3583 " brlz,pn %0, 2f\n"
3584 -"4: add %0, 1, %1\n"
3585 +"4: addcc %0, 1, %1\n"
3586 +
3587 +#ifdef CONFIG_PAX_REFCOUNT
3588 +" tvs %%icc, 6\n"
3589 +#endif
3590 +
3591 " cas [%2], %0, %1\n"
3592 " cmp %0, %1\n"
3593 " bne,pn %%icc, 1b\n"
3594 @@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_rwlock_t *lock)
3595 " .previous"
3596 : "=&r" (tmp1), "=&r" (tmp2)
3597 : "r" (lock)
3598 - : "memory");
3599 + : "memory", "cc");
3600 }
3601
3602 -static int inline arch_read_trylock(arch_rwlock_t *lock)
3603 +static inline int arch_read_trylock(arch_rwlock_t *lock)
3604 {
3605 int tmp1, tmp2;
3606
3607 @@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
3608 "1: ldsw [%2], %0\n"
3609 " brlz,a,pn %0, 2f\n"
3610 " mov 0, %0\n"
3611 -" add %0, 1, %1\n"
3612 +" addcc %0, 1, %1\n"
3613 +
3614 +#ifdef CONFIG_PAX_REFCOUNT
3615 +" tvs %%icc, 6\n"
3616 +#endif
3617 +
3618 " cas [%2], %0, %1\n"
3619 " cmp %0, %1\n"
3620 " bne,pn %%icc, 1b\n"
3621 @@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
3622 return tmp1;
3623 }
3624
3625 -static void inline arch_read_unlock(arch_rwlock_t *lock)
3626 +static inline void arch_read_unlock(arch_rwlock_t *lock)
3627 {
3628 unsigned long tmp1, tmp2;
3629
3630 __asm__ __volatile__(
3631 "1: lduw [%2], %0\n"
3632 -" sub %0, 1, %1\n"
3633 +" subcc %0, 1, %1\n"
3634 +
3635 +#ifdef CONFIG_PAX_REFCOUNT
3636 +" tvs %%icc, 6\n"
3637 +#endif
3638 +
3639 " cas [%2], %0, %1\n"
3640 " cmp %0, %1\n"
3641 " bne,pn %%xcc, 1b\n"
3642 @@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch_rwlock_t *lock)
3643 : "memory");
3644 }
3645
3646 -static void inline arch_write_lock(arch_rwlock_t *lock)
3647 +static inline void arch_write_lock(arch_rwlock_t *lock)
3648 {
3649 unsigned long mask, tmp1, tmp2;
3650
3651 @@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_rwlock_t *lock)
3652 : "memory");
3653 }
3654
3655 -static void inline arch_write_unlock(arch_rwlock_t *lock)
3656 +static inline void arch_write_unlock(arch_rwlock_t *lock)
3657 {
3658 __asm__ __volatile__(
3659 " stw %%g0, [%0]"
3660 @@ -186,7 +201,7 @@ static void inline arch_write_unlock(arch_rwlock_t *lock)
3661 : "memory");
3662 }
3663
3664 -static int inline arch_write_trylock(arch_rwlock_t *lock)
3665 +static inline int arch_write_trylock(arch_rwlock_t *lock)
3666 {
3667 unsigned long mask, tmp1, tmp2, result;
3668
3669 diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
3670 index fa57532..e1a4c53 100644
3671 --- a/arch/sparc/include/asm/thread_info_32.h
3672 +++ b/arch/sparc/include/asm/thread_info_32.h
3673 @@ -50,6 +50,8 @@ struct thread_info {
3674 unsigned long w_saved;
3675
3676 struct restart_block restart_block;
3677 +
3678 + unsigned long lowest_stack;
3679 };
3680
3681 /*
3682 diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
3683 index 60d86be..952dea1 100644
3684 --- a/arch/sparc/include/asm/thread_info_64.h
3685 +++ b/arch/sparc/include/asm/thread_info_64.h
3686 @@ -63,6 +63,8 @@ struct thread_info {
3687 struct pt_regs *kern_una_regs;
3688 unsigned int kern_una_insn;
3689
3690 + unsigned long lowest_stack;
3691 +
3692 unsigned long fpregs[0] __attribute__ ((aligned(64)));
3693 };
3694
3695 diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h
3696 index e88fbe5..96b0ce5 100644
3697 --- a/arch/sparc/include/asm/uaccess.h
3698 +++ b/arch/sparc/include/asm/uaccess.h
3699 @@ -1,5 +1,13 @@
3700 #ifndef ___ASM_SPARC_UACCESS_H
3701 #define ___ASM_SPARC_UACCESS_H
3702 +
3703 +#ifdef __KERNEL__
3704 +#ifndef __ASSEMBLY__
3705 +#include <linux/types.h>
3706 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
3707 +#endif
3708 +#endif
3709 +
3710 #if defined(__sparc__) && defined(__arch64__)
3711 #include <asm/uaccess_64.h>
3712 #else
3713 diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
3714 index 8303ac4..07f333d 100644
3715 --- a/arch/sparc/include/asm/uaccess_32.h
3716 +++ b/arch/sparc/include/asm/uaccess_32.h
3717 @@ -249,27 +249,46 @@ extern unsigned long __copy_user(void __user *to, const void __user *from, unsig
3718
3719 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
3720 {
3721 - if (n && __access_ok((unsigned long) to, n))
3722 + if ((long)n < 0)
3723 + return n;
3724 +
3725 + if (n && __access_ok((unsigned long) to, n)) {
3726 + if (!__builtin_constant_p(n))
3727 + check_object_size(from, n, true);
3728 return __copy_user(to, (__force void __user *) from, n);
3729 - else
3730 + } else
3731 return n;
3732 }
3733
3734 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
3735 {
3736 + if ((long)n < 0)
3737 + return n;
3738 +
3739 + if (!__builtin_constant_p(n))
3740 + check_object_size(from, n, true);
3741 +
3742 return __copy_user(to, (__force void __user *) from, n);
3743 }
3744
3745 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
3746 {
3747 - if (n && __access_ok((unsigned long) from, n))
3748 + if ((long)n < 0)
3749 + return n;
3750 +
3751 + if (n && __access_ok((unsigned long) from, n)) {
3752 + if (!__builtin_constant_p(n))
3753 + check_object_size(to, n, false);
3754 return __copy_user((__force void __user *) to, from, n);
3755 - else
3756 + } else
3757 return n;
3758 }
3759
3760 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
3761 {
3762 + if ((long)n < 0)
3763 + return n;
3764 +
3765 return __copy_user((__force void __user *) to, from, n);
3766 }
3767
3768 diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
3769 index 3e1449f..5293a0e 100644
3770 --- a/arch/sparc/include/asm/uaccess_64.h
3771 +++ b/arch/sparc/include/asm/uaccess_64.h
3772 @@ -10,6 +10,7 @@
3773 #include <linux/compiler.h>
3774 #include <linux/string.h>
3775 #include <linux/thread_info.h>
3776 +#include <linux/kernel.h>
3777 #include <asm/asi.h>
3778 #include <asm/system.h>
3779 #include <asm/spitfire.h>
3780 @@ -213,8 +214,15 @@ extern unsigned long copy_from_user_fixup(void *to, const void __user *from,
3781 static inline unsigned long __must_check
3782 copy_from_user(void *to, const void __user *from, unsigned long size)
3783 {
3784 - unsigned long ret = ___copy_from_user(to, from, size);
3785 + unsigned long ret;
3786
3787 + if ((long)size < 0 || size > INT_MAX)
3788 + return size;
3789 +
3790 + if (!__builtin_constant_p(size))
3791 + check_object_size(to, size, false);
3792 +
3793 + ret = ___copy_from_user(to, from, size);
3794 if (unlikely(ret))
3795 ret = copy_from_user_fixup(to, from, size);
3796
3797 @@ -230,8 +238,15 @@ extern unsigned long copy_to_user_fixup(void __user *to, const void *from,
3798 static inline unsigned long __must_check
3799 copy_to_user(void __user *to, const void *from, unsigned long size)
3800 {
3801 - unsigned long ret = ___copy_to_user(to, from, size);
3802 + unsigned long ret;
3803
3804 + if ((long)size < 0 || size > INT_MAX)
3805 + return size;
3806 +
3807 + if (!__builtin_constant_p(size))
3808 + check_object_size(from, size, true);
3809 +
3810 + ret = ___copy_to_user(to, from, size);
3811 if (unlikely(ret))
3812 ret = copy_to_user_fixup(to, from, size);
3813 return ret;
3814 diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
3815 index cb85458..e063f17 100644
3816 --- a/arch/sparc/kernel/Makefile
3817 +++ b/arch/sparc/kernel/Makefile
3818 @@ -3,7 +3,7 @@
3819 #
3820
3821 asflags-y := -ansi
3822 -ccflags-y := -Werror
3823 +#ccflags-y := -Werror
3824
3825 extra-y := head_$(BITS).o
3826 extra-y += init_task.o
3827 diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
3828 index f793742..4d880af 100644
3829 --- a/arch/sparc/kernel/process_32.c
3830 +++ b/arch/sparc/kernel/process_32.c
3831 @@ -204,7 +204,7 @@ void __show_backtrace(unsigned long fp)
3832 rw->ins[4], rw->ins[5],
3833 rw->ins[6],
3834 rw->ins[7]);
3835 - printk("%pS\n", (void *) rw->ins[7]);
3836 + printk("%pA\n", (void *) rw->ins[7]);
3837 rw = (struct reg_window32 *) rw->ins[6];
3838 }
3839 spin_unlock_irqrestore(&sparc_backtrace_lock, flags);
3840 @@ -271,14 +271,14 @@ void show_regs(struct pt_regs *r)
3841
3842 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
3843 r->psr, r->pc, r->npc, r->y, print_tainted());
3844 - printk("PC: <%pS>\n", (void *) r->pc);
3845 + printk("PC: <%pA>\n", (void *) r->pc);
3846 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
3847 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
3848 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
3849 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
3850 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
3851 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
3852 - printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
3853 + printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
3854
3855 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
3856 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
3857 @@ -313,7 +313,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
3858 rw = (struct reg_window32 *) fp;
3859 pc = rw->ins[7];
3860 printk("[%08lx : ", pc);
3861 - printk("%pS ] ", (void *) pc);
3862 + printk("%pA ] ", (void *) pc);
3863 fp = rw->ins[6];
3864 } while (++count < 16);
3865 printk("\n");
3866 diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
3867 index 3739a06..48b2ff0 100644
3868 --- a/arch/sparc/kernel/process_64.c
3869 +++ b/arch/sparc/kernel/process_64.c
3870 @@ -180,14 +180,14 @@ static void show_regwindow(struct pt_regs *regs)
3871 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
3872 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
3873 if (regs->tstate & TSTATE_PRIV)
3874 - printk("I7: <%pS>\n", (void *) rwk->ins[7]);
3875 + printk("I7: <%pA>\n", (void *) rwk->ins[7]);
3876 }
3877
3878 void show_regs(struct pt_regs *regs)
3879 {
3880 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
3881 regs->tpc, regs->tnpc, regs->y, print_tainted());
3882 - printk("TPC: <%pS>\n", (void *) regs->tpc);
3883 + printk("TPC: <%pA>\n", (void *) regs->tpc);
3884 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
3885 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
3886 regs->u_regs[3]);
3887 @@ -200,7 +200,7 @@ void show_regs(struct pt_regs *regs)
3888 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
3889 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
3890 regs->u_regs[15]);
3891 - printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
3892 + printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
3893 show_regwindow(regs);
3894 show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
3895 }
3896 @@ -285,7 +285,7 @@ void arch_trigger_all_cpu_backtrace(void)
3897 ((tp && tp->task) ? tp->task->pid : -1));
3898
3899 if (gp->tstate & TSTATE_PRIV) {
3900 - printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
3901 + printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
3902 (void *) gp->tpc,
3903 (void *) gp->o7,
3904 (void *) gp->i7,
3905 diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
3906 index 42b282f..28ce9f2 100644
3907 --- a/arch/sparc/kernel/sys_sparc_32.c
3908 +++ b/arch/sparc/kernel/sys_sparc_32.c
3909 @@ -56,7 +56,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
3910 if (ARCH_SUN4C && len > 0x20000000)
3911 return -ENOMEM;
3912 if (!addr)
3913 - addr = TASK_UNMAPPED_BASE;
3914 + addr = current->mm->mmap_base;
3915
3916 if (flags & MAP_SHARED)
3917 addr = COLOUR_ALIGN(addr);
3918 @@ -71,7 +71,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
3919 }
3920 if (TASK_SIZE - PAGE_SIZE - len < addr)
3921 return -ENOMEM;
3922 - if (!vmm || addr + len <= vmm->vm_start)
3923 + if (check_heap_stack_gap(vmm, addr, len))
3924 return addr;
3925 addr = vmm->vm_end;
3926 if (flags & MAP_SHARED)
3927 diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
3928 index 441521a..b767073 100644
3929 --- a/arch/sparc/kernel/sys_sparc_64.c
3930 +++ b/arch/sparc/kernel/sys_sparc_64.c
3931 @@ -124,7 +124,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
3932 /* We do not accept a shared mapping if it would violate
3933 * cache aliasing constraints.
3934 */
3935 - if ((flags & MAP_SHARED) &&
3936 + if ((filp || (flags & MAP_SHARED)) &&
3937 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
3938 return -EINVAL;
3939 return addr;
3940 @@ -139,6 +139,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
3941 if (filp || (flags & MAP_SHARED))
3942 do_color_align = 1;
3943
3944 +#ifdef CONFIG_PAX_RANDMMAP
3945 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
3946 +#endif
3947 +
3948 if (addr) {
3949 if (do_color_align)
3950 addr = COLOUR_ALIGN(addr, pgoff);
3951 @@ -146,15 +150,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
3952 addr = PAGE_ALIGN(addr);
3953
3954 vma = find_vma(mm, addr);
3955 - if (task_size - len >= addr &&
3956 - (!vma || addr + len <= vma->vm_start))
3957 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
3958 return addr;
3959 }
3960
3961 if (len > mm->cached_hole_size) {
3962 - start_addr = addr = mm->free_area_cache;
3963 + start_addr = addr = mm->free_area_cache;
3964 } else {
3965 - start_addr = addr = TASK_UNMAPPED_BASE;
3966 + start_addr = addr = mm->mmap_base;
3967 mm->cached_hole_size = 0;
3968 }
3969
3970 @@ -174,14 +177,14 @@ full_search:
3971 vma = find_vma(mm, VA_EXCLUDE_END);
3972 }
3973 if (unlikely(task_size < addr)) {
3974 - if (start_addr != TASK_UNMAPPED_BASE) {
3975 - start_addr = addr = TASK_UNMAPPED_BASE;
3976 + if (start_addr != mm->mmap_base) {
3977 + start_addr = addr = mm->mmap_base;
3978 mm->cached_hole_size = 0;
3979 goto full_search;
3980 }
3981 return -ENOMEM;
3982 }
3983 - if (likely(!vma || addr + len <= vma->vm_start)) {
3984 + if (likely(check_heap_stack_gap(vma, addr, len))) {
3985 /*
3986 * Remember the place where we stopped the search:
3987 */
3988 @@ -215,7 +218,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
3989 /* We do not accept a shared mapping if it would violate
3990 * cache aliasing constraints.
3991 */
3992 - if ((flags & MAP_SHARED) &&
3993 + if ((filp || (flags & MAP_SHARED)) &&
3994 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
3995 return -EINVAL;
3996 return addr;
3997 @@ -236,8 +239,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
3998 addr = PAGE_ALIGN(addr);
3999
4000 vma = find_vma(mm, addr);
4001 - if (task_size - len >= addr &&
4002 - (!vma || addr + len <= vma->vm_start))
4003 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
4004 return addr;
4005 }
4006
4007 @@ -258,7 +260,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4008 /* make sure it can fit in the remaining address space */
4009 if (likely(addr > len)) {
4010 vma = find_vma(mm, addr-len);
4011 - if (!vma || addr <= vma->vm_start) {
4012 + if (check_heap_stack_gap(vma, addr - len, len)) {
4013 /* remember the address as a hint for next time */
4014 return (mm->free_area_cache = addr-len);
4015 }
4016 @@ -267,18 +269,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4017 if (unlikely(mm->mmap_base < len))
4018 goto bottomup;
4019
4020 - addr = mm->mmap_base-len;
4021 - if (do_color_align)
4022 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4023 + addr = mm->mmap_base - len;
4024
4025 do {
4026 + if (do_color_align)
4027 + addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4028 /*
4029 * Lookup failure means no vma is above this address,
4030 * else if new region fits below vma->vm_start,
4031 * return with success:
4032 */
4033 vma = find_vma(mm, addr);
4034 - if (likely(!vma || addr+len <= vma->vm_start)) {
4035 + if (likely(check_heap_stack_gap(vma, addr, len))) {
4036 /* remember the address as a hint for next time */
4037 return (mm->free_area_cache = addr);
4038 }
4039 @@ -288,10 +290,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4040 mm->cached_hole_size = vma->vm_start - addr;
4041
4042 /* try just below the current vma->vm_start */
4043 - addr = vma->vm_start-len;
4044 - if (do_color_align)
4045 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4046 - } while (likely(len < vma->vm_start));
4047 + addr = skip_heap_stack_gap(vma, len);
4048 + } while (!IS_ERR_VALUE(addr));
4049
4050 bottomup:
4051 /*
4052 @@ -390,6 +390,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4053 gap == RLIM_INFINITY ||
4054 sysctl_legacy_va_layout) {
4055 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
4056 +
4057 +#ifdef CONFIG_PAX_RANDMMAP
4058 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4059 + mm->mmap_base += mm->delta_mmap;
4060 +#endif
4061 +
4062 mm->get_unmapped_area = arch_get_unmapped_area;
4063 mm->unmap_area = arch_unmap_area;
4064 } else {
4065 @@ -402,6 +408,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4066 gap = (task_size / 6 * 5);
4067
4068 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
4069 +
4070 +#ifdef CONFIG_PAX_RANDMMAP
4071 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4072 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4073 +#endif
4074 +
4075 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4076 mm->unmap_area = arch_unmap_area_topdown;
4077 }
4078 diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
4079 index 591f20c..0f1b925 100644
4080 --- a/arch/sparc/kernel/traps_32.c
4081 +++ b/arch/sparc/kernel/traps_32.c
4082 @@ -45,6 +45,8 @@ static void instruction_dump(unsigned long *pc)
4083 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
4084 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
4085
4086 +extern void gr_handle_kernel_exploit(void);
4087 +
4088 void die_if_kernel(char *str, struct pt_regs *regs)
4089 {
4090 static int die_counter;
4091 @@ -77,15 +79,17 @@ void die_if_kernel(char *str, struct pt_regs *regs)
4092 count++ < 30 &&
4093 (((unsigned long) rw) >= PAGE_OFFSET) &&
4094 !(((unsigned long) rw) & 0x7)) {
4095 - printk("Caller[%08lx]: %pS\n", rw->ins[7],
4096 + printk("Caller[%08lx]: %pA\n", rw->ins[7],
4097 (void *) rw->ins[7]);
4098 rw = (struct reg_window32 *)rw->ins[6];
4099 }
4100 }
4101 printk("Instruction DUMP:");
4102 instruction_dump ((unsigned long *) regs->pc);
4103 - if(regs->psr & PSR_PS)
4104 + if(regs->psr & PSR_PS) {
4105 + gr_handle_kernel_exploit();
4106 do_exit(SIGKILL);
4107 + }
4108 do_exit(SIGSEGV);
4109 }
4110
4111 diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
4112 index 0cbdaa4..438e4c9 100644
4113 --- a/arch/sparc/kernel/traps_64.c
4114 +++ b/arch/sparc/kernel/traps_64.c
4115 @@ -75,7 +75,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
4116 i + 1,
4117 p->trapstack[i].tstate, p->trapstack[i].tpc,
4118 p->trapstack[i].tnpc, p->trapstack[i].tt);
4119 - printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
4120 + printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
4121 }
4122 }
4123
4124 @@ -95,6 +95,12 @@ void bad_trap(struct pt_regs *regs, long lvl)
4125
4126 lvl -= 0x100;
4127 if (regs->tstate & TSTATE_PRIV) {
4128 +
4129 +#ifdef CONFIG_PAX_REFCOUNT
4130 + if (lvl == 6)
4131 + pax_report_refcount_overflow(regs);
4132 +#endif
4133 +
4134 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
4135 die_if_kernel(buffer, regs);
4136 }
4137 @@ -113,11 +119,16 @@ void bad_trap(struct pt_regs *regs, long lvl)
4138 void bad_trap_tl1(struct pt_regs *regs, long lvl)
4139 {
4140 char buffer[32];
4141 -
4142 +
4143 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
4144 0, lvl, SIGTRAP) == NOTIFY_STOP)
4145 return;
4146
4147 +#ifdef CONFIG_PAX_REFCOUNT
4148 + if (lvl == 6)
4149 + pax_report_refcount_overflow(regs);
4150 +#endif
4151 +
4152 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
4153
4154 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
4155 @@ -1141,7 +1152,7 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in
4156 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
4157 printk("%s" "ERROR(%d): ",
4158 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
4159 - printk("TPC<%pS>\n", (void *) regs->tpc);
4160 + printk("TPC<%pA>\n", (void *) regs->tpc);
4161 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
4162 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
4163 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
4164 @@ -1748,7 +1759,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
4165 smp_processor_id(),
4166 (type & 0x1) ? 'I' : 'D',
4167 regs->tpc);
4168 - printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
4169 + printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
4170 panic("Irrecoverable Cheetah+ parity error.");
4171 }
4172
4173 @@ -1756,7 +1767,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
4174 smp_processor_id(),
4175 (type & 0x1) ? 'I' : 'D',
4176 regs->tpc);
4177 - printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
4178 + printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
4179 }
4180
4181 struct sun4v_error_entry {
4182 @@ -1963,9 +1974,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
4183
4184 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
4185 regs->tpc, tl);
4186 - printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
4187 + printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
4188 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
4189 - printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
4190 + printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
4191 (void *) regs->u_regs[UREG_I7]);
4192 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
4193 "pte[%lx] error[%lx]\n",
4194 @@ -1987,9 +1998,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
4195
4196 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
4197 regs->tpc, tl);
4198 - printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
4199 + printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
4200 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
4201 - printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
4202 + printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
4203 (void *) regs->u_regs[UREG_I7]);
4204 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
4205 "pte[%lx] error[%lx]\n",
4206 @@ -2195,13 +2206,13 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
4207 fp = (unsigned long)sf->fp + STACK_BIAS;
4208 }
4209
4210 - printk(" [%016lx] %pS\n", pc, (void *) pc);
4211 + printk(" [%016lx] %pA\n", pc, (void *) pc);
4212 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
4213 if ((pc + 8UL) == (unsigned long) &return_to_handler) {
4214 int index = tsk->curr_ret_stack;
4215 if (tsk->ret_stack && index >= graph) {
4216 pc = tsk->ret_stack[index - graph].ret;
4217 - printk(" [%016lx] %pS\n", pc, (void *) pc);
4218 + printk(" [%016lx] %pA\n", pc, (void *) pc);
4219 graph++;
4220 }
4221 }
4222 @@ -2226,6 +2237,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
4223 return (struct reg_window *) (fp + STACK_BIAS);
4224 }
4225
4226 +extern void gr_handle_kernel_exploit(void);
4227 +
4228 void die_if_kernel(char *str, struct pt_regs *regs)
4229 {
4230 static int die_counter;
4231 @@ -2254,7 +2267,7 @@ void die_if_kernel(char *str, struct pt_regs *regs)
4232 while (rw &&
4233 count++ < 30 &&
4234 kstack_valid(tp, (unsigned long) rw)) {
4235 - printk("Caller[%016lx]: %pS\n", rw->ins[7],
4236 + printk("Caller[%016lx]: %pA\n", rw->ins[7],
4237 (void *) rw->ins[7]);
4238
4239 rw = kernel_stack_up(rw);
4240 @@ -2267,8 +2280,10 @@ void die_if_kernel(char *str, struct pt_regs *regs)
4241 }
4242 user_instruction_dump ((unsigned int __user *) regs->tpc);
4243 }
4244 - if (regs->tstate & TSTATE_PRIV)
4245 + if (regs->tstate & TSTATE_PRIV) {
4246 + gr_handle_kernel_exploit();
4247 do_exit(SIGKILL);
4248 + }
4249 do_exit(SIGSEGV);
4250 }
4251 EXPORT_SYMBOL(die_if_kernel);
4252 diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
4253 index 76e4ac1..78f8bb1 100644
4254 --- a/arch/sparc/kernel/unaligned_64.c
4255 +++ b/arch/sparc/kernel/unaligned_64.c
4256 @@ -279,7 +279,7 @@ static void log_unaligned(struct pt_regs *regs)
4257 static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
4258
4259 if (__ratelimit(&ratelimit)) {
4260 - printk("Kernel unaligned access at TPC[%lx] %pS\n",
4261 + printk("Kernel unaligned access at TPC[%lx] %pA\n",
4262 regs->tpc, (void *) regs->tpc);
4263 }
4264 }
4265 diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
4266 index a3fc437..fea9957 100644
4267 --- a/arch/sparc/lib/Makefile
4268 +++ b/arch/sparc/lib/Makefile
4269 @@ -2,7 +2,7 @@
4270 #
4271
4272 asflags-y := -ansi -DST_DIV0=0x02
4273 -ccflags-y := -Werror
4274 +#ccflags-y := -Werror
4275
4276 lib-$(CONFIG_SPARC32) += mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o
4277 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
4278 diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
4279 index 59186e0..f747d7a 100644
4280 --- a/arch/sparc/lib/atomic_64.S
4281 +++ b/arch/sparc/lib/atomic_64.S
4282 @@ -18,7 +18,12 @@
4283 atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
4284 BACKOFF_SETUP(%o2)
4285 1: lduw [%o1], %g1
4286 - add %g1, %o0, %g7
4287 + addcc %g1, %o0, %g7
4288 +
4289 +#ifdef CONFIG_PAX_REFCOUNT
4290 + tvs %icc, 6
4291 +#endif
4292 +
4293 cas [%o1], %g1, %g7
4294 cmp %g1, %g7
4295 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
4296 @@ -28,12 +33,32 @@ atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
4297 2: BACKOFF_SPIN(%o2, %o3, 1b)
4298 .size atomic_add, .-atomic_add
4299
4300 + .globl atomic_add_unchecked
4301 + .type atomic_add_unchecked,#function
4302 +atomic_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4303 + BACKOFF_SETUP(%o2)
4304 +1: lduw [%o1], %g1
4305 + add %g1, %o0, %g7
4306 + cas [%o1], %g1, %g7
4307 + cmp %g1, %g7
4308 + bne,pn %icc, 2f
4309 + nop
4310 + retl
4311 + nop
4312 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4313 + .size atomic_add_unchecked, .-atomic_add_unchecked
4314 +
4315 .globl atomic_sub
4316 .type atomic_sub,#function
4317 atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
4318 BACKOFF_SETUP(%o2)
4319 1: lduw [%o1], %g1
4320 - sub %g1, %o0, %g7
4321 + subcc %g1, %o0, %g7
4322 +
4323 +#ifdef CONFIG_PAX_REFCOUNT
4324 + tvs %icc, 6
4325 +#endif
4326 +
4327 cas [%o1], %g1, %g7
4328 cmp %g1, %g7
4329 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
4330 @@ -43,12 +68,32 @@ atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
4331 2: BACKOFF_SPIN(%o2, %o3, 1b)
4332 .size atomic_sub, .-atomic_sub
4333
4334 + .globl atomic_sub_unchecked
4335 + .type atomic_sub_unchecked,#function
4336 +atomic_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
4337 + BACKOFF_SETUP(%o2)
4338 +1: lduw [%o1], %g1
4339 + sub %g1, %o0, %g7
4340 + cas [%o1], %g1, %g7
4341 + cmp %g1, %g7
4342 + bne,pn %icc, 2f
4343 + nop
4344 + retl
4345 + nop
4346 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4347 + .size atomic_sub_unchecked, .-atomic_sub_unchecked
4348 +
4349 .globl atomic_add_ret
4350 .type atomic_add_ret,#function
4351 atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
4352 BACKOFF_SETUP(%o2)
4353 1: lduw [%o1], %g1
4354 - add %g1, %o0, %g7
4355 + addcc %g1, %o0, %g7
4356 +
4357 +#ifdef CONFIG_PAX_REFCOUNT
4358 + tvs %icc, 6
4359 +#endif
4360 +
4361 cas [%o1], %g1, %g7
4362 cmp %g1, %g7
4363 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
4364 @@ -58,12 +103,33 @@ atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
4365 2: BACKOFF_SPIN(%o2, %o3, 1b)
4366 .size atomic_add_ret, .-atomic_add_ret
4367
4368 + .globl atomic_add_ret_unchecked
4369 + .type atomic_add_ret_unchecked,#function
4370 +atomic_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4371 + BACKOFF_SETUP(%o2)
4372 +1: lduw [%o1], %g1
4373 + addcc %g1, %o0, %g7
4374 + cas [%o1], %g1, %g7
4375 + cmp %g1, %g7
4376 + bne,pn %icc, 2f
4377 + add %g7, %o0, %g7
4378 + sra %g7, 0, %o0
4379 + retl
4380 + nop
4381 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4382 + .size atomic_add_ret_unchecked, .-atomic_add_ret_unchecked
4383 +
4384 .globl atomic_sub_ret
4385 .type atomic_sub_ret,#function
4386 atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
4387 BACKOFF_SETUP(%o2)
4388 1: lduw [%o1], %g1
4389 - sub %g1, %o0, %g7
4390 + subcc %g1, %o0, %g7
4391 +
4392 +#ifdef CONFIG_PAX_REFCOUNT
4393 + tvs %icc, 6
4394 +#endif
4395 +
4396 cas [%o1], %g1, %g7
4397 cmp %g1, %g7
4398 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
4399 @@ -78,7 +144,12 @@ atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
4400 atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
4401 BACKOFF_SETUP(%o2)
4402 1: ldx [%o1], %g1
4403 - add %g1, %o0, %g7
4404 + addcc %g1, %o0, %g7
4405 +
4406 +#ifdef CONFIG_PAX_REFCOUNT
4407 + tvs %xcc, 6
4408 +#endif
4409 +
4410 casx [%o1], %g1, %g7
4411 cmp %g1, %g7
4412 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
4413 @@ -88,12 +159,32 @@ atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
4414 2: BACKOFF_SPIN(%o2, %o3, 1b)
4415 .size atomic64_add, .-atomic64_add
4416
4417 + .globl atomic64_add_unchecked
4418 + .type atomic64_add_unchecked,#function
4419 +atomic64_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4420 + BACKOFF_SETUP(%o2)
4421 +1: ldx [%o1], %g1
4422 + addcc %g1, %o0, %g7
4423 + casx [%o1], %g1, %g7
4424 + cmp %g1, %g7
4425 + bne,pn %xcc, 2f
4426 + nop
4427 + retl
4428 + nop
4429 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4430 + .size atomic64_add_unchecked, .-atomic64_add_unchecked
4431 +
4432 .globl atomic64_sub
4433 .type atomic64_sub,#function
4434 atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
4435 BACKOFF_SETUP(%o2)
4436 1: ldx [%o1], %g1
4437 - sub %g1, %o0, %g7
4438 + subcc %g1, %o0, %g7
4439 +
4440 +#ifdef CONFIG_PAX_REFCOUNT
4441 + tvs %xcc, 6
4442 +#endif
4443 +
4444 casx [%o1], %g1, %g7
4445 cmp %g1, %g7
4446 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
4447 @@ -103,12 +194,32 @@ atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
4448 2: BACKOFF_SPIN(%o2, %o3, 1b)
4449 .size atomic64_sub, .-atomic64_sub
4450
4451 + .globl atomic64_sub_unchecked
4452 + .type atomic64_sub_unchecked,#function
4453 +atomic64_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
4454 + BACKOFF_SETUP(%o2)
4455 +1: ldx [%o1], %g1
4456 + subcc %g1, %o0, %g7
4457 + casx [%o1], %g1, %g7
4458 + cmp %g1, %g7
4459 + bne,pn %xcc, 2f
4460 + nop
4461 + retl
4462 + nop
4463 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4464 + .size atomic64_sub_unchecked, .-atomic64_sub_unchecked
4465 +
4466 .globl atomic64_add_ret
4467 .type atomic64_add_ret,#function
4468 atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
4469 BACKOFF_SETUP(%o2)
4470 1: ldx [%o1], %g1
4471 - add %g1, %o0, %g7
4472 + addcc %g1, %o0, %g7
4473 +
4474 +#ifdef CONFIG_PAX_REFCOUNT
4475 + tvs %xcc, 6
4476 +#endif
4477 +
4478 casx [%o1], %g1, %g7
4479 cmp %g1, %g7
4480 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
4481 @@ -118,12 +229,33 @@ atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
4482 2: BACKOFF_SPIN(%o2, %o3, 1b)
4483 .size atomic64_add_ret, .-atomic64_add_ret
4484
4485 + .globl atomic64_add_ret_unchecked
4486 + .type atomic64_add_ret_unchecked,#function
4487 +atomic64_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4488 + BACKOFF_SETUP(%o2)
4489 +1: ldx [%o1], %g1
4490 + addcc %g1, %o0, %g7
4491 + casx [%o1], %g1, %g7
4492 + cmp %g1, %g7
4493 + bne,pn %xcc, 2f
4494 + add %g7, %o0, %g7
4495 + mov %g7, %o0
4496 + retl
4497 + nop
4498 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4499 + .size atomic64_add_ret_unchecked, .-atomic64_add_ret_unchecked
4500 +
4501 .globl atomic64_sub_ret
4502 .type atomic64_sub_ret,#function
4503 atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
4504 BACKOFF_SETUP(%o2)
4505 1: ldx [%o1], %g1
4506 - sub %g1, %o0, %g7
4507 + subcc %g1, %o0, %g7
4508 +
4509 +#ifdef CONFIG_PAX_REFCOUNT
4510 + tvs %xcc, 6
4511 +#endif
4512 +
4513 casx [%o1], %g1, %g7
4514 cmp %g1, %g7
4515 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
4516 diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
4517 index 1b30bb3..b4a16c7 100644
4518 --- a/arch/sparc/lib/ksyms.c
4519 +++ b/arch/sparc/lib/ksyms.c
4520 @@ -142,12 +142,18 @@ EXPORT_SYMBOL(__downgrade_write);
4521
4522 /* Atomic counter implementation. */
4523 EXPORT_SYMBOL(atomic_add);
4524 +EXPORT_SYMBOL(atomic_add_unchecked);
4525 EXPORT_SYMBOL(atomic_add_ret);
4526 +EXPORT_SYMBOL(atomic_add_ret_unchecked);
4527 EXPORT_SYMBOL(atomic_sub);
4528 +EXPORT_SYMBOL(atomic_sub_unchecked);
4529 EXPORT_SYMBOL(atomic_sub_ret);
4530 EXPORT_SYMBOL(atomic64_add);
4531 +EXPORT_SYMBOL(atomic64_add_unchecked);
4532 EXPORT_SYMBOL(atomic64_add_ret);
4533 +EXPORT_SYMBOL(atomic64_add_ret_unchecked);
4534 EXPORT_SYMBOL(atomic64_sub);
4535 +EXPORT_SYMBOL(atomic64_sub_unchecked);
4536 EXPORT_SYMBOL(atomic64_sub_ret);
4537
4538 /* Atomic bit operations. */
4539 diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
4540 index 301421c..e2535d1 100644
4541 --- a/arch/sparc/mm/Makefile
4542 +++ b/arch/sparc/mm/Makefile
4543 @@ -2,7 +2,7 @@
4544 #
4545
4546 asflags-y := -ansi
4547 -ccflags-y := -Werror
4548 +#ccflags-y := -Werror
4549
4550 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o gup.o
4551 obj-y += fault_$(BITS).o
4552 diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
4553 index 8023fd7..c8e89e9 100644
4554 --- a/arch/sparc/mm/fault_32.c
4555 +++ b/arch/sparc/mm/fault_32.c
4556 @@ -21,6 +21,9 @@
4557 #include <linux/perf_event.h>
4558 #include <linux/interrupt.h>
4559 #include <linux/kdebug.h>
4560 +#include <linux/slab.h>
4561 +#include <linux/pagemap.h>
4562 +#include <linux/compiler.h>
4563
4564 #include <asm/system.h>
4565 #include <asm/page.h>
4566 @@ -208,6 +211,268 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
4567 return safe_compute_effective_address(regs, insn);
4568 }
4569
4570 +#ifdef CONFIG_PAX_PAGEEXEC
4571 +#ifdef CONFIG_PAX_DLRESOLVE
4572 +static void pax_emuplt_close(struct vm_area_struct *vma)
4573 +{
4574 + vma->vm_mm->call_dl_resolve = 0UL;
4575 +}
4576 +
4577 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
4578 +{
4579 + unsigned int *kaddr;
4580 +
4581 + vmf->page = alloc_page(GFP_HIGHUSER);
4582 + if (!vmf->page)
4583 + return VM_FAULT_OOM;
4584 +
4585 + kaddr = kmap(vmf->page);
4586 + memset(kaddr, 0, PAGE_SIZE);
4587 + kaddr[0] = 0x9DE3BFA8U; /* save */
4588 + flush_dcache_page(vmf->page);
4589 + kunmap(vmf->page);
4590 + return VM_FAULT_MAJOR;
4591 +}
4592 +
4593 +static const struct vm_operations_struct pax_vm_ops = {
4594 + .close = pax_emuplt_close,
4595 + .fault = pax_emuplt_fault
4596 +};
4597 +
4598 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
4599 +{
4600 + int ret;
4601 +
4602 + INIT_LIST_HEAD(&vma->anon_vma_chain);
4603 + vma->vm_mm = current->mm;
4604 + vma->vm_start = addr;
4605 + vma->vm_end = addr + PAGE_SIZE;
4606 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
4607 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
4608 + vma->vm_ops = &pax_vm_ops;
4609 +
4610 + ret = insert_vm_struct(current->mm, vma);
4611 + if (ret)
4612 + return ret;
4613 +
4614 + ++current->mm->total_vm;
4615 + return 0;
4616 +}
4617 +#endif
4618 +
4619 +/*
4620 + * PaX: decide what to do with offenders (regs->pc = fault address)
4621 + *
4622 + * returns 1 when task should be killed
4623 + * 2 when patched PLT trampoline was detected
4624 + * 3 when unpatched PLT trampoline was detected
4625 + */
4626 +static int pax_handle_fetch_fault(struct pt_regs *regs)
4627 +{
4628 +
4629 +#ifdef CONFIG_PAX_EMUPLT
4630 + int err;
4631 +
4632 + do { /* PaX: patched PLT emulation #1 */
4633 + unsigned int sethi1, sethi2, jmpl;
4634 +
4635 + err = get_user(sethi1, (unsigned int *)regs->pc);
4636 + err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
4637 + err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
4638 +
4639 + if (err)
4640 + break;
4641 +
4642 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
4643 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
4644 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
4645 + {
4646 + unsigned int addr;
4647 +
4648 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
4649 + addr = regs->u_regs[UREG_G1];
4650 + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4651 + regs->pc = addr;
4652 + regs->npc = addr+4;
4653 + return 2;
4654 + }
4655 + } while (0);
4656 +
4657 + { /* PaX: patched PLT emulation #2 */
4658 + unsigned int ba;
4659 +
4660 + err = get_user(ba, (unsigned int *)regs->pc);
4661 +
4662 + if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
4663 + unsigned int addr;
4664 +
4665 + addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
4666 + regs->pc = addr;
4667 + regs->npc = addr+4;
4668 + return 2;
4669 + }
4670 + }
4671 +
4672 + do { /* PaX: patched PLT emulation #3 */
4673 + unsigned int sethi, jmpl, nop;
4674 +
4675 + err = get_user(sethi, (unsigned int *)regs->pc);
4676 + err |= get_user(jmpl, (unsigned int *)(regs->pc+4));
4677 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
4678 +
4679 + if (err)
4680 + break;
4681 +
4682 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4683 + (jmpl & 0xFFFFE000U) == 0x81C06000U &&
4684 + nop == 0x01000000U)
4685 + {
4686 + unsigned int addr;
4687 +
4688 + addr = (sethi & 0x003FFFFFU) << 10;
4689 + regs->u_regs[UREG_G1] = addr;
4690 + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4691 + regs->pc = addr;
4692 + regs->npc = addr+4;
4693 + return 2;
4694 + }
4695 + } while (0);
4696 +
4697 + do { /* PaX: unpatched PLT emulation step 1 */
4698 + unsigned int sethi, ba, nop;
4699 +
4700 + err = get_user(sethi, (unsigned int *)regs->pc);
4701 + err |= get_user(ba, (unsigned int *)(regs->pc+4));
4702 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
4703 +
4704 + if (err)
4705 + break;
4706 +
4707 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4708 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
4709 + nop == 0x01000000U)
4710 + {
4711 + unsigned int addr, save, call;
4712 +
4713 + if ((ba & 0xFFC00000U) == 0x30800000U)
4714 + addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
4715 + else
4716 + addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
4717 +
4718 + err = get_user(save, (unsigned int *)addr);
4719 + err |= get_user(call, (unsigned int *)(addr+4));
4720 + err |= get_user(nop, (unsigned int *)(addr+8));
4721 + if (err)
4722 + break;
4723 +
4724 +#ifdef CONFIG_PAX_DLRESOLVE
4725 + if (save == 0x9DE3BFA8U &&
4726 + (call & 0xC0000000U) == 0x40000000U &&
4727 + nop == 0x01000000U)
4728 + {
4729 + struct vm_area_struct *vma;
4730 + unsigned long call_dl_resolve;
4731 +
4732 + down_read(&current->mm->mmap_sem);
4733 + call_dl_resolve = current->mm->call_dl_resolve;
4734 + up_read(&current->mm->mmap_sem);
4735 + if (likely(call_dl_resolve))
4736 + goto emulate;
4737 +
4738 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
4739 +
4740 + down_write(&current->mm->mmap_sem);
4741 + if (current->mm->call_dl_resolve) {
4742 + call_dl_resolve = current->mm->call_dl_resolve;
4743 + up_write(&current->mm->mmap_sem);
4744 + if (vma)
4745 + kmem_cache_free(vm_area_cachep, vma);
4746 + goto emulate;
4747 + }
4748 +
4749 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
4750 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
4751 + up_write(&current->mm->mmap_sem);
4752 + if (vma)
4753 + kmem_cache_free(vm_area_cachep, vma);
4754 + return 1;
4755 + }
4756 +
4757 + if (pax_insert_vma(vma, call_dl_resolve)) {
4758 + up_write(&current->mm->mmap_sem);
4759 + kmem_cache_free(vm_area_cachep, vma);
4760 + return 1;
4761 + }
4762 +
4763 + current->mm->call_dl_resolve = call_dl_resolve;
4764 + up_write(&current->mm->mmap_sem);
4765 +
4766 +emulate:
4767 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4768 + regs->pc = call_dl_resolve;
4769 + regs->npc = addr+4;
4770 + return 3;
4771 + }
4772 +#endif
4773 +
4774 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
4775 + if ((save & 0xFFC00000U) == 0x05000000U &&
4776 + (call & 0xFFFFE000U) == 0x85C0A000U &&
4777 + nop == 0x01000000U)
4778 + {
4779 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4780 + regs->u_regs[UREG_G2] = addr + 4;
4781 + addr = (save & 0x003FFFFFU) << 10;
4782 + addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4783 + regs->pc = addr;
4784 + regs->npc = addr+4;
4785 + return 3;
4786 + }
4787 + }
4788 + } while (0);
4789 +
4790 + do { /* PaX: unpatched PLT emulation step 2 */
4791 + unsigned int save, call, nop;
4792 +
4793 + err = get_user(save, (unsigned int *)(regs->pc-4));
4794 + err |= get_user(call, (unsigned int *)regs->pc);
4795 + err |= get_user(nop, (unsigned int *)(regs->pc+4));
4796 + if (err)
4797 + break;
4798 +
4799 + if (save == 0x9DE3BFA8U &&
4800 + (call & 0xC0000000U) == 0x40000000U &&
4801 + nop == 0x01000000U)
4802 + {
4803 + unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
4804 +
4805 + regs->u_regs[UREG_RETPC] = regs->pc;
4806 + regs->pc = dl_resolve;
4807 + regs->npc = dl_resolve+4;
4808 + return 3;
4809 + }
4810 + } while (0);
4811 +#endif
4812 +
4813 + return 1;
4814 +}
4815 +
4816 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
4817 +{
4818 + unsigned long i;
4819 +
4820 + printk(KERN_ERR "PAX: bytes at PC: ");
4821 + for (i = 0; i < 8; i++) {
4822 + unsigned int c;
4823 + if (get_user(c, (unsigned int *)pc+i))
4824 + printk(KERN_CONT "???????? ");
4825 + else
4826 + printk(KERN_CONT "%08x ", c);
4827 + }
4828 + printk("\n");
4829 +}
4830 +#endif
4831 +
4832 static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
4833 int text_fault)
4834 {
4835 @@ -280,6 +545,24 @@ good_area:
4836 if(!(vma->vm_flags & VM_WRITE))
4837 goto bad_area;
4838 } else {
4839 +
4840 +#ifdef CONFIG_PAX_PAGEEXEC
4841 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
4842 + up_read(&mm->mmap_sem);
4843 + switch (pax_handle_fetch_fault(regs)) {
4844 +
4845 +#ifdef CONFIG_PAX_EMUPLT
4846 + case 2:
4847 + case 3:
4848 + return;
4849 +#endif
4850 +
4851 + }
4852 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
4853 + do_group_exit(SIGKILL);
4854 + }
4855 +#endif
4856 +
4857 /* Allow reads even for write-only mappings */
4858 if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
4859 goto bad_area;
4860 diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
4861 index 504c062..6fcb9c6 100644
4862 --- a/arch/sparc/mm/fault_64.c
4863 +++ b/arch/sparc/mm/fault_64.c
4864 @@ -21,6 +21,9 @@
4865 #include <linux/kprobes.h>
4866 #include <linux/kdebug.h>
4867 #include <linux/percpu.h>
4868 +#include <linux/slab.h>
4869 +#include <linux/pagemap.h>
4870 +#include <linux/compiler.h>
4871
4872 #include <asm/page.h>
4873 #include <asm/pgtable.h>
4874 @@ -74,7 +77,7 @@ static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
4875 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
4876 regs->tpc);
4877 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
4878 - printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
4879 + printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
4880 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
4881 dump_stack();
4882 unhandled_fault(regs->tpc, current, regs);
4883 @@ -272,6 +275,457 @@ static void noinline __kprobes bogus_32bit_fault_address(struct pt_regs *regs,
4884 show_regs(regs);
4885 }
4886
4887 +#ifdef CONFIG_PAX_PAGEEXEC
4888 +#ifdef CONFIG_PAX_DLRESOLVE
4889 +static void pax_emuplt_close(struct vm_area_struct *vma)
4890 +{
4891 + vma->vm_mm->call_dl_resolve = 0UL;
4892 +}
4893 +
4894 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
4895 +{
4896 + unsigned int *kaddr;
4897 +
4898 + vmf->page = alloc_page(GFP_HIGHUSER);
4899 + if (!vmf->page)
4900 + return VM_FAULT_OOM;
4901 +
4902 + kaddr = kmap(vmf->page);
4903 + memset(kaddr, 0, PAGE_SIZE);
4904 + kaddr[0] = 0x9DE3BFA8U; /* save */
4905 + flush_dcache_page(vmf->page);
4906 + kunmap(vmf->page);
4907 + return VM_FAULT_MAJOR;
4908 +}
4909 +
4910 +static const struct vm_operations_struct pax_vm_ops = {
4911 + .close = pax_emuplt_close,
4912 + .fault = pax_emuplt_fault
4913 +};
4914 +
4915 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
4916 +{
4917 + int ret;
4918 +
4919 + INIT_LIST_HEAD(&vma->anon_vma_chain);
4920 + vma->vm_mm = current->mm;
4921 + vma->vm_start = addr;
4922 + vma->vm_end = addr + PAGE_SIZE;
4923 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
4924 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
4925 + vma->vm_ops = &pax_vm_ops;
4926 +
4927 + ret = insert_vm_struct(current->mm, vma);
4928 + if (ret)
4929 + return ret;
4930 +
4931 + ++current->mm->total_vm;
4932 + return 0;
4933 +}
4934 +#endif
4935 +
4936 +/*
4937 + * PaX: decide what to do with offenders (regs->tpc = fault address)
4938 + *
4939 + * returns 1 when task should be killed
4940 + * 2 when patched PLT trampoline was detected
4941 + * 3 when unpatched PLT trampoline was detected
4942 + */
4943 +static int pax_handle_fetch_fault(struct pt_regs *regs)
4944 +{
4945 +
4946 +#ifdef CONFIG_PAX_EMUPLT
4947 + int err;
4948 +
4949 + do { /* PaX: patched PLT emulation #1 */
4950 + unsigned int sethi1, sethi2, jmpl;
4951 +
4952 + err = get_user(sethi1, (unsigned int *)regs->tpc);
4953 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
4954 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
4955 +
4956 + if (err)
4957 + break;
4958 +
4959 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
4960 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
4961 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
4962 + {
4963 + unsigned long addr;
4964 +
4965 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
4966 + addr = regs->u_regs[UREG_G1];
4967 + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
4968 +
4969 + if (test_thread_flag(TIF_32BIT))
4970 + addr &= 0xFFFFFFFFUL;
4971 +
4972 + regs->tpc = addr;
4973 + regs->tnpc = addr+4;
4974 + return 2;
4975 + }
4976 + } while (0);
4977 +
4978 + { /* PaX: patched PLT emulation #2 */
4979 + unsigned int ba;
4980 +
4981 + err = get_user(ba, (unsigned int *)regs->tpc);
4982 +
4983 + if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
4984 + unsigned long addr;
4985 +
4986 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
4987 +
4988 + if (test_thread_flag(TIF_32BIT))
4989 + addr &= 0xFFFFFFFFUL;
4990 +
4991 + regs->tpc = addr;
4992 + regs->tnpc = addr+4;
4993 + return 2;
4994 + }
4995 + }
4996 +
4997 + do { /* PaX: patched PLT emulation #3 */
4998 + unsigned int sethi, jmpl, nop;
4999 +
5000 + err = get_user(sethi, (unsigned int *)regs->tpc);
5001 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+4));
5002 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
5003 +
5004 + if (err)
5005 + break;
5006 +
5007 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5008 + (jmpl & 0xFFFFE000U) == 0x81C06000U &&
5009 + nop == 0x01000000U)
5010 + {
5011 + unsigned long addr;
5012 +
5013 + addr = (sethi & 0x003FFFFFU) << 10;
5014 + regs->u_regs[UREG_G1] = addr;
5015 + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
5016 +
5017 + if (test_thread_flag(TIF_32BIT))
5018 + addr &= 0xFFFFFFFFUL;
5019 +
5020 + regs->tpc = addr;
5021 + regs->tnpc = addr+4;
5022 + return 2;
5023 + }
5024 + } while (0);
5025 +
5026 + do { /* PaX: patched PLT emulation #4 */
5027 + unsigned int sethi, mov1, call, mov2;
5028 +
5029 + err = get_user(sethi, (unsigned int *)regs->tpc);
5030 + err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
5031 + err |= get_user(call, (unsigned int *)(regs->tpc+8));
5032 + err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
5033 +
5034 + if (err)
5035 + break;
5036 +
5037 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5038 + mov1 == 0x8210000FU &&
5039 + (call & 0xC0000000U) == 0x40000000U &&
5040 + mov2 == 0x9E100001U)
5041 + {
5042 + unsigned long addr;
5043 +
5044 + regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
5045 + addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
5046 +
5047 + if (test_thread_flag(TIF_32BIT))
5048 + addr &= 0xFFFFFFFFUL;
5049 +
5050 + regs->tpc = addr;
5051 + regs->tnpc = addr+4;
5052 + return 2;
5053 + }
5054 + } while (0);
5055 +
5056 + do { /* PaX: patched PLT emulation #5 */
5057 + unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
5058 +
5059 + err = get_user(sethi, (unsigned int *)regs->tpc);
5060 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
5061 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
5062 + err |= get_user(or1, (unsigned int *)(regs->tpc+12));
5063 + err |= get_user(or2, (unsigned int *)(regs->tpc+16));
5064 + err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
5065 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
5066 + err |= get_user(nop, (unsigned int *)(regs->tpc+28));
5067 +
5068 + if (err)
5069 + break;
5070 +
5071 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5072 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
5073 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
5074 + (or1 & 0xFFFFE000U) == 0x82106000U &&
5075 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
5076 + sllx == 0x83287020U &&
5077 + jmpl == 0x81C04005U &&
5078 + nop == 0x01000000U)
5079 + {
5080 + unsigned long addr;
5081 +
5082 + regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
5083 + regs->u_regs[UREG_G1] <<= 32;
5084 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
5085 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
5086 + regs->tpc = addr;
5087 + regs->tnpc = addr+4;
5088 + return 2;
5089 + }
5090 + } while (0);
5091 +
5092 + do { /* PaX: patched PLT emulation #6 */
5093 + unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
5094 +
5095 + err = get_user(sethi, (unsigned int *)regs->tpc);
5096 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
5097 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
5098 + err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
5099 + err |= get_user(or, (unsigned int *)(regs->tpc+16));
5100 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
5101 + err |= get_user(nop, (unsigned int *)(regs->tpc+24));
5102 +
5103 + if (err)
5104 + break;
5105 +
5106 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5107 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
5108 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
5109 + sllx == 0x83287020U &&
5110 + (or & 0xFFFFE000U) == 0x8A116000U &&
5111 + jmpl == 0x81C04005U &&
5112 + nop == 0x01000000U)
5113 + {
5114 + unsigned long addr;
5115 +
5116 + regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
5117 + regs->u_regs[UREG_G1] <<= 32;
5118 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
5119 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
5120 + regs->tpc = addr;
5121 + regs->tnpc = addr+4;
5122 + return 2;
5123 + }
5124 + } while (0);
5125 +
5126 + do { /* PaX: unpatched PLT emulation step 1 */
5127 + unsigned int sethi, ba, nop;
5128 +
5129 + err = get_user(sethi, (unsigned int *)regs->tpc);
5130 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
5131 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
5132 +
5133 + if (err)
5134 + break;
5135 +
5136 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5137 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
5138 + nop == 0x01000000U)
5139 + {
5140 + unsigned long addr;
5141 + unsigned int save, call;
5142 + unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
5143 +
5144 + if ((ba & 0xFFC00000U) == 0x30800000U)
5145 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
5146 + else
5147 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
5148 +
5149 + if (test_thread_flag(TIF_32BIT))
5150 + addr &= 0xFFFFFFFFUL;
5151 +
5152 + err = get_user(save, (unsigned int *)addr);
5153 + err |= get_user(call, (unsigned int *)(addr+4));
5154 + err |= get_user(nop, (unsigned int *)(addr+8));
5155 + if (err)
5156 + break;
5157 +
5158 +#ifdef CONFIG_PAX_DLRESOLVE
5159 + if (save == 0x9DE3BFA8U &&
5160 + (call & 0xC0000000U) == 0x40000000U &&
5161 + nop == 0x01000000U)
5162 + {
5163 + struct vm_area_struct *vma;
5164 + unsigned long call_dl_resolve;
5165 +
5166 + down_read(&current->mm->mmap_sem);
5167 + call_dl_resolve = current->mm->call_dl_resolve;
5168 + up_read(&current->mm->mmap_sem);
5169 + if (likely(call_dl_resolve))
5170 + goto emulate;
5171 +
5172 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
5173 +
5174 + down_write(&current->mm->mmap_sem);
5175 + if (current->mm->call_dl_resolve) {
5176 + call_dl_resolve = current->mm->call_dl_resolve;
5177 + up_write(&current->mm->mmap_sem);
5178 + if (vma)
5179 + kmem_cache_free(vm_area_cachep, vma);
5180 + goto emulate;
5181 + }
5182 +
5183 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
5184 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
5185 + up_write(&current->mm->mmap_sem);
5186 + if (vma)
5187 + kmem_cache_free(vm_area_cachep, vma);
5188 + return 1;
5189 + }
5190 +
5191 + if (pax_insert_vma(vma, call_dl_resolve)) {
5192 + up_write(&current->mm->mmap_sem);
5193 + kmem_cache_free(vm_area_cachep, vma);
5194 + return 1;
5195 + }
5196 +
5197 + current->mm->call_dl_resolve = call_dl_resolve;
5198 + up_write(&current->mm->mmap_sem);
5199 +
5200 +emulate:
5201 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5202 + regs->tpc = call_dl_resolve;
5203 + regs->tnpc = addr+4;
5204 + return 3;
5205 + }
5206 +#endif
5207 +
5208 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
5209 + if ((save & 0xFFC00000U) == 0x05000000U &&
5210 + (call & 0xFFFFE000U) == 0x85C0A000U &&
5211 + nop == 0x01000000U)
5212 + {
5213 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5214 + regs->u_regs[UREG_G2] = addr + 4;
5215 + addr = (save & 0x003FFFFFU) << 10;
5216 + addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
5217 +
5218 + if (test_thread_flag(TIF_32BIT))
5219 + addr &= 0xFFFFFFFFUL;
5220 +
5221 + regs->tpc = addr;
5222 + regs->tnpc = addr+4;
5223 + return 3;
5224 + }
5225 +
5226 + /* PaX: 64-bit PLT stub */
5227 + err = get_user(sethi1, (unsigned int *)addr);
5228 + err |= get_user(sethi2, (unsigned int *)(addr+4));
5229 + err |= get_user(or1, (unsigned int *)(addr+8));
5230 + err |= get_user(or2, (unsigned int *)(addr+12));
5231 + err |= get_user(sllx, (unsigned int *)(addr+16));
5232 + err |= get_user(add, (unsigned int *)(addr+20));
5233 + err |= get_user(jmpl, (unsigned int *)(addr+24));
5234 + err |= get_user(nop, (unsigned int *)(addr+28));
5235 + if (err)
5236 + break;
5237 +
5238 + if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
5239 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
5240 + (or1 & 0xFFFFE000U) == 0x88112000U &&
5241 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
5242 + sllx == 0x89293020U &&
5243 + add == 0x8A010005U &&
5244 + jmpl == 0x89C14000U &&
5245 + nop == 0x01000000U)
5246 + {
5247 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5248 + regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
5249 + regs->u_regs[UREG_G4] <<= 32;
5250 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
5251 + regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
5252 + regs->u_regs[UREG_G4] = addr + 24;
5253 + addr = regs->u_regs[UREG_G5];
5254 + regs->tpc = addr;
5255 + regs->tnpc = addr+4;
5256 + return 3;
5257 + }
5258 + }
5259 + } while (0);
5260 +
5261 +#ifdef CONFIG_PAX_DLRESOLVE
5262 + do { /* PaX: unpatched PLT emulation step 2 */
5263 + unsigned int save, call, nop;
5264 +
5265 + err = get_user(save, (unsigned int *)(regs->tpc-4));
5266 + err |= get_user(call, (unsigned int *)regs->tpc);
5267 + err |= get_user(nop, (unsigned int *)(regs->tpc+4));
5268 + if (err)
5269 + break;
5270 +
5271 + if (save == 0x9DE3BFA8U &&
5272 + (call & 0xC0000000U) == 0x40000000U &&
5273 + nop == 0x01000000U)
5274 + {
5275 + unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
5276 +
5277 + if (test_thread_flag(TIF_32BIT))
5278 + dl_resolve &= 0xFFFFFFFFUL;
5279 +
5280 + regs->u_regs[UREG_RETPC] = regs->tpc;
5281 + regs->tpc = dl_resolve;
5282 + regs->tnpc = dl_resolve+4;
5283 + return 3;
5284 + }
5285 + } while (0);
5286 +#endif
5287 +
5288 + do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
5289 + unsigned int sethi, ba, nop;
5290 +
5291 + err = get_user(sethi, (unsigned int *)regs->tpc);
5292 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
5293 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
5294 +
5295 + if (err)
5296 + break;
5297 +
5298 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5299 + (ba & 0xFFF00000U) == 0x30600000U &&
5300 + nop == 0x01000000U)
5301 + {
5302 + unsigned long addr;
5303 +
5304 + addr = (sethi & 0x003FFFFFU) << 10;
5305 + regs->u_regs[UREG_G1] = addr;
5306 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
5307 +
5308 + if (test_thread_flag(TIF_32BIT))
5309 + addr &= 0xFFFFFFFFUL;
5310 +
5311 + regs->tpc = addr;
5312 + regs->tnpc = addr+4;
5313 + return 2;
5314 + }
5315 + } while (0);
5316 +
5317 +#endif
5318 +
5319 + return 1;
5320 +}
5321 +
5322 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
5323 +{
5324 + unsigned long i;
5325 +
5326 + printk(KERN_ERR "PAX: bytes at PC: ");
5327 + for (i = 0; i < 8; i++) {
5328 + unsigned int c;
5329 + if (get_user(c, (unsigned int *)pc+i))
5330 + printk(KERN_CONT "???????? ");
5331 + else
5332 + printk(KERN_CONT "%08x ", c);
5333 + }
5334 + printk("\n");
5335 +}
5336 +#endif
5337 +
5338 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
5339 {
5340 struct mm_struct *mm = current->mm;
5341 @@ -340,6 +794,29 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
5342 if (!vma)
5343 goto bad_area;
5344
5345 +#ifdef CONFIG_PAX_PAGEEXEC
5346 + /* PaX: detect ITLB misses on non-exec pages */
5347 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
5348 + !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
5349 + {
5350 + if (address != regs->tpc)
5351 + goto good_area;
5352 +
5353 + up_read(&mm->mmap_sem);
5354 + switch (pax_handle_fetch_fault(regs)) {
5355 +
5356 +#ifdef CONFIG_PAX_EMUPLT
5357 + case 2:
5358 + case 3:
5359 + return;
5360 +#endif
5361 +
5362 + }
5363 + pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
5364 + do_group_exit(SIGKILL);
5365 + }
5366 +#endif
5367 +
5368 /* Pure DTLB misses do not tell us whether the fault causing
5369 * load/store/atomic was a write or not, it only says that there
5370 * was no match. So in such a case we (carefully) read the
5371 diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
5372 index 07e1453..0a7d9e9 100644
5373 --- a/arch/sparc/mm/hugetlbpage.c
5374 +++ b/arch/sparc/mm/hugetlbpage.c
5375 @@ -67,7 +67,7 @@ full_search:
5376 }
5377 return -ENOMEM;
5378 }
5379 - if (likely(!vma || addr + len <= vma->vm_start)) {
5380 + if (likely(check_heap_stack_gap(vma, addr, len))) {
5381 /*
5382 * Remember the place where we stopped the search:
5383 */
5384 @@ -106,7 +106,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5385 /* make sure it can fit in the remaining address space */
5386 if (likely(addr > len)) {
5387 vma = find_vma(mm, addr-len);
5388 - if (!vma || addr <= vma->vm_start) {
5389 + if (check_heap_stack_gap(vma, addr - len, len)) {
5390 /* remember the address as a hint for next time */
5391 return (mm->free_area_cache = addr-len);
5392 }
5393 @@ -115,16 +115,17 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5394 if (unlikely(mm->mmap_base < len))
5395 goto bottomup;
5396
5397 - addr = (mm->mmap_base-len) & HPAGE_MASK;
5398 + addr = mm->mmap_base - len;
5399
5400 do {
5401 + addr &= HPAGE_MASK;
5402 /*
5403 * Lookup failure means no vma is above this address,
5404 * else if new region fits below vma->vm_start,
5405 * return with success:
5406 */
5407 vma = find_vma(mm, addr);
5408 - if (likely(!vma || addr+len <= vma->vm_start)) {
5409 + if (likely(check_heap_stack_gap(vma, addr, len))) {
5410 /* remember the address as a hint for next time */
5411 return (mm->free_area_cache = addr);
5412 }
5413 @@ -134,8 +135,8 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5414 mm->cached_hole_size = vma->vm_start - addr;
5415
5416 /* try just below the current vma->vm_start */
5417 - addr = (vma->vm_start-len) & HPAGE_MASK;
5418 - } while (likely(len < vma->vm_start));
5419 + addr = skip_heap_stack_gap(vma, len);
5420 + } while (!IS_ERR_VALUE(addr));
5421
5422 bottomup:
5423 /*
5424 @@ -181,8 +182,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
5425 if (addr) {
5426 addr = ALIGN(addr, HPAGE_SIZE);
5427 vma = find_vma(mm, addr);
5428 - if (task_size - len >= addr &&
5429 - (!vma || addr + len <= vma->vm_start))
5430 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
5431 return addr;
5432 }
5433 if (mm->get_unmapped_area == arch_get_unmapped_area)
5434 diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
5435 index 7b00de6..78239f4 100644
5436 --- a/arch/sparc/mm/init_32.c
5437 +++ b/arch/sparc/mm/init_32.c
5438 @@ -316,6 +316,9 @@ extern void device_scan(void);
5439 pgprot_t PAGE_SHARED __read_mostly;
5440 EXPORT_SYMBOL(PAGE_SHARED);
5441
5442 +pgprot_t PAGE_SHARED_NOEXEC __read_mostly;
5443 +EXPORT_SYMBOL(PAGE_SHARED_NOEXEC);
5444 +
5445 void __init paging_init(void)
5446 {
5447 switch(sparc_cpu_model) {
5448 @@ -344,17 +347,17 @@ void __init paging_init(void)
5449
5450 /* Initialize the protection map with non-constant, MMU dependent values. */
5451 protection_map[0] = PAGE_NONE;
5452 - protection_map[1] = PAGE_READONLY;
5453 - protection_map[2] = PAGE_COPY;
5454 - protection_map[3] = PAGE_COPY;
5455 + protection_map[1] = PAGE_READONLY_NOEXEC;
5456 + protection_map[2] = PAGE_COPY_NOEXEC;
5457 + protection_map[3] = PAGE_COPY_NOEXEC;
5458 protection_map[4] = PAGE_READONLY;
5459 protection_map[5] = PAGE_READONLY;
5460 protection_map[6] = PAGE_COPY;
5461 protection_map[7] = PAGE_COPY;
5462 protection_map[8] = PAGE_NONE;
5463 - protection_map[9] = PAGE_READONLY;
5464 - protection_map[10] = PAGE_SHARED;
5465 - protection_map[11] = PAGE_SHARED;
5466 + protection_map[9] = PAGE_READONLY_NOEXEC;
5467 + protection_map[10] = PAGE_SHARED_NOEXEC;
5468 + protection_map[11] = PAGE_SHARED_NOEXEC;
5469 protection_map[12] = PAGE_READONLY;
5470 protection_map[13] = PAGE_READONLY;
5471 protection_map[14] = PAGE_SHARED;
5472 diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
5473 index cbef74e..c38fead 100644
5474 --- a/arch/sparc/mm/srmmu.c
5475 +++ b/arch/sparc/mm/srmmu.c
5476 @@ -2200,6 +2200,13 @@ void __init ld_mmu_srmmu(void)
5477 PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
5478 BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
5479 BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
5480 +
5481 +#ifdef CONFIG_PAX_PAGEEXEC
5482 + PAGE_SHARED_NOEXEC = pgprot_val(SRMMU_PAGE_SHARED_NOEXEC);
5483 + BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC));
5484 + BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC));
5485 +#endif
5486 +
5487 BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
5488 page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
5489
5490 diff --git a/arch/um/Makefile b/arch/um/Makefile
5491 index 7730af6..cce5b19 100644
5492 --- a/arch/um/Makefile
5493 +++ b/arch/um/Makefile
5494 @@ -61,6 +61,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -D__KERNEL__,,\
5495 $(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \
5496 $(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64 -idirafter include
5497
5498 +ifdef CONSTIFY_PLUGIN
5499 +USER_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
5500 +endif
5501 +
5502 #This will adjust *FLAGS accordingly to the platform.
5503 include $(srctree)/$(ARCH_DIR)/Makefile-os-$(OS)
5504
5505 diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h
5506 index 6c03acd..a5e0215 100644
5507 --- a/arch/um/include/asm/kmap_types.h
5508 +++ b/arch/um/include/asm/kmap_types.h
5509 @@ -23,6 +23,7 @@ enum km_type {
5510 KM_IRQ1,
5511 KM_SOFTIRQ0,
5512 KM_SOFTIRQ1,
5513 + KM_CLEARPAGE,
5514 KM_TYPE_NR
5515 };
5516
5517 diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h
5518 index 7cfc3ce..cbd1a58 100644
5519 --- a/arch/um/include/asm/page.h
5520 +++ b/arch/um/include/asm/page.h
5521 @@ -14,6 +14,9 @@
5522 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
5523 #define PAGE_MASK (~(PAGE_SIZE-1))
5524
5525 +#define ktla_ktva(addr) (addr)
5526 +#define ktva_ktla(addr) (addr)
5527 +
5528 #ifndef __ASSEMBLY__
5529
5530 struct page;
5531 diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
5532 index c533835..84db18e 100644
5533 --- a/arch/um/kernel/process.c
5534 +++ b/arch/um/kernel/process.c
5535 @@ -406,22 +406,6 @@ int singlestepping(void * t)
5536 return 2;
5537 }
5538
5539 -/*
5540 - * Only x86 and x86_64 have an arch_align_stack().
5541 - * All other arches have "#define arch_align_stack(x) (x)"
5542 - * in their asm/system.h
5543 - * As this is included in UML from asm-um/system-generic.h,
5544 - * we can use it to behave as the subarch does.
5545 - */
5546 -#ifndef arch_align_stack
5547 -unsigned long arch_align_stack(unsigned long sp)
5548 -{
5549 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
5550 - sp -= get_random_int() % 8192;
5551 - return sp & ~0xf;
5552 -}
5553 -#endif
5554 -
5555 unsigned long get_wchan(struct task_struct *p)
5556 {
5557 unsigned long stack_page, sp, ip;
5558 diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
5559 index efb4294..61bc18c 100644
5560 --- a/arch/x86/Kconfig
5561 +++ b/arch/x86/Kconfig
5562 @@ -235,7 +235,7 @@ config X86_HT
5563
5564 config X86_32_LAZY_GS
5565 def_bool y
5566 - depends on X86_32 && !CC_STACKPROTECTOR
5567 + depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
5568
5569 config ARCH_HWEIGHT_CFLAGS
5570 string
5571 @@ -1022,7 +1022,7 @@ choice
5572
5573 config NOHIGHMEM
5574 bool "off"
5575 - depends on !X86_NUMAQ
5576 + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
5577 ---help---
5578 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
5579 However, the address space of 32-bit x86 processors is only 4
5580 @@ -1059,7 +1059,7 @@ config NOHIGHMEM
5581
5582 config HIGHMEM4G
5583 bool "4GB"
5584 - depends on !X86_NUMAQ
5585 + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
5586 ---help---
5587 Select this if you have a 32-bit processor and between 1 and 4
5588 gigabytes of physical RAM.
5589 @@ -1113,7 +1113,7 @@ config PAGE_OFFSET
5590 hex
5591 default 0xB0000000 if VMSPLIT_3G_OPT
5592 default 0x80000000 if VMSPLIT_2G
5593 - default 0x78000000 if VMSPLIT_2G_OPT
5594 + default 0x70000000 if VMSPLIT_2G_OPT
5595 default 0x40000000 if VMSPLIT_1G
5596 default 0xC0000000
5597 depends on X86_32
5598 @@ -1496,6 +1496,7 @@ config SECCOMP
5599
5600 config CC_STACKPROTECTOR
5601 bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
5602 + depends on X86_64 || !PAX_MEMORY_UDEREF
5603 ---help---
5604 This option turns on the -fstack-protector GCC feature. This
5605 feature puts, at the beginning of functions, a canary value on
5606 @@ -1553,6 +1554,7 @@ config KEXEC_JUMP
5607 config PHYSICAL_START
5608 hex "Physical address where the kernel is loaded" if (EXPERT || CRASH_DUMP)
5609 default "0x1000000"
5610 + range 0x400000 0x40000000
5611 ---help---
5612 This gives the physical address where the kernel is loaded.
5613
5614 @@ -1616,6 +1618,7 @@ config X86_NEED_RELOCS
5615 config PHYSICAL_ALIGN
5616 hex "Alignment value to which kernel should be aligned" if X86_32
5617 default "0x1000000"
5618 + range 0x400000 0x1000000 if PAX_KERNEXEC
5619 range 0x2000 0x1000000
5620 ---help---
5621 This value puts the alignment restrictions on physical address
5622 @@ -1647,9 +1650,10 @@ config HOTPLUG_CPU
5623 Say N if you want to disable CPU hotplug.
5624
5625 config COMPAT_VDSO
5626 - def_bool y
5627 + def_bool n
5628 prompt "Compat VDSO support"
5629 depends on X86_32 || IA32_EMULATION
5630 + depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF
5631 ---help---
5632 Map the 32-bit VDSO to the predictable old-style address too.
5633
5634 diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
5635 index e3ca7e0..b30b28a 100644
5636 --- a/arch/x86/Kconfig.cpu
5637 +++ b/arch/x86/Kconfig.cpu
5638 @@ -341,7 +341,7 @@ config X86_PPRO_FENCE
5639
5640 config X86_F00F_BUG
5641 def_bool y
5642 - depends on M586MMX || M586TSC || M586 || M486 || M386
5643 + depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
5644
5645 config X86_INVD_BUG
5646 def_bool y
5647 @@ -365,7 +365,7 @@ config X86_POPAD_OK
5648
5649 config X86_ALIGNMENT_16
5650 def_bool y
5651 - depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
5652 + depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
5653
5654 config X86_INTEL_USERCOPY
5655 def_bool y
5656 @@ -411,7 +411,7 @@ config X86_CMPXCHG64
5657 # generates cmov.
5658 config X86_CMOV
5659 def_bool y
5660 - depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
5661 + depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
5662
5663 config X86_MINIMUM_CPU_FAMILY
5664 int
5665 diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
5666 index bf56e17..05f9891 100644
5667 --- a/arch/x86/Kconfig.debug
5668 +++ b/arch/x86/Kconfig.debug
5669 @@ -81,7 +81,7 @@ config X86_PTDUMP
5670 config DEBUG_RODATA
5671 bool "Write protect kernel read-only data structures"
5672 default y
5673 - depends on DEBUG_KERNEL
5674 + depends on DEBUG_KERNEL && BROKEN
5675 ---help---
5676 Mark the kernel read-only data as write-protected in the pagetables,
5677 in order to catch accidental (and incorrect) writes to such const
5678 @@ -99,7 +99,7 @@ config DEBUG_RODATA_TEST
5679
5680 config DEBUG_SET_MODULE_RONX
5681 bool "Set loadable kernel module data as NX and text as RO"
5682 - depends on MODULES
5683 + depends on MODULES && BROKEN
5684 ---help---
5685 This option helps catch unintended modifications to loadable
5686 kernel module's text and read-only data. It also prevents execution
5687 diff --git a/arch/x86/Makefile b/arch/x86/Makefile
5688 index b02e509..2631e48 100644
5689 --- a/arch/x86/Makefile
5690 +++ b/arch/x86/Makefile
5691 @@ -46,6 +46,7 @@ else
5692 UTS_MACHINE := x86_64
5693 CHECKFLAGS += -D__x86_64__ -m64
5694
5695 + biarch := $(call cc-option,-m64)
5696 KBUILD_AFLAGS += -m64
5697 KBUILD_CFLAGS += -m64
5698
5699 @@ -195,3 +196,12 @@ define archhelp
5700 echo ' FDARGS="..." arguments for the booted kernel'
5701 echo ' FDINITRD=file initrd for the booted kernel'
5702 endef
5703 +
5704 +define OLD_LD
5705 +
5706 +*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
5707 +*** Please upgrade your binutils to 2.18 or newer
5708 +endef
5709 +
5710 +archprepare:
5711 + $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
5712 diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
5713 index 95365a8..52f857b 100644
5714 --- a/arch/x86/boot/Makefile
5715 +++ b/arch/x86/boot/Makefile
5716 @@ -63,6 +63,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D__KERNEL__ \
5717 $(call cc-option, -fno-stack-protector) \
5718 $(call cc-option, -mpreferred-stack-boundary=2)
5719 KBUILD_CFLAGS += $(call cc-option, -m32)
5720 +ifdef CONSTIFY_PLUGIN
5721 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
5722 +endif
5723 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
5724 GCOV_PROFILE := n
5725
5726 diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h
5727 index 878e4b9..20537ab 100644
5728 --- a/arch/x86/boot/bitops.h
5729 +++ b/arch/x86/boot/bitops.h
5730 @@ -26,7 +26,7 @@ static inline int variable_test_bit(int nr, const void *addr)
5731 u8 v;
5732 const u32 *p = (const u32 *)addr;
5733
5734 - asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
5735 + asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
5736 return v;
5737 }
5738
5739 @@ -37,7 +37,7 @@ static inline int variable_test_bit(int nr, const void *addr)
5740
5741 static inline void set_bit(int nr, void *addr)
5742 {
5743 - asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
5744 + asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
5745 }
5746
5747 #endif /* BOOT_BITOPS_H */
5748 diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
5749 index c7093bd..d4247ffe0 100644
5750 --- a/arch/x86/boot/boot.h
5751 +++ b/arch/x86/boot/boot.h
5752 @@ -85,7 +85,7 @@ static inline void io_delay(void)
5753 static inline u16 ds(void)
5754 {
5755 u16 seg;
5756 - asm("movw %%ds,%0" : "=rm" (seg));
5757 + asm volatile("movw %%ds,%0" : "=rm" (seg));
5758 return seg;
5759 }
5760
5761 @@ -181,7 +181,7 @@ static inline void wrgs32(u32 v, addr_t addr)
5762 static inline int memcmp(const void *s1, const void *s2, size_t len)
5763 {
5764 u8 diff;
5765 - asm("repe; cmpsb; setnz %0"
5766 + asm volatile("repe; cmpsb; setnz %0"
5767 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
5768 return diff;
5769 }
5770 diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
5771 index 09664ef..edc5d03 100644
5772 --- a/arch/x86/boot/compressed/Makefile
5773 +++ b/arch/x86/boot/compressed/Makefile
5774 @@ -14,6 +14,9 @@ cflags-$(CONFIG_X86_64) := -mcmodel=small
5775 KBUILD_CFLAGS += $(cflags-y)
5776 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
5777 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
5778 +ifdef CONSTIFY_PLUGIN
5779 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
5780 +endif
5781
5782 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
5783 GCOV_PROFILE := n
5784 diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
5785 index 67a655a..b924059 100644
5786 --- a/arch/x86/boot/compressed/head_32.S
5787 +++ b/arch/x86/boot/compressed/head_32.S
5788 @@ -76,7 +76,7 @@ ENTRY(startup_32)
5789 notl %eax
5790 andl %eax, %ebx
5791 #else
5792 - movl $LOAD_PHYSICAL_ADDR, %ebx
5793 + movl $____LOAD_PHYSICAL_ADDR, %ebx
5794 #endif
5795
5796 /* Target address to relocate to for decompression */
5797 @@ -162,7 +162,7 @@ relocated:
5798 * and where it was actually loaded.
5799 */
5800 movl %ebp, %ebx
5801 - subl $LOAD_PHYSICAL_ADDR, %ebx
5802 + subl $____LOAD_PHYSICAL_ADDR, %ebx
5803 jz 2f /* Nothing to be done if loaded at compiled addr. */
5804 /*
5805 * Process relocations.
5806 @@ -170,8 +170,7 @@ relocated:
5807
5808 1: subl $4, %edi
5809 movl (%edi), %ecx
5810 - testl %ecx, %ecx
5811 - jz 2f
5812 + jecxz 2f
5813 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
5814 jmp 1b
5815 2:
5816 diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
5817 index 35af09d..99c9676 100644
5818 --- a/arch/x86/boot/compressed/head_64.S
5819 +++ b/arch/x86/boot/compressed/head_64.S
5820 @@ -91,7 +91,7 @@ ENTRY(startup_32)
5821 notl %eax
5822 andl %eax, %ebx
5823 #else
5824 - movl $LOAD_PHYSICAL_ADDR, %ebx
5825 + movl $____LOAD_PHYSICAL_ADDR, %ebx
5826 #endif
5827
5828 /* Target address to relocate to for decompression */
5829 @@ -233,7 +233,7 @@ ENTRY(startup_64)
5830 notq %rax
5831 andq %rax, %rbp
5832 #else
5833 - movq $LOAD_PHYSICAL_ADDR, %rbp
5834 + movq $____LOAD_PHYSICAL_ADDR, %rbp
5835 #endif
5836
5837 /* Target address to relocate to for decompression */
5838 diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
5839 index 3a19d04..7c1d55a 100644
5840 --- a/arch/x86/boot/compressed/misc.c
5841 +++ b/arch/x86/boot/compressed/misc.c
5842 @@ -310,7 +310,7 @@ static void parse_elf(void *output)
5843 case PT_LOAD:
5844 #ifdef CONFIG_RELOCATABLE
5845 dest = output;
5846 - dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
5847 + dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
5848 #else
5849 dest = (void *)(phdr->p_paddr);
5850 #endif
5851 @@ -363,7 +363,7 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap,
5852 error("Destination address too large");
5853 #endif
5854 #ifndef CONFIG_RELOCATABLE
5855 - if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
5856 + if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
5857 error("Wrong destination address");
5858 #endif
5859
5860 diff --git a/arch/x86/boot/compressed/relocs.c b/arch/x86/boot/compressed/relocs.c
5861 index 89bbf4e..869908e 100644
5862 --- a/arch/x86/boot/compressed/relocs.c
5863 +++ b/arch/x86/boot/compressed/relocs.c
5864 @@ -13,8 +13,11 @@
5865
5866 static void die(char *fmt, ...);
5867
5868 +#include "../../../../include/generated/autoconf.h"
5869 +
5870 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
5871 static Elf32_Ehdr ehdr;
5872 +static Elf32_Phdr *phdr;
5873 static unsigned long reloc_count, reloc_idx;
5874 static unsigned long *relocs;
5875
5876 @@ -270,9 +273,39 @@ static void read_ehdr(FILE *fp)
5877 }
5878 }
5879
5880 +static void read_phdrs(FILE *fp)
5881 +{
5882 + unsigned int i;
5883 +
5884 + phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
5885 + if (!phdr) {
5886 + die("Unable to allocate %d program headers\n",
5887 + ehdr.e_phnum);
5888 + }
5889 + if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
5890 + die("Seek to %d failed: %s\n",
5891 + ehdr.e_phoff, strerror(errno));
5892 + }
5893 + if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
5894 + die("Cannot read ELF program headers: %s\n",
5895 + strerror(errno));
5896 + }
5897 + for(i = 0; i < ehdr.e_phnum; i++) {
5898 + phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
5899 + phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
5900 + phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
5901 + phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
5902 + phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
5903 + phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
5904 + phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
5905 + phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
5906 + }
5907 +
5908 +}
5909 +
5910 static void read_shdrs(FILE *fp)
5911 {
5912 - int i;
5913 + unsigned int i;
5914 Elf32_Shdr shdr;
5915
5916 secs = calloc(ehdr.e_shnum, sizeof(struct section));
5917 @@ -307,7 +340,7 @@ static void read_shdrs(FILE *fp)
5918
5919 static void read_strtabs(FILE *fp)
5920 {
5921 - int i;
5922 + unsigned int i;
5923 for (i = 0; i < ehdr.e_shnum; i++) {
5924 struct section *sec = &secs[i];
5925 if (sec->shdr.sh_type != SHT_STRTAB) {
5926 @@ -332,7 +365,7 @@ static void read_strtabs(FILE *fp)
5927
5928 static void read_symtabs(FILE *fp)
5929 {
5930 - int i,j;
5931 + unsigned int i,j;
5932 for (i = 0; i < ehdr.e_shnum; i++) {
5933 struct section *sec = &secs[i];
5934 if (sec->shdr.sh_type != SHT_SYMTAB) {
5935 @@ -365,7 +398,9 @@ static void read_symtabs(FILE *fp)
5936
5937 static void read_relocs(FILE *fp)
5938 {
5939 - int i,j;
5940 + unsigned int i,j;
5941 + uint32_t base;
5942 +
5943 for (i = 0; i < ehdr.e_shnum; i++) {
5944 struct section *sec = &secs[i];
5945 if (sec->shdr.sh_type != SHT_REL) {
5946 @@ -385,9 +420,18 @@ static void read_relocs(FILE *fp)
5947 die("Cannot read symbol table: %s\n",
5948 strerror(errno));
5949 }
5950 + base = 0;
5951 + for (j = 0; j < ehdr.e_phnum; j++) {
5952 + if (phdr[j].p_type != PT_LOAD )
5953 + continue;
5954 + if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
5955 + continue;
5956 + base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
5957 + break;
5958 + }
5959 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
5960 Elf32_Rel *rel = &sec->reltab[j];
5961 - rel->r_offset = elf32_to_cpu(rel->r_offset);
5962 + rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
5963 rel->r_info = elf32_to_cpu(rel->r_info);
5964 }
5965 }
5966 @@ -396,14 +440,14 @@ static void read_relocs(FILE *fp)
5967
5968 static void print_absolute_symbols(void)
5969 {
5970 - int i;
5971 + unsigned int i;
5972 printf("Absolute symbols\n");
5973 printf(" Num: Value Size Type Bind Visibility Name\n");
5974 for (i = 0; i < ehdr.e_shnum; i++) {
5975 struct section *sec = &secs[i];
5976 char *sym_strtab;
5977 Elf32_Sym *sh_symtab;
5978 - int j;
5979 + unsigned int j;
5980
5981 if (sec->shdr.sh_type != SHT_SYMTAB) {
5982 continue;
5983 @@ -431,14 +475,14 @@ static void print_absolute_symbols(void)
5984
5985 static void print_absolute_relocs(void)
5986 {
5987 - int i, printed = 0;
5988 + unsigned int i, printed = 0;
5989
5990 for (i = 0; i < ehdr.e_shnum; i++) {
5991 struct section *sec = &secs[i];
5992 struct section *sec_applies, *sec_symtab;
5993 char *sym_strtab;
5994 Elf32_Sym *sh_symtab;
5995 - int j;
5996 + unsigned int j;
5997 if (sec->shdr.sh_type != SHT_REL) {
5998 continue;
5999 }
6000 @@ -499,13 +543,13 @@ static void print_absolute_relocs(void)
6001
6002 static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
6003 {
6004 - int i;
6005 + unsigned int i;
6006 /* Walk through the relocations */
6007 for (i = 0; i < ehdr.e_shnum; i++) {
6008 char *sym_strtab;
6009 Elf32_Sym *sh_symtab;
6010 struct section *sec_applies, *sec_symtab;
6011 - int j;
6012 + unsigned int j;
6013 struct section *sec = &secs[i];
6014
6015 if (sec->shdr.sh_type != SHT_REL) {
6016 @@ -530,6 +574,22 @@ static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
6017 !is_rel_reloc(sym_name(sym_strtab, sym))) {
6018 continue;
6019 }
6020 + /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
6021 + if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
6022 + continue;
6023 +
6024 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
6025 + /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
6026 + if (!strcmp(sec_name(sym->st_shndx), ".module.text") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
6027 + continue;
6028 + if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
6029 + continue;
6030 + if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
6031 + continue;
6032 + if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
6033 + continue;
6034 +#endif
6035 +
6036 switch (r_type) {
6037 case R_386_NONE:
6038 case R_386_PC32:
6039 @@ -571,7 +631,7 @@ static int cmp_relocs(const void *va, const void *vb)
6040
6041 static void emit_relocs(int as_text)
6042 {
6043 - int i;
6044 + unsigned int i;
6045 /* Count how many relocations I have and allocate space for them. */
6046 reloc_count = 0;
6047 walk_relocs(count_reloc);
6048 @@ -665,6 +725,7 @@ int main(int argc, char **argv)
6049 fname, strerror(errno));
6050 }
6051 read_ehdr(fp);
6052 + read_phdrs(fp);
6053 read_shdrs(fp);
6054 read_strtabs(fp);
6055 read_symtabs(fp);
6056 diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
6057 index 4d3ff03..e4972ff 100644
6058 --- a/arch/x86/boot/cpucheck.c
6059 +++ b/arch/x86/boot/cpucheck.c
6060 @@ -74,7 +74,7 @@ static int has_fpu(void)
6061 u16 fcw = -1, fsw = -1;
6062 u32 cr0;
6063
6064 - asm("movl %%cr0,%0" : "=r" (cr0));
6065 + asm volatile("movl %%cr0,%0" : "=r" (cr0));
6066 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
6067 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
6068 asm volatile("movl %0,%%cr0" : : "r" (cr0));
6069 @@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
6070 {
6071 u32 f0, f1;
6072
6073 - asm("pushfl ; "
6074 + asm volatile("pushfl ; "
6075 "pushfl ; "
6076 "popl %0 ; "
6077 "movl %0,%1 ; "
6078 @@ -115,7 +115,7 @@ static void get_flags(void)
6079 set_bit(X86_FEATURE_FPU, cpu.flags);
6080
6081 if (has_eflag(X86_EFLAGS_ID)) {
6082 - asm("cpuid"
6083 + asm volatile("cpuid"
6084 : "=a" (max_intel_level),
6085 "=b" (cpu_vendor[0]),
6086 "=d" (cpu_vendor[1]),
6087 @@ -124,7 +124,7 @@ static void get_flags(void)
6088
6089 if (max_intel_level >= 0x00000001 &&
6090 max_intel_level <= 0x0000ffff) {
6091 - asm("cpuid"
6092 + asm volatile("cpuid"
6093 : "=a" (tfms),
6094 "=c" (cpu.flags[4]),
6095 "=d" (cpu.flags[0])
6096 @@ -136,7 +136,7 @@ static void get_flags(void)
6097 cpu.model += ((tfms >> 16) & 0xf) << 4;
6098 }
6099
6100 - asm("cpuid"
6101 + asm volatile("cpuid"
6102 : "=a" (max_amd_level)
6103 : "a" (0x80000000)
6104 : "ebx", "ecx", "edx");
6105 @@ -144,7 +144,7 @@ static void get_flags(void)
6106 if (max_amd_level >= 0x80000001 &&
6107 max_amd_level <= 0x8000ffff) {
6108 u32 eax = 0x80000001;
6109 - asm("cpuid"
6110 + asm volatile("cpuid"
6111 : "+a" (eax),
6112 "=c" (cpu.flags[6]),
6113 "=d" (cpu.flags[1])
6114 @@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
6115 u32 ecx = MSR_K7_HWCR;
6116 u32 eax, edx;
6117
6118 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6119 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6120 eax &= ~(1 << 15);
6121 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6122 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6123
6124 get_flags(); /* Make sure it really did something */
6125 err = check_flags();
6126 @@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
6127 u32 ecx = MSR_VIA_FCR;
6128 u32 eax, edx;
6129
6130 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6131 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6132 eax |= (1<<1)|(1<<7);
6133 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6134 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6135
6136 set_bit(X86_FEATURE_CX8, cpu.flags);
6137 err = check_flags();
6138 @@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
6139 u32 eax, edx;
6140 u32 level = 1;
6141
6142 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6143 - asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
6144 - asm("cpuid"
6145 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6146 + asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
6147 + asm volatile("cpuid"
6148 : "+a" (level), "=d" (cpu.flags[0])
6149 : : "ecx", "ebx");
6150 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6151 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6152
6153 err = check_flags();
6154 }
6155 diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
6156 index bdb4d45..0476680 100644
6157 --- a/arch/x86/boot/header.S
6158 +++ b/arch/x86/boot/header.S
6159 @@ -224,7 +224,7 @@ setup_data: .quad 0 # 64-bit physical pointer to
6160 # single linked list of
6161 # struct setup_data
6162
6163 -pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
6164 +pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
6165
6166 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
6167 #define VO_INIT_SIZE (VO__end - VO__text)
6168 diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
6169 index db75d07..8e6d0af 100644
6170 --- a/arch/x86/boot/memory.c
6171 +++ b/arch/x86/boot/memory.c
6172 @@ -19,7 +19,7 @@
6173
6174 static int detect_memory_e820(void)
6175 {
6176 - int count = 0;
6177 + unsigned int count = 0;
6178 struct biosregs ireg, oreg;
6179 struct e820entry *desc = boot_params.e820_map;
6180 static struct e820entry buf; /* static so it is zeroed */
6181 diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c
6182 index 11e8c6e..fdbb1ed 100644
6183 --- a/arch/x86/boot/video-vesa.c
6184 +++ b/arch/x86/boot/video-vesa.c
6185 @@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
6186
6187 boot_params.screen_info.vesapm_seg = oreg.es;
6188 boot_params.screen_info.vesapm_off = oreg.di;
6189 + boot_params.screen_info.vesapm_size = oreg.cx;
6190 }
6191
6192 /*
6193 diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
6194 index 43eda28..5ab5fdb 100644
6195 --- a/arch/x86/boot/video.c
6196 +++ b/arch/x86/boot/video.c
6197 @@ -96,7 +96,7 @@ static void store_mode_params(void)
6198 static unsigned int get_entry(void)
6199 {
6200 char entry_buf[4];
6201 - int i, len = 0;
6202 + unsigned int i, len = 0;
6203 int key;
6204 unsigned int v;
6205
6206 diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
6207 index 5b577d5..3c1fed4 100644
6208 --- a/arch/x86/crypto/aes-x86_64-asm_64.S
6209 +++ b/arch/x86/crypto/aes-x86_64-asm_64.S
6210 @@ -8,6 +8,8 @@
6211 * including this sentence is retained in full.
6212 */
6213
6214 +#include <asm/alternative-asm.h>
6215 +
6216 .extern crypto_ft_tab
6217 .extern crypto_it_tab
6218 .extern crypto_fl_tab
6219 @@ -71,6 +73,8 @@ FUNC: movq r1,r2; \
6220 je B192; \
6221 leaq 32(r9),r9;
6222
6223 +#define ret pax_force_retaddr 0, 1; ret
6224 +
6225 #define epilogue(r1,r2,r3,r4,r5,r6,r7,r8,r9) \
6226 movq r1,r2; \
6227 movq r3,r4; \
6228 diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
6229 index be6d9e3..21fbbca 100644
6230 --- a/arch/x86/crypto/aesni-intel_asm.S
6231 +++ b/arch/x86/crypto/aesni-intel_asm.S
6232 @@ -31,6 +31,7 @@
6233
6234 #include <linux/linkage.h>
6235 #include <asm/inst.h>
6236 +#include <asm/alternative-asm.h>
6237
6238 #ifdef __x86_64__
6239 .data
6240 @@ -1436,7 +1437,9 @@ _return_T_done_decrypt:
6241 pop %r14
6242 pop %r13
6243 pop %r12
6244 + pax_force_retaddr 0, 1
6245 ret
6246 +ENDPROC(aesni_gcm_dec)
6247
6248
6249 /*****************************************************************************
6250 @@ -1699,7 +1702,9 @@ _return_T_done_encrypt:
6251 pop %r14
6252 pop %r13
6253 pop %r12
6254 + pax_force_retaddr 0, 1
6255 ret
6256 +ENDPROC(aesni_gcm_enc)
6257
6258 #endif
6259
6260 @@ -1714,6 +1719,7 @@ _key_expansion_256a:
6261 pxor %xmm1, %xmm0
6262 movaps %xmm0, (TKEYP)
6263 add $0x10, TKEYP
6264 + pax_force_retaddr_bts
6265 ret
6266
6267 .align 4
6268 @@ -1738,6 +1744,7 @@ _key_expansion_192a:
6269 shufps $0b01001110, %xmm2, %xmm1
6270 movaps %xmm1, 0x10(TKEYP)
6271 add $0x20, TKEYP
6272 + pax_force_retaddr_bts
6273 ret
6274
6275 .align 4
6276 @@ -1757,6 +1764,7 @@ _key_expansion_192b:
6277
6278 movaps %xmm0, (TKEYP)
6279 add $0x10, TKEYP
6280 + pax_force_retaddr_bts
6281 ret
6282
6283 .align 4
6284 @@ -1769,6 +1777,7 @@ _key_expansion_256b:
6285 pxor %xmm1, %xmm2
6286 movaps %xmm2, (TKEYP)
6287 add $0x10, TKEYP
6288 + pax_force_retaddr_bts
6289 ret
6290
6291 /*
6292 @@ -1881,7 +1890,9 @@ ENTRY(aesni_set_key)
6293 #ifndef __x86_64__
6294 popl KEYP
6295 #endif
6296 + pax_force_retaddr 0, 1
6297 ret
6298 +ENDPROC(aesni_set_key)
6299
6300 /*
6301 * void aesni_enc(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
6302 @@ -1902,7 +1913,9 @@ ENTRY(aesni_enc)
6303 popl KLEN
6304 popl KEYP
6305 #endif
6306 + pax_force_retaddr 0, 1
6307 ret
6308 +ENDPROC(aesni_enc)
6309
6310 /*
6311 * _aesni_enc1: internal ABI
6312 @@ -1959,6 +1972,7 @@ _aesni_enc1:
6313 AESENC KEY STATE
6314 movaps 0x70(TKEYP), KEY
6315 AESENCLAST KEY STATE
6316 + pax_force_retaddr_bts
6317 ret
6318
6319 /*
6320 @@ -2067,6 +2081,7 @@ _aesni_enc4:
6321 AESENCLAST KEY STATE2
6322 AESENCLAST KEY STATE3
6323 AESENCLAST KEY STATE4
6324 + pax_force_retaddr_bts
6325 ret
6326
6327 /*
6328 @@ -2089,7 +2104,9 @@ ENTRY(aesni_dec)
6329 popl KLEN
6330 popl KEYP
6331 #endif
6332 + pax_force_retaddr 0, 1
6333 ret
6334 +ENDPROC(aesni_dec)
6335
6336 /*
6337 * _aesni_dec1: internal ABI
6338 @@ -2146,6 +2163,7 @@ _aesni_dec1:
6339 AESDEC KEY STATE
6340 movaps 0x70(TKEYP), KEY
6341 AESDECLAST KEY STATE
6342 + pax_force_retaddr_bts
6343 ret
6344
6345 /*
6346 @@ -2254,6 +2272,7 @@ _aesni_dec4:
6347 AESDECLAST KEY STATE2
6348 AESDECLAST KEY STATE3
6349 AESDECLAST KEY STATE4
6350 + pax_force_retaddr_bts
6351 ret
6352
6353 /*
6354 @@ -2311,7 +2330,9 @@ ENTRY(aesni_ecb_enc)
6355 popl KEYP
6356 popl LEN
6357 #endif
6358 + pax_force_retaddr 0, 1
6359 ret
6360 +ENDPROC(aesni_ecb_enc)
6361
6362 /*
6363 * void aesni_ecb_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
6364 @@ -2369,7 +2390,9 @@ ENTRY(aesni_ecb_dec)
6365 popl KEYP
6366 popl LEN
6367 #endif
6368 + pax_force_retaddr 0, 1
6369 ret
6370 +ENDPROC(aesni_ecb_dec)
6371
6372 /*
6373 * void aesni_cbc_enc(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
6374 @@ -2410,7 +2433,9 @@ ENTRY(aesni_cbc_enc)
6375 popl LEN
6376 popl IVP
6377 #endif
6378 + pax_force_retaddr 0, 1
6379 ret
6380 +ENDPROC(aesni_cbc_enc)
6381
6382 /*
6383 * void aesni_cbc_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
6384 @@ -2498,7 +2523,9 @@ ENTRY(aesni_cbc_dec)
6385 popl LEN
6386 popl IVP
6387 #endif
6388 + pax_force_retaddr 0, 1
6389 ret
6390 +ENDPROC(aesni_cbc_dec)
6391
6392 #ifdef __x86_64__
6393 .align 16
6394 @@ -2524,6 +2551,7 @@ _aesni_inc_init:
6395 mov $1, TCTR_LOW
6396 MOVQ_R64_XMM TCTR_LOW INC
6397 MOVQ_R64_XMM CTR TCTR_LOW
6398 + pax_force_retaddr_bts
6399 ret
6400
6401 /*
6402 @@ -2552,6 +2580,7 @@ _aesni_inc:
6403 .Linc_low:
6404 movaps CTR, IV
6405 PSHUFB_XMM BSWAP_MASK IV
6406 + pax_force_retaddr_bts
6407 ret
6408
6409 /*
6410 @@ -2612,5 +2641,7 @@ ENTRY(aesni_ctr_enc)
6411 .Lctr_enc_ret:
6412 movups IV, (IVP)
6413 .Lctr_enc_just_ret:
6414 + pax_force_retaddr 0, 1
6415 ret
6416 +ENDPROC(aesni_ctr_enc)
6417 #endif
6418 diff --git a/arch/x86/crypto/blowfish-x86_64-asm_64.S b/arch/x86/crypto/blowfish-x86_64-asm_64.S
6419 index 391d245..67f35c2 100644
6420 --- a/arch/x86/crypto/blowfish-x86_64-asm_64.S
6421 +++ b/arch/x86/crypto/blowfish-x86_64-asm_64.S
6422 @@ -20,6 +20,8 @@
6423 *
6424 */
6425
6426 +#include <asm/alternative-asm.h>
6427 +
6428 .file "blowfish-x86_64-asm.S"
6429 .text
6430
6431 @@ -151,9 +153,11 @@ __blowfish_enc_blk:
6432 jnz __enc_xor;
6433
6434 write_block();
6435 + pax_force_retaddr 0, 1
6436 ret;
6437 __enc_xor:
6438 xor_block();
6439 + pax_force_retaddr 0, 1
6440 ret;
6441
6442 .align 8
6443 @@ -188,6 +192,7 @@ blowfish_dec_blk:
6444
6445 movq %r11, %rbp;
6446
6447 + pax_force_retaddr 0, 1
6448 ret;
6449
6450 /**********************************************************************
6451 @@ -342,6 +347,7 @@ __blowfish_enc_blk_4way:
6452
6453 popq %rbx;
6454 popq %rbp;
6455 + pax_force_retaddr 0, 1
6456 ret;
6457
6458 __enc_xor4:
6459 @@ -349,6 +355,7 @@ __enc_xor4:
6460
6461 popq %rbx;
6462 popq %rbp;
6463 + pax_force_retaddr 0, 1
6464 ret;
6465
6466 .align 8
6467 @@ -386,5 +393,6 @@ blowfish_dec_blk_4way:
6468 popq %rbx;
6469 popq %rbp;
6470
6471 + pax_force_retaddr 0, 1
6472 ret;
6473
6474 diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S
6475 index 6214a9b..1f4fc9a 100644
6476 --- a/arch/x86/crypto/salsa20-x86_64-asm_64.S
6477 +++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S
6478 @@ -1,3 +1,5 @@
6479 +#include <asm/alternative-asm.h>
6480 +
6481 # enter ECRYPT_encrypt_bytes
6482 .text
6483 .p2align 5
6484 @@ -790,6 +792,7 @@ ECRYPT_encrypt_bytes:
6485 add %r11,%rsp
6486 mov %rdi,%rax
6487 mov %rsi,%rdx
6488 + pax_force_retaddr 0, 1
6489 ret
6490 # bytesatleast65:
6491 ._bytesatleast65:
6492 @@ -891,6 +894,7 @@ ECRYPT_keysetup:
6493 add %r11,%rsp
6494 mov %rdi,%rax
6495 mov %rsi,%rdx
6496 + pax_force_retaddr
6497 ret
6498 # enter ECRYPT_ivsetup
6499 .text
6500 @@ -917,4 +921,5 @@ ECRYPT_ivsetup:
6501 add %r11,%rsp
6502 mov %rdi,%rax
6503 mov %rsi,%rdx
6504 + pax_force_retaddr
6505 ret
6506 diff --git a/arch/x86/crypto/sha1_ssse3_asm.S b/arch/x86/crypto/sha1_ssse3_asm.S
6507 index b2c2f57..8470cab 100644
6508 --- a/arch/x86/crypto/sha1_ssse3_asm.S
6509 +++ b/arch/x86/crypto/sha1_ssse3_asm.S
6510 @@ -28,6 +28,8 @@
6511 * (at your option) any later version.
6512 */
6513
6514 +#include <asm/alternative-asm.h>
6515 +
6516 #define CTX %rdi // arg1
6517 #define BUF %rsi // arg2
6518 #define CNT %rdx // arg3
6519 @@ -104,6 +106,7 @@
6520 pop %r12
6521 pop %rbp
6522 pop %rbx
6523 + pax_force_retaddr 0, 1
6524 ret
6525
6526 .size \name, .-\name
6527 diff --git a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
6528 index 5b012a2..36d5364 100644
6529 --- a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
6530 +++ b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
6531 @@ -20,6 +20,8 @@
6532 *
6533 */
6534
6535 +#include <asm/alternative-asm.h>
6536 +
6537 .file "twofish-x86_64-asm-3way.S"
6538 .text
6539
6540 @@ -260,6 +262,7 @@ __twofish_enc_blk_3way:
6541 popq %r13;
6542 popq %r14;
6543 popq %r15;
6544 + pax_force_retaddr 0, 1
6545 ret;
6546
6547 __enc_xor3:
6548 @@ -271,6 +274,7 @@ __enc_xor3:
6549 popq %r13;
6550 popq %r14;
6551 popq %r15;
6552 + pax_force_retaddr 0, 1
6553 ret;
6554
6555 .global twofish_dec_blk_3way
6556 @@ -312,5 +316,6 @@ twofish_dec_blk_3way:
6557 popq %r13;
6558 popq %r14;
6559 popq %r15;
6560 + pax_force_retaddr 0, 1
6561 ret;
6562
6563 diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S
6564 index 7bcf3fc..f53832f 100644
6565 --- a/arch/x86/crypto/twofish-x86_64-asm_64.S
6566 +++ b/arch/x86/crypto/twofish-x86_64-asm_64.S
6567 @@ -21,6 +21,7 @@
6568 .text
6569
6570 #include <asm/asm-offsets.h>
6571 +#include <asm/alternative-asm.h>
6572
6573 #define a_offset 0
6574 #define b_offset 4
6575 @@ -268,6 +269,7 @@ twofish_enc_blk:
6576
6577 popq R1
6578 movq $1,%rax
6579 + pax_force_retaddr 0, 1
6580 ret
6581
6582 twofish_dec_blk:
6583 @@ -319,4 +321,5 @@ twofish_dec_blk:
6584
6585 popq R1
6586 movq $1,%rax
6587 + pax_force_retaddr 0, 1
6588 ret
6589 diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
6590 index fd84387..0b4af7d 100644
6591 --- a/arch/x86/ia32/ia32_aout.c
6592 +++ b/arch/x86/ia32/ia32_aout.c
6593 @@ -162,6 +162,8 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
6594 unsigned long dump_start, dump_size;
6595 struct user32 dump;
6596
6597 + memset(&dump, 0, sizeof(dump));
6598 +
6599 fs = get_fs();
6600 set_fs(KERNEL_DS);
6601 has_dumped = 1;
6602 diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
6603 index 6557769..ef6ae89 100644
6604 --- a/arch/x86/ia32/ia32_signal.c
6605 +++ b/arch/x86/ia32/ia32_signal.c
6606 @@ -169,7 +169,7 @@ asmlinkage long sys32_sigaltstack(const stack_ia32_t __user *uss_ptr,
6607 }
6608 seg = get_fs();
6609 set_fs(KERNEL_DS);
6610 - ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss, regs->sp);
6611 + ret = do_sigaltstack(uss_ptr ? (const stack_t __force_user *)&uss : NULL, (stack_t __force_user *)&uoss, regs->sp);
6612 set_fs(seg);
6613 if (ret >= 0 && uoss_ptr) {
6614 if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(stack_ia32_t)))
6615 @@ -370,7 +370,7 @@ static int ia32_setup_sigcontext(struct sigcontext_ia32 __user *sc,
6616 */
6617 static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
6618 size_t frame_size,
6619 - void **fpstate)
6620 + void __user **fpstate)
6621 {
6622 unsigned long sp;
6623
6624 @@ -391,7 +391,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
6625
6626 if (used_math()) {
6627 sp = sp - sig_xstate_ia32_size;
6628 - *fpstate = (struct _fpstate_ia32 *) sp;
6629 + *fpstate = (struct _fpstate_ia32 __user *) sp;
6630 if (save_i387_xstate_ia32(*fpstate) < 0)
6631 return (void __user *) -1L;
6632 }
6633 @@ -399,7 +399,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
6634 sp -= frame_size;
6635 /* Align the stack pointer according to the i386 ABI,
6636 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
6637 - sp = ((sp + 4) & -16ul) - 4;
6638 + sp = ((sp - 12) & -16ul) - 4;
6639 return (void __user *) sp;
6640 }
6641
6642 @@ -457,7 +457,7 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
6643 * These are actually not used anymore, but left because some
6644 * gdb versions depend on them as a marker.
6645 */
6646 - put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
6647 + put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
6648 } put_user_catch(err);
6649
6650 if (err)
6651 @@ -499,7 +499,7 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
6652 0xb8,
6653 __NR_ia32_rt_sigreturn,
6654 0x80cd,
6655 - 0,
6656 + 0
6657 };
6658
6659 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
6660 @@ -529,16 +529,18 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
6661
6662 if (ka->sa.sa_flags & SA_RESTORER)
6663 restorer = ka->sa.sa_restorer;
6664 + else if (current->mm->context.vdso)
6665 + /* Return stub is in 32bit vsyscall page */
6666 + restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
6667 else
6668 - restorer = VDSO32_SYMBOL(current->mm->context.vdso,
6669 - rt_sigreturn);
6670 + restorer = &frame->retcode;
6671 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
6672
6673 /*
6674 * Not actually used anymore, but left because some gdb
6675 * versions need it.
6676 */
6677 - put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
6678 + put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
6679 } put_user_catch(err);
6680
6681 if (err)
6682 diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
6683 index a6253ec..4ad2120 100644
6684 --- a/arch/x86/ia32/ia32entry.S
6685 +++ b/arch/x86/ia32/ia32entry.S
6686 @@ -13,7 +13,9 @@
6687 #include <asm/thread_info.h>
6688 #include <asm/segment.h>
6689 #include <asm/irqflags.h>
6690 +#include <asm/pgtable.h>
6691 #include <linux/linkage.h>
6692 +#include <asm/alternative-asm.h>
6693
6694 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
6695 #include <linux/elf-em.h>
6696 @@ -95,6 +97,32 @@ ENTRY(native_irq_enable_sysexit)
6697 ENDPROC(native_irq_enable_sysexit)
6698 #endif
6699
6700 + .macro pax_enter_kernel_user
6701 + pax_set_fptr_mask
6702 +#ifdef CONFIG_PAX_MEMORY_UDEREF
6703 + call pax_enter_kernel_user
6704 +#endif
6705 + .endm
6706 +
6707 + .macro pax_exit_kernel_user
6708 +#ifdef CONFIG_PAX_MEMORY_UDEREF
6709 + call pax_exit_kernel_user
6710 +#endif
6711 +#ifdef CONFIG_PAX_RANDKSTACK
6712 + pushq %rax
6713 + pushq %r11
6714 + call pax_randomize_kstack
6715 + popq %r11
6716 + popq %rax
6717 +#endif
6718 + .endm
6719 +
6720 +.macro pax_erase_kstack
6721 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
6722 + call pax_erase_kstack
6723 +#endif
6724 +.endm
6725 +
6726 /*
6727 * 32bit SYSENTER instruction entry.
6728 *
6729 @@ -121,12 +149,6 @@ ENTRY(ia32_sysenter_target)
6730 CFI_REGISTER rsp,rbp
6731 SWAPGS_UNSAFE_STACK
6732 movq PER_CPU_VAR(kernel_stack), %rsp
6733 - addq $(KERNEL_STACK_OFFSET),%rsp
6734 - /*
6735 - * No need to follow this irqs on/off section: the syscall
6736 - * disabled irqs, here we enable it straight after entry:
6737 - */
6738 - ENABLE_INTERRUPTS(CLBR_NONE)
6739 movl %ebp,%ebp /* zero extension */
6740 pushq_cfi $__USER32_DS
6741 /*CFI_REL_OFFSET ss,0*/
6742 @@ -134,25 +156,39 @@ ENTRY(ia32_sysenter_target)
6743 CFI_REL_OFFSET rsp,0
6744 pushfq_cfi
6745 /*CFI_REL_OFFSET rflags,0*/
6746 - movl 8*3-THREAD_SIZE+TI_sysenter_return(%rsp), %r10d
6747 - CFI_REGISTER rip,r10
6748 + orl $X86_EFLAGS_IF,(%rsp)
6749 + GET_THREAD_INFO(%r11)
6750 + movl TI_sysenter_return(%r11), %r11d
6751 + CFI_REGISTER rip,r11
6752 pushq_cfi $__USER32_CS
6753 /*CFI_REL_OFFSET cs,0*/
6754 movl %eax, %eax
6755 - pushq_cfi %r10
6756 + pushq_cfi %r11
6757 CFI_REL_OFFSET rip,0
6758 pushq_cfi %rax
6759 cld
6760 SAVE_ARGS 0,1,0
6761 + pax_enter_kernel_user
6762 + /*
6763 + * No need to follow this irqs on/off section: the syscall
6764 + * disabled irqs, here we enable it straight after entry:
6765 + */
6766 + ENABLE_INTERRUPTS(CLBR_NONE)
6767 /* no need to do an access_ok check here because rbp has been
6768 32bit zero extended */
6769 +
6770 +#ifdef CONFIG_PAX_MEMORY_UDEREF
6771 + mov $PAX_USER_SHADOW_BASE,%r11
6772 + add %r11,%rbp
6773 +#endif
6774 +
6775 1: movl (%rbp),%ebp
6776 .section __ex_table,"a"
6777 .quad 1b,ia32_badarg
6778 .previous
6779 - GET_THREAD_INFO(%r10)
6780 - orl $TS_COMPAT,TI_status(%r10)
6781 - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
6782 + GET_THREAD_INFO(%r11)
6783 + orl $TS_COMPAT,TI_status(%r11)
6784 + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
6785 CFI_REMEMBER_STATE
6786 jnz sysenter_tracesys
6787 cmpq $(IA32_NR_syscalls-1),%rax
6788 @@ -162,13 +198,15 @@ sysenter_do_call:
6789 sysenter_dispatch:
6790 call *ia32_sys_call_table(,%rax,8)
6791 movq %rax,RAX-ARGOFFSET(%rsp)
6792 - GET_THREAD_INFO(%r10)
6793 + GET_THREAD_INFO(%r11)
6794 DISABLE_INTERRUPTS(CLBR_NONE)
6795 TRACE_IRQS_OFF
6796 - testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
6797 + testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
6798 jnz sysexit_audit
6799 sysexit_from_sys_call:
6800 - andl $~TS_COMPAT,TI_status(%r10)
6801 + pax_exit_kernel_user
6802 + pax_erase_kstack
6803 + andl $~TS_COMPAT,TI_status(%r11)
6804 /* clear IF, that popfq doesn't enable interrupts early */
6805 andl $~0x200,EFLAGS-R11(%rsp)
6806 movl RIP-R11(%rsp),%edx /* User %eip */
6807 @@ -194,6 +232,9 @@ sysexit_from_sys_call:
6808 movl %eax,%esi /* 2nd arg: syscall number */
6809 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
6810 call audit_syscall_entry
6811 +
6812 + pax_erase_kstack
6813 +
6814 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
6815 cmpq $(IA32_NR_syscalls-1),%rax
6816 ja ia32_badsys
6817 @@ -205,7 +246,7 @@ sysexit_from_sys_call:
6818 .endm
6819
6820 .macro auditsys_exit exit
6821 - testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
6822 + testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
6823 jnz ia32_ret_from_sys_call
6824 TRACE_IRQS_ON
6825 sti
6826 @@ -215,12 +256,12 @@ sysexit_from_sys_call:
6827 movzbl %al,%edi /* zero-extend that into %edi */
6828 inc %edi /* first arg, 0->1(AUDITSC_SUCCESS), 1->2(AUDITSC_FAILURE) */
6829 call audit_syscall_exit
6830 - GET_THREAD_INFO(%r10)
6831 + GET_THREAD_INFO(%r11)
6832 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall return value */
6833 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
6834 cli
6835 TRACE_IRQS_OFF
6836 - testl %edi,TI_flags(%r10)
6837 + testl %edi,TI_flags(%r11)
6838 jz \exit
6839 CLEAR_RREGS -ARGOFFSET
6840 jmp int_with_check
6841 @@ -238,7 +279,7 @@ sysexit_audit:
6842
6843 sysenter_tracesys:
6844 #ifdef CONFIG_AUDITSYSCALL
6845 - testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
6846 + testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
6847 jz sysenter_auditsys
6848 #endif
6849 SAVE_REST
6850 @@ -246,6 +287,9 @@ sysenter_tracesys:
6851 movq $-ENOSYS,RAX(%rsp)/* ptrace can change this for a bad syscall */
6852 movq %rsp,%rdi /* &pt_regs -> arg1 */
6853 call syscall_trace_enter
6854 +
6855 + pax_erase_kstack
6856 +
6857 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
6858 RESTORE_REST
6859 cmpq $(IA32_NR_syscalls-1),%rax
6860 @@ -277,19 +321,20 @@ ENDPROC(ia32_sysenter_target)
6861 ENTRY(ia32_cstar_target)
6862 CFI_STARTPROC32 simple
6863 CFI_SIGNAL_FRAME
6864 - CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
6865 + CFI_DEF_CFA rsp,0
6866 CFI_REGISTER rip,rcx
6867 /*CFI_REGISTER rflags,r11*/
6868 SWAPGS_UNSAFE_STACK
6869 movl %esp,%r8d
6870 CFI_REGISTER rsp,r8
6871 movq PER_CPU_VAR(kernel_stack),%rsp
6872 + SAVE_ARGS 8*6,0,0
6873 + pax_enter_kernel_user
6874 /*
6875 * No need to follow this irqs on/off section: the syscall
6876 * disabled irqs and here we enable it straight after entry:
6877 */
6878 ENABLE_INTERRUPTS(CLBR_NONE)
6879 - SAVE_ARGS 8,0,0
6880 movl %eax,%eax /* zero extension */
6881 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
6882 movq %rcx,RIP-ARGOFFSET(%rsp)
6883 @@ -305,13 +350,19 @@ ENTRY(ia32_cstar_target)
6884 /* no need to do an access_ok check here because r8 has been
6885 32bit zero extended */
6886 /* hardware stack frame is complete now */
6887 +
6888 +#ifdef CONFIG_PAX_MEMORY_UDEREF
6889 + mov $PAX_USER_SHADOW_BASE,%r11
6890 + add %r11,%r8
6891 +#endif
6892 +
6893 1: movl (%r8),%r9d
6894 .section __ex_table,"a"
6895 .quad 1b,ia32_badarg
6896 .previous
6897 - GET_THREAD_INFO(%r10)
6898 - orl $TS_COMPAT,TI_status(%r10)
6899 - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
6900 + GET_THREAD_INFO(%r11)
6901 + orl $TS_COMPAT,TI_status(%r11)
6902 + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
6903 CFI_REMEMBER_STATE
6904 jnz cstar_tracesys
6905 cmpq $IA32_NR_syscalls-1,%rax
6906 @@ -321,13 +372,15 @@ cstar_do_call:
6907 cstar_dispatch:
6908 call *ia32_sys_call_table(,%rax,8)
6909 movq %rax,RAX-ARGOFFSET(%rsp)
6910 - GET_THREAD_INFO(%r10)
6911 + GET_THREAD_INFO(%r11)
6912 DISABLE_INTERRUPTS(CLBR_NONE)
6913 TRACE_IRQS_OFF
6914 - testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
6915 + testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
6916 jnz sysretl_audit
6917 sysretl_from_sys_call:
6918 - andl $~TS_COMPAT,TI_status(%r10)
6919 + pax_exit_kernel_user
6920 + pax_erase_kstack
6921 + andl $~TS_COMPAT,TI_status(%r11)
6922 RESTORE_ARGS 0,-ARG_SKIP,0,0,0
6923 movl RIP-ARGOFFSET(%rsp),%ecx
6924 CFI_REGISTER rip,rcx
6925 @@ -355,7 +408,7 @@ sysretl_audit:
6926
6927 cstar_tracesys:
6928 #ifdef CONFIG_AUDITSYSCALL
6929 - testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
6930 + testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
6931 jz cstar_auditsys
6932 #endif
6933 xchgl %r9d,%ebp
6934 @@ -364,6 +417,9 @@ cstar_tracesys:
6935 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
6936 movq %rsp,%rdi /* &pt_regs -> arg1 */
6937 call syscall_trace_enter
6938 +
6939 + pax_erase_kstack
6940 +
6941 LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */
6942 RESTORE_REST
6943 xchgl %ebp,%r9d
6944 @@ -409,20 +465,21 @@ ENTRY(ia32_syscall)
6945 CFI_REL_OFFSET rip,RIP-RIP
6946 PARAVIRT_ADJUST_EXCEPTION_FRAME
6947 SWAPGS
6948 - /*
6949 - * No need to follow this irqs on/off section: the syscall
6950 - * disabled irqs and here we enable it straight after entry:
6951 - */
6952 - ENABLE_INTERRUPTS(CLBR_NONE)
6953 movl %eax,%eax
6954 pushq_cfi %rax
6955 cld
6956 /* note the registers are not zero extended to the sf.
6957 this could be a problem. */
6958 SAVE_ARGS 0,1,0
6959 - GET_THREAD_INFO(%r10)
6960 - orl $TS_COMPAT,TI_status(%r10)
6961 - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
6962 + pax_enter_kernel_user
6963 + /*
6964 + * No need to follow this irqs on/off section: the syscall
6965 + * disabled irqs and here we enable it straight after entry:
6966 + */
6967 + ENABLE_INTERRUPTS(CLBR_NONE)
6968 + GET_THREAD_INFO(%r11)
6969 + orl $TS_COMPAT,TI_status(%r11)
6970 + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
6971 jnz ia32_tracesys
6972 cmpq $(IA32_NR_syscalls-1),%rax
6973 ja ia32_badsys
6974 @@ -441,6 +498,9 @@ ia32_tracesys:
6975 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
6976 movq %rsp,%rdi /* &pt_regs -> arg1 */
6977 call syscall_trace_enter
6978 +
6979 + pax_erase_kstack
6980 +
6981 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
6982 RESTORE_REST
6983 cmpq $(IA32_NR_syscalls-1),%rax
6984 @@ -455,6 +515,7 @@ ia32_badsys:
6985
6986 quiet_ni_syscall:
6987 movq $-ENOSYS,%rax
6988 + pax_force_retaddr
6989 ret
6990 CFI_ENDPROC
6991
6992 diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
6993 index f6f5c53..b358b28 100644
6994 --- a/arch/x86/ia32/sys_ia32.c
6995 +++ b/arch/x86/ia32/sys_ia32.c
6996 @@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
6997 */
6998 static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
6999 {
7000 - typeof(ubuf->st_uid) uid = 0;
7001 - typeof(ubuf->st_gid) gid = 0;
7002 + typeof(((struct stat64 *)0)->st_uid) uid = 0;
7003 + typeof(((struct stat64 *)0)->st_gid) gid = 0;
7004 SET_UID(uid, stat->uid);
7005 SET_GID(gid, stat->gid);
7006 if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
7007 @@ -308,8 +308,8 @@ asmlinkage long sys32_rt_sigprocmask(int how, compat_sigset_t __user *set,
7008 }
7009 set_fs(KERNEL_DS);
7010 ret = sys_rt_sigprocmask(how,
7011 - set ? (sigset_t __user *)&s : NULL,
7012 - oset ? (sigset_t __user *)&s : NULL,
7013 + set ? (sigset_t __force_user *)&s : NULL,
7014 + oset ? (sigset_t __force_user *)&s : NULL,
7015 sigsetsize);
7016 set_fs(old_fs);
7017 if (ret)
7018 @@ -332,7 +332,7 @@ asmlinkage long sys32_alarm(unsigned int seconds)
7019 return alarm_setitimer(seconds);
7020 }
7021
7022 -asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int *stat_addr,
7023 +asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int __user *stat_addr,
7024 int options)
7025 {
7026 return compat_sys_wait4(pid, stat_addr, options, NULL);
7027 @@ -353,7 +353,7 @@ asmlinkage long sys32_sched_rr_get_interval(compat_pid_t pid,
7028 mm_segment_t old_fs = get_fs();
7029
7030 set_fs(KERNEL_DS);
7031 - ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
7032 + ret = sys_sched_rr_get_interval(pid, (struct timespec __force_user *)&t);
7033 set_fs(old_fs);
7034 if (put_compat_timespec(&t, interval))
7035 return -EFAULT;
7036 @@ -369,7 +369,7 @@ asmlinkage long sys32_rt_sigpending(compat_sigset_t __user *set,
7037 mm_segment_t old_fs = get_fs();
7038
7039 set_fs(KERNEL_DS);
7040 - ret = sys_rt_sigpending((sigset_t __user *)&s, sigsetsize);
7041 + ret = sys_rt_sigpending((sigset_t __force_user *)&s, sigsetsize);
7042 set_fs(old_fs);
7043 if (!ret) {
7044 switch (_NSIG_WORDS) {
7045 @@ -394,7 +394,7 @@ asmlinkage long sys32_rt_sigqueueinfo(int pid, int sig,
7046 if (copy_siginfo_from_user32(&info, uinfo))
7047 return -EFAULT;
7048 set_fs(KERNEL_DS);
7049 - ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *)&info);
7050 + ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __force_user *)&info);
7051 set_fs(old_fs);
7052 return ret;
7053 }
7054 @@ -439,7 +439,7 @@ asmlinkage long sys32_sendfile(int out_fd, int in_fd,
7055 return -EFAULT;
7056
7057 set_fs(KERNEL_DS);
7058 - ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *)&of : NULL,
7059 + ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __force_user *)&of : NULL,
7060 count);
7061 set_fs(old_fs);
7062
7063 diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
7064 index 091508b..e245ff2 100644
7065 --- a/arch/x86/include/asm/alternative-asm.h
7066 +++ b/arch/x86/include/asm/alternative-asm.h
7067 @@ -4,10 +4,10 @@
7068
7069 #ifdef CONFIG_SMP
7070 .macro LOCK_PREFIX
7071 -1: lock
7072 +672: lock
7073 .section .smp_locks,"a"
7074 .balign 4
7075 - .long 1b - .
7076 + .long 672b - .
7077 .previous
7078 .endm
7079 #else
7080 @@ -15,6 +15,45 @@
7081 .endm
7082 #endif
7083
7084 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
7085 + .macro pax_force_retaddr_bts rip=0
7086 + btsq $63,\rip(%rsp)
7087 + .endm
7088 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
7089 + .macro pax_force_retaddr rip=0, reload=0
7090 + btsq $63,\rip(%rsp)
7091 + .endm
7092 + .macro pax_force_fptr ptr
7093 + btsq $63,\ptr
7094 + .endm
7095 + .macro pax_set_fptr_mask
7096 + .endm
7097 +#endif
7098 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
7099 + .macro pax_force_retaddr rip=0, reload=0
7100 + .if \reload
7101 + pax_set_fptr_mask
7102 + .endif
7103 + orq %r10,\rip(%rsp)
7104 + .endm
7105 + .macro pax_force_fptr ptr
7106 + orq %r10,\ptr
7107 + .endm
7108 + .macro pax_set_fptr_mask
7109 + movabs $0x8000000000000000,%r10
7110 + .endm
7111 +#endif
7112 +#else
7113 + .macro pax_force_retaddr rip=0, reload=0
7114 + .endm
7115 + .macro pax_force_fptr ptr
7116 + .endm
7117 + .macro pax_force_retaddr_bts rip=0
7118 + .endm
7119 + .macro pax_set_fptr_mask
7120 + .endm
7121 +#endif
7122 +
7123 .macro altinstruction_entry orig alt feature orig_len alt_len
7124 .long \orig - .
7125 .long \alt - .
7126 diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
7127 index 37ad100..7d47faa 100644
7128 --- a/arch/x86/include/asm/alternative.h
7129 +++ b/arch/x86/include/asm/alternative.h
7130 @@ -89,7 +89,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
7131 ".section .discard,\"aw\",@progbits\n" \
7132 " .byte 0xff + (664f-663f) - (662b-661b)\n" /* rlen <= slen */ \
7133 ".previous\n" \
7134 - ".section .altinstr_replacement, \"ax\"\n" \
7135 + ".section .altinstr_replacement, \"a\"\n" \
7136 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
7137 ".previous"
7138
7139 diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
7140 index 1a6c09a..fec2432 100644
7141 --- a/arch/x86/include/asm/apic.h
7142 +++ b/arch/x86/include/asm/apic.h
7143 @@ -45,7 +45,7 @@ static inline void generic_apic_probe(void)
7144
7145 #ifdef CONFIG_X86_LOCAL_APIC
7146
7147 -extern unsigned int apic_verbosity;
7148 +extern int apic_verbosity;
7149 extern int local_apic_timer_c2_ok;
7150
7151 extern int disable_apic;
7152 diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
7153 index 20370c6..a2eb9b0 100644
7154 --- a/arch/x86/include/asm/apm.h
7155 +++ b/arch/x86/include/asm/apm.h
7156 @@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
7157 __asm__ __volatile__(APM_DO_ZERO_SEGS
7158 "pushl %%edi\n\t"
7159 "pushl %%ebp\n\t"
7160 - "lcall *%%cs:apm_bios_entry\n\t"
7161 + "lcall *%%ss:apm_bios_entry\n\t"
7162 "setc %%al\n\t"
7163 "popl %%ebp\n\t"
7164 "popl %%edi\n\t"
7165 @@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
7166 __asm__ __volatile__(APM_DO_ZERO_SEGS
7167 "pushl %%edi\n\t"
7168 "pushl %%ebp\n\t"
7169 - "lcall *%%cs:apm_bios_entry\n\t"
7170 + "lcall *%%ss:apm_bios_entry\n\t"
7171 "setc %%bl\n\t"
7172 "popl %%ebp\n\t"
7173 "popl %%edi\n\t"
7174 diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
7175 index 58cb6d4..ca9010d 100644
7176 --- a/arch/x86/include/asm/atomic.h
7177 +++ b/arch/x86/include/asm/atomic.h
7178 @@ -22,7 +22,18 @@
7179 */
7180 static inline int atomic_read(const atomic_t *v)
7181 {
7182 - return (*(volatile int *)&(v)->counter);
7183 + return (*(volatile const int *)&(v)->counter);
7184 +}
7185 +
7186 +/**
7187 + * atomic_read_unchecked - read atomic variable
7188 + * @v: pointer of type atomic_unchecked_t
7189 + *
7190 + * Atomically reads the value of @v.
7191 + */
7192 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
7193 +{
7194 + return (*(volatile const int *)&(v)->counter);
7195 }
7196
7197 /**
7198 @@ -38,6 +49,18 @@ static inline void atomic_set(atomic_t *v, int i)
7199 }
7200
7201 /**
7202 + * atomic_set_unchecked - set atomic variable
7203 + * @v: pointer of type atomic_unchecked_t
7204 + * @i: required value
7205 + *
7206 + * Atomically sets the value of @v to @i.
7207 + */
7208 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
7209 +{
7210 + v->counter = i;
7211 +}
7212 +
7213 +/**
7214 * atomic_add - add integer to atomic variable
7215 * @i: integer value to add
7216 * @v: pointer of type atomic_t
7217 @@ -46,7 +69,29 @@ static inline void atomic_set(atomic_t *v, int i)
7218 */
7219 static inline void atomic_add(int i, atomic_t *v)
7220 {
7221 - asm volatile(LOCK_PREFIX "addl %1,%0"
7222 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
7223 +
7224 +#ifdef CONFIG_PAX_REFCOUNT
7225 + "jno 0f\n"
7226 + LOCK_PREFIX "subl %1,%0\n"
7227 + "int $4\n0:\n"
7228 + _ASM_EXTABLE(0b, 0b)
7229 +#endif
7230 +
7231 + : "+m" (v->counter)
7232 + : "ir" (i));
7233 +}
7234 +
7235 +/**
7236 + * atomic_add_unchecked - add integer to atomic variable
7237 + * @i: integer value to add
7238 + * @v: pointer of type atomic_unchecked_t
7239 + *
7240 + * Atomically adds @i to @v.
7241 + */
7242 +static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
7243 +{
7244 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
7245 : "+m" (v->counter)
7246 : "ir" (i));
7247 }
7248 @@ -60,7 +105,29 @@ static inline void atomic_add(int i, atomic_t *v)
7249 */
7250 static inline void atomic_sub(int i, atomic_t *v)
7251 {
7252 - asm volatile(LOCK_PREFIX "subl %1,%0"
7253 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
7254 +
7255 +#ifdef CONFIG_PAX_REFCOUNT
7256 + "jno 0f\n"
7257 + LOCK_PREFIX "addl %1,%0\n"
7258 + "int $4\n0:\n"
7259 + _ASM_EXTABLE(0b, 0b)
7260 +#endif
7261 +
7262 + : "+m" (v->counter)
7263 + : "ir" (i));
7264 +}
7265 +
7266 +/**
7267 + * atomic_sub_unchecked - subtract integer from atomic variable
7268 + * @i: integer value to subtract
7269 + * @v: pointer of type atomic_unchecked_t
7270 + *
7271 + * Atomically subtracts @i from @v.
7272 + */
7273 +static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
7274 +{
7275 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
7276 : "+m" (v->counter)
7277 : "ir" (i));
7278 }
7279 @@ -78,7 +145,16 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
7280 {
7281 unsigned char c;
7282
7283 - asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
7284 + asm volatile(LOCK_PREFIX "subl %2,%0\n"
7285 +
7286 +#ifdef CONFIG_PAX_REFCOUNT
7287 + "jno 0f\n"
7288 + LOCK_PREFIX "addl %2,%0\n"
7289 + "int $4\n0:\n"
7290 + _ASM_EXTABLE(0b, 0b)
7291 +#endif
7292 +
7293 + "sete %1\n"
7294 : "+m" (v->counter), "=qm" (c)
7295 : "ir" (i) : "memory");
7296 return c;
7297 @@ -92,7 +168,27 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
7298 */
7299 static inline void atomic_inc(atomic_t *v)
7300 {
7301 - asm volatile(LOCK_PREFIX "incl %0"
7302 + asm volatile(LOCK_PREFIX "incl %0\n"
7303 +
7304 +#ifdef CONFIG_PAX_REFCOUNT
7305 + "jno 0f\n"
7306 + LOCK_PREFIX "decl %0\n"
7307 + "int $4\n0:\n"
7308 + _ASM_EXTABLE(0b, 0b)
7309 +#endif
7310 +
7311 + : "+m" (v->counter));
7312 +}
7313 +
7314 +/**
7315 + * atomic_inc_unchecked - increment atomic variable
7316 + * @v: pointer of type atomic_unchecked_t
7317 + *
7318 + * Atomically increments @v by 1.
7319 + */
7320 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
7321 +{
7322 + asm volatile(LOCK_PREFIX "incl %0\n"
7323 : "+m" (v->counter));
7324 }
7325
7326 @@ -104,7 +200,27 @@ static inline void atomic_inc(atomic_t *v)
7327 */
7328 static inline void atomic_dec(atomic_t *v)
7329 {
7330 - asm volatile(LOCK_PREFIX "decl %0"
7331 + asm volatile(LOCK_PREFIX "decl %0\n"
7332 +
7333 +#ifdef CONFIG_PAX_REFCOUNT
7334 + "jno 0f\n"
7335 + LOCK_PREFIX "incl %0\n"
7336 + "int $4\n0:\n"
7337 + _ASM_EXTABLE(0b, 0b)
7338 +#endif
7339 +
7340 + : "+m" (v->counter));
7341 +}
7342 +
7343 +/**
7344 + * atomic_dec_unchecked - decrement atomic variable
7345 + * @v: pointer of type atomic_unchecked_t
7346 + *
7347 + * Atomically decrements @v by 1.
7348 + */
7349 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
7350 +{
7351 + asm volatile(LOCK_PREFIX "decl %0\n"
7352 : "+m" (v->counter));
7353 }
7354
7355 @@ -120,7 +236,16 @@ static inline int atomic_dec_and_test(atomic_t *v)
7356 {
7357 unsigned char c;
7358
7359 - asm volatile(LOCK_PREFIX "decl %0; sete %1"
7360 + asm volatile(LOCK_PREFIX "decl %0\n"
7361 +
7362 +#ifdef CONFIG_PAX_REFCOUNT
7363 + "jno 0f\n"
7364 + LOCK_PREFIX "incl %0\n"
7365 + "int $4\n0:\n"
7366 + _ASM_EXTABLE(0b, 0b)
7367 +#endif
7368 +
7369 + "sete %1\n"
7370 : "+m" (v->counter), "=qm" (c)
7371 : : "memory");
7372 return c != 0;
7373 @@ -138,7 +263,35 @@ static inline int atomic_inc_and_test(atomic_t *v)
7374 {
7375 unsigned char c;
7376
7377 - asm volatile(LOCK_PREFIX "incl %0; sete %1"
7378 + asm volatile(LOCK_PREFIX "incl %0\n"
7379 +
7380 +#ifdef CONFIG_PAX_REFCOUNT
7381 + "jno 0f\n"
7382 + LOCK_PREFIX "decl %0\n"
7383 + "int $4\n0:\n"
7384 + _ASM_EXTABLE(0b, 0b)
7385 +#endif
7386 +
7387 + "sete %1\n"
7388 + : "+m" (v->counter), "=qm" (c)
7389 + : : "memory");
7390 + return c != 0;
7391 +}
7392 +
7393 +/**
7394 + * atomic_inc_and_test_unchecked - increment and test
7395 + * @v: pointer of type atomic_unchecked_t
7396 + *
7397 + * Atomically increments @v by 1
7398 + * and returns true if the result is zero, or false for all
7399 + * other cases.
7400 + */
7401 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
7402 +{
7403 + unsigned char c;
7404 +
7405 + asm volatile(LOCK_PREFIX "incl %0\n"
7406 + "sete %1\n"
7407 : "+m" (v->counter), "=qm" (c)
7408 : : "memory");
7409 return c != 0;
7410 @@ -157,7 +310,16 @@ static inline int atomic_add_negative(int i, atomic_t *v)
7411 {
7412 unsigned char c;
7413
7414 - asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
7415 + asm volatile(LOCK_PREFIX "addl %2,%0\n"
7416 +
7417 +#ifdef CONFIG_PAX_REFCOUNT
7418 + "jno 0f\n"
7419 + LOCK_PREFIX "subl %2,%0\n"
7420 + "int $4\n0:\n"
7421 + _ASM_EXTABLE(0b, 0b)
7422 +#endif
7423 +
7424 + "sets %1\n"
7425 : "+m" (v->counter), "=qm" (c)
7426 : "ir" (i) : "memory");
7427 return c;
7428 @@ -179,7 +341,7 @@ static inline int atomic_add_return(int i, atomic_t *v)
7429 goto no_xadd;
7430 #endif
7431 /* Modern 486+ processor */
7432 - return i + xadd(&v->counter, i);
7433 + return i + xadd_check_overflow(&v->counter, i);
7434
7435 #ifdef CONFIG_M386
7436 no_xadd: /* Legacy 386 processor */
7437 @@ -192,6 +354,34 @@ no_xadd: /* Legacy 386 processor */
7438 }
7439
7440 /**
7441 + * atomic_add_return_unchecked - add integer and return
7442 + * @i: integer value to add
7443 + * @v: pointer of type atomic_unchecked_t
7444 + *
7445 + * Atomically adds @i to @v and returns @i + @v
7446 + */
7447 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
7448 +{
7449 +#ifdef CONFIG_M386
7450 + int __i;
7451 + unsigned long flags;
7452 + if (unlikely(boot_cpu_data.x86 <= 3))
7453 + goto no_xadd;
7454 +#endif
7455 + /* Modern 486+ processor */
7456 + return i + xadd(&v->counter, i);
7457 +
7458 +#ifdef CONFIG_M386
7459 +no_xadd: /* Legacy 386 processor */
7460 + raw_local_irq_save(flags);
7461 + __i = atomic_read_unchecked(v);
7462 + atomic_set_unchecked(v, i + __i);
7463 + raw_local_irq_restore(flags);
7464 + return i + __i;
7465 +#endif
7466 +}
7467 +
7468 +/**
7469 * atomic_sub_return - subtract integer and return
7470 * @v: pointer of type atomic_t
7471 * @i: integer value to subtract
7472 @@ -204,6 +394,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
7473 }
7474
7475 #define atomic_inc_return(v) (atomic_add_return(1, v))
7476 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
7477 +{
7478 + return atomic_add_return_unchecked(1, v);
7479 +}
7480 #define atomic_dec_return(v) (atomic_sub_return(1, v))
7481
7482 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
7483 @@ -211,11 +405,21 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
7484 return cmpxchg(&v->counter, old, new);
7485 }
7486
7487 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
7488 +{
7489 + return cmpxchg(&v->counter, old, new);
7490 +}
7491 +
7492 static inline int atomic_xchg(atomic_t *v, int new)
7493 {
7494 return xchg(&v->counter, new);
7495 }
7496
7497 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
7498 +{
7499 + return xchg(&v->counter, new);
7500 +}
7501 +
7502 /**
7503 * __atomic_add_unless - add unless the number is already a given value
7504 * @v: pointer of type atomic_t
7505 @@ -227,12 +431,25 @@ static inline int atomic_xchg(atomic_t *v, int new)
7506 */
7507 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
7508 {
7509 - int c, old;
7510 + int c, old, new;
7511 c = atomic_read(v);
7512 for (;;) {
7513 - if (unlikely(c == (u)))
7514 + if (unlikely(c == u))
7515 break;
7516 - old = atomic_cmpxchg((v), c, c + (a));
7517 +
7518 + asm volatile("addl %2,%0\n"
7519 +
7520 +#ifdef CONFIG_PAX_REFCOUNT
7521 + "jno 0f\n"
7522 + "subl %2,%0\n"
7523 + "int $4\n0:\n"
7524 + _ASM_EXTABLE(0b, 0b)
7525 +#endif
7526 +
7527 + : "=r" (new)
7528 + : "0" (c), "ir" (a));
7529 +
7530 + old = atomic_cmpxchg(v, c, new);
7531 if (likely(old == c))
7532 break;
7533 c = old;
7534 @@ -240,6 +457,48 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
7535 return c;
7536 }
7537
7538 +/**
7539 + * atomic_inc_not_zero_hint - increment if not null
7540 + * @v: pointer of type atomic_t
7541 + * @hint: probable value of the atomic before the increment
7542 + *
7543 + * This version of atomic_inc_not_zero() gives a hint of probable
7544 + * value of the atomic. This helps processor to not read the memory
7545 + * before doing the atomic read/modify/write cycle, lowering
7546 + * number of bus transactions on some arches.
7547 + *
7548 + * Returns: 0 if increment was not done, 1 otherwise.
7549 + */
7550 +#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
7551 +static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
7552 +{
7553 + int val, c = hint, new;
7554 +
7555 + /* sanity test, should be removed by compiler if hint is a constant */
7556 + if (!hint)
7557 + return __atomic_add_unless(v, 1, 0);
7558 +
7559 + do {
7560 + asm volatile("incl %0\n"
7561 +
7562 +#ifdef CONFIG_PAX_REFCOUNT
7563 + "jno 0f\n"
7564 + "decl %0\n"
7565 + "int $4\n0:\n"
7566 + _ASM_EXTABLE(0b, 0b)
7567 +#endif
7568 +
7569 + : "=r" (new)
7570 + : "0" (c));
7571 +
7572 + val = atomic_cmpxchg(v, c, new);
7573 + if (val == c)
7574 + return 1;
7575 + c = val;
7576 + } while (c);
7577 +
7578 + return 0;
7579 +}
7580
7581 /*
7582 * atomic_dec_if_positive - decrement by 1 if old value positive
7583 diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
7584 index 24098aa..1e37723 100644
7585 --- a/arch/x86/include/asm/atomic64_32.h
7586 +++ b/arch/x86/include/asm/atomic64_32.h
7587 @@ -12,6 +12,14 @@ typedef struct {
7588 u64 __aligned(8) counter;
7589 } atomic64_t;
7590
7591 +#ifdef CONFIG_PAX_REFCOUNT
7592 +typedef struct {
7593 + u64 __aligned(8) counter;
7594 +} atomic64_unchecked_t;
7595 +#else
7596 +typedef atomic64_t atomic64_unchecked_t;
7597 +#endif
7598 +
7599 #define ATOMIC64_INIT(val) { (val) }
7600
7601 #ifdef CONFIG_X86_CMPXCHG64
7602 @@ -38,6 +46,21 @@ static inline long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n
7603 }
7604
7605 /**
7606 + * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
7607 + * @p: pointer to type atomic64_unchecked_t
7608 + * @o: expected value
7609 + * @n: new value
7610 + *
7611 + * Atomically sets @v to @n if it was equal to @o and returns
7612 + * the old value.
7613 + */
7614 +
7615 +static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
7616 +{
7617 + return cmpxchg64(&v->counter, o, n);
7618 +}
7619 +
7620 +/**
7621 * atomic64_xchg - xchg atomic64 variable
7622 * @v: pointer to type atomic64_t
7623 * @n: value to assign
7624 @@ -77,6 +100,24 @@ static inline void atomic64_set(atomic64_t *v, long long i)
7625 }
7626
7627 /**
7628 + * atomic64_set_unchecked - set atomic64 variable
7629 + * @v: pointer to type atomic64_unchecked_t
7630 + * @n: value to assign
7631 + *
7632 + * Atomically sets the value of @v to @n.
7633 + */
7634 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
7635 +{
7636 + unsigned high = (unsigned)(i >> 32);
7637 + unsigned low = (unsigned)i;
7638 + asm volatile(ATOMIC64_ALTERNATIVE(set)
7639 + : "+b" (low), "+c" (high)
7640 + : "S" (v)
7641 + : "eax", "edx", "memory"
7642 + );
7643 +}
7644 +
7645 +/**
7646 * atomic64_read - read atomic64 variable
7647 * @v: pointer to type atomic64_t
7648 *
7649 @@ -93,6 +134,22 @@ static inline long long atomic64_read(atomic64_t *v)
7650 }
7651
7652 /**
7653 + * atomic64_read_unchecked - read atomic64 variable
7654 + * @v: pointer to type atomic64_unchecked_t
7655 + *
7656 + * Atomically reads the value of @v and returns it.
7657 + */
7658 +static inline long long atomic64_read_unchecked(atomic64_unchecked_t *v)
7659 +{
7660 + long long r;
7661 + asm volatile(ATOMIC64_ALTERNATIVE(read_unchecked)
7662 + : "=A" (r), "+c" (v)
7663 + : : "memory"
7664 + );
7665 + return r;
7666 + }
7667 +
7668 +/**
7669 * atomic64_add_return - add and return
7670 * @i: integer value to add
7671 * @v: pointer to type atomic64_t
7672 @@ -108,6 +165,22 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
7673 return i;
7674 }
7675
7676 +/**
7677 + * atomic64_add_return_unchecked - add and return
7678 + * @i: integer value to add
7679 + * @v: pointer to type atomic64_unchecked_t
7680 + *
7681 + * Atomically adds @i to @v and returns @i + *@v
7682 + */
7683 +static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
7684 +{
7685 + asm volatile(ATOMIC64_ALTERNATIVE(add_return_unchecked)
7686 + : "+A" (i), "+c" (v)
7687 + : : "memory"
7688 + );
7689 + return i;
7690 +}
7691 +
7692 /*
7693 * Other variants with different arithmetic operators:
7694 */
7695 @@ -131,6 +204,17 @@ static inline long long atomic64_inc_return(atomic64_t *v)
7696 return a;
7697 }
7698
7699 +static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
7700 +{
7701 + long long a;
7702 + asm volatile(ATOMIC64_ALTERNATIVE(inc_return_unchecked)
7703 + : "=A" (a)
7704 + : "S" (v)
7705 + : "memory", "ecx"
7706 + );
7707 + return a;
7708 +}
7709 +
7710 static inline long long atomic64_dec_return(atomic64_t *v)
7711 {
7712 long long a;
7713 @@ -159,6 +243,22 @@ static inline long long atomic64_add(long long i, atomic64_t *v)
7714 }
7715
7716 /**
7717 + * atomic64_add_unchecked - add integer to atomic64 variable
7718 + * @i: integer value to add
7719 + * @v: pointer to type atomic64_unchecked_t
7720 + *
7721 + * Atomically adds @i to @v.
7722 + */
7723 +static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
7724 +{
7725 + asm volatile(ATOMIC64_ALTERNATIVE_(add_unchecked, add_return_unchecked)
7726 + : "+A" (i), "+c" (v)
7727 + : : "memory"
7728 + );
7729 + return i;
7730 +}
7731 +
7732 +/**
7733 * atomic64_sub - subtract the atomic64 variable
7734 * @i: integer value to subtract
7735 * @v: pointer to type atomic64_t
7736 diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
7737 index 0e1cbfc..5623683 100644
7738 --- a/arch/x86/include/asm/atomic64_64.h
7739 +++ b/arch/x86/include/asm/atomic64_64.h
7740 @@ -18,7 +18,19 @@
7741 */
7742 static inline long atomic64_read(const atomic64_t *v)
7743 {
7744 - return (*(volatile long *)&(v)->counter);
7745 + return (*(volatile const long *)&(v)->counter);
7746 +}
7747 +
7748 +/**
7749 + * atomic64_read_unchecked - read atomic64 variable
7750 + * @v: pointer of type atomic64_unchecked_t
7751 + *
7752 + * Atomically reads the value of @v.
7753 + * Doesn't imply a read memory barrier.
7754 + */
7755 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
7756 +{
7757 + return (*(volatile const long *)&(v)->counter);
7758 }
7759
7760 /**
7761 @@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64_t *v, long i)
7762 }
7763
7764 /**
7765 + * atomic64_set_unchecked - set atomic64 variable
7766 + * @v: pointer to type atomic64_unchecked_t
7767 + * @i: required value
7768 + *
7769 + * Atomically sets the value of @v to @i.
7770 + */
7771 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
7772 +{
7773 + v->counter = i;
7774 +}
7775 +
7776 +/**
7777 * atomic64_add - add integer to atomic64 variable
7778 * @i: integer value to add
7779 * @v: pointer to type atomic64_t
7780 @@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64_t *v, long i)
7781 */
7782 static inline void atomic64_add(long i, atomic64_t *v)
7783 {
7784 + asm volatile(LOCK_PREFIX "addq %1,%0\n"
7785 +
7786 +#ifdef CONFIG_PAX_REFCOUNT
7787 + "jno 0f\n"
7788 + LOCK_PREFIX "subq %1,%0\n"
7789 + "int $4\n0:\n"
7790 + _ASM_EXTABLE(0b, 0b)
7791 +#endif
7792 +
7793 + : "=m" (v->counter)
7794 + : "er" (i), "m" (v->counter));
7795 +}
7796 +
7797 +/**
7798 + * atomic64_add_unchecked - add integer to atomic64 variable
7799 + * @i: integer value to add
7800 + * @v: pointer to type atomic64_unchecked_t
7801 + *
7802 + * Atomically adds @i to @v.
7803 + */
7804 +static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
7805 +{
7806 asm volatile(LOCK_PREFIX "addq %1,%0"
7807 : "=m" (v->counter)
7808 : "er" (i), "m" (v->counter));
7809 @@ -56,7 +102,29 @@ static inline void atomic64_add(long i, atomic64_t *v)
7810 */
7811 static inline void atomic64_sub(long i, atomic64_t *v)
7812 {
7813 - asm volatile(LOCK_PREFIX "subq %1,%0"
7814 + asm volatile(LOCK_PREFIX "subq %1,%0\n"
7815 +
7816 +#ifdef CONFIG_PAX_REFCOUNT
7817 + "jno 0f\n"
7818 + LOCK_PREFIX "addq %1,%0\n"
7819 + "int $4\n0:\n"
7820 + _ASM_EXTABLE(0b, 0b)
7821 +#endif
7822 +
7823 + : "=m" (v->counter)
7824 + : "er" (i), "m" (v->counter));
7825 +}
7826 +
7827 +/**
7828 + * atomic64_sub_unchecked - subtract the atomic64 variable
7829 + * @i: integer value to subtract
7830 + * @v: pointer to type atomic64_unchecked_t
7831 + *
7832 + * Atomically subtracts @i from @v.
7833 + */
7834 +static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
7835 +{
7836 + asm volatile(LOCK_PREFIX "subq %1,%0\n"
7837 : "=m" (v->counter)
7838 : "er" (i), "m" (v->counter));
7839 }
7840 @@ -74,7 +142,16 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
7841 {
7842 unsigned char c;
7843
7844 - asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
7845 + asm volatile(LOCK_PREFIX "subq %2,%0\n"
7846 +
7847 +#ifdef CONFIG_PAX_REFCOUNT
7848 + "jno 0f\n"
7849 + LOCK_PREFIX "addq %2,%0\n"
7850 + "int $4\n0:\n"
7851 + _ASM_EXTABLE(0b, 0b)
7852 +#endif
7853 +
7854 + "sete %1\n"
7855 : "=m" (v->counter), "=qm" (c)
7856 : "er" (i), "m" (v->counter) : "memory");
7857 return c;
7858 @@ -88,6 +165,27 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
7859 */
7860 static inline void atomic64_inc(atomic64_t *v)
7861 {
7862 + asm volatile(LOCK_PREFIX "incq %0\n"
7863 +
7864 +#ifdef CONFIG_PAX_REFCOUNT
7865 + "jno 0f\n"
7866 + LOCK_PREFIX "decq %0\n"
7867 + "int $4\n0:\n"
7868 + _ASM_EXTABLE(0b, 0b)
7869 +#endif
7870 +
7871 + : "=m" (v->counter)
7872 + : "m" (v->counter));
7873 +}
7874 +
7875 +/**
7876 + * atomic64_inc_unchecked - increment atomic64 variable
7877 + * @v: pointer to type atomic64_unchecked_t
7878 + *
7879 + * Atomically increments @v by 1.
7880 + */
7881 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
7882 +{
7883 asm volatile(LOCK_PREFIX "incq %0"
7884 : "=m" (v->counter)
7885 : "m" (v->counter));
7886 @@ -101,7 +199,28 @@ static inline void atomic64_inc(atomic64_t *v)
7887 */
7888 static inline void atomic64_dec(atomic64_t *v)
7889 {
7890 - asm volatile(LOCK_PREFIX "decq %0"
7891 + asm volatile(LOCK_PREFIX "decq %0\n"
7892 +
7893 +#ifdef CONFIG_PAX_REFCOUNT
7894 + "jno 0f\n"
7895 + LOCK_PREFIX "incq %0\n"
7896 + "int $4\n0:\n"
7897 + _ASM_EXTABLE(0b, 0b)
7898 +#endif
7899 +
7900 + : "=m" (v->counter)
7901 + : "m" (v->counter));
7902 +}
7903 +
7904 +/**
7905 + * atomic64_dec_unchecked - decrement atomic64 variable
7906 + * @v: pointer to type atomic64_t
7907 + *
7908 + * Atomically decrements @v by 1.
7909 + */
7910 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
7911 +{
7912 + asm volatile(LOCK_PREFIX "decq %0\n"
7913 : "=m" (v->counter)
7914 : "m" (v->counter));
7915 }
7916 @@ -118,7 +237,16 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
7917 {
7918 unsigned char c;
7919
7920 - asm volatile(LOCK_PREFIX "decq %0; sete %1"
7921 + asm volatile(LOCK_PREFIX "decq %0\n"
7922 +
7923 +#ifdef CONFIG_PAX_REFCOUNT
7924 + "jno 0f\n"
7925 + LOCK_PREFIX "incq %0\n"
7926 + "int $4\n0:\n"
7927 + _ASM_EXTABLE(0b, 0b)
7928 +#endif
7929 +
7930 + "sete %1\n"
7931 : "=m" (v->counter), "=qm" (c)
7932 : "m" (v->counter) : "memory");
7933 return c != 0;
7934 @@ -136,7 +264,16 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
7935 {
7936 unsigned char c;
7937
7938 - asm volatile(LOCK_PREFIX "incq %0; sete %1"
7939 + asm volatile(LOCK_PREFIX "incq %0\n"
7940 +
7941 +#ifdef CONFIG_PAX_REFCOUNT
7942 + "jno 0f\n"
7943 + LOCK_PREFIX "decq %0\n"
7944 + "int $4\n0:\n"
7945 + _ASM_EXTABLE(0b, 0b)
7946 +#endif
7947 +
7948 + "sete %1\n"
7949 : "=m" (v->counter), "=qm" (c)
7950 : "m" (v->counter) : "memory");
7951 return c != 0;
7952 @@ -155,7 +292,16 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
7953 {
7954 unsigned char c;
7955
7956 - asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
7957 + asm volatile(LOCK_PREFIX "addq %2,%0\n"
7958 +
7959 +#ifdef CONFIG_PAX_REFCOUNT
7960 + "jno 0f\n"
7961 + LOCK_PREFIX "subq %2,%0\n"
7962 + "int $4\n0:\n"
7963 + _ASM_EXTABLE(0b, 0b)
7964 +#endif
7965 +
7966 + "sets %1\n"
7967 : "=m" (v->counter), "=qm" (c)
7968 : "er" (i), "m" (v->counter) : "memory");
7969 return c;
7970 @@ -170,6 +316,18 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
7971 */
7972 static inline long atomic64_add_return(long i, atomic64_t *v)
7973 {
7974 + return i + xadd_check_overflow(&v->counter, i);
7975 +}
7976 +
7977 +/**
7978 + * atomic64_add_return_unchecked - add and return
7979 + * @i: integer value to add
7980 + * @v: pointer to type atomic64_unchecked_t
7981 + *
7982 + * Atomically adds @i to @v and returns @i + @v
7983 + */
7984 +static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
7985 +{
7986 return i + xadd(&v->counter, i);
7987 }
7988
7989 @@ -179,6 +337,10 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
7990 }
7991
7992 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
7993 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
7994 +{
7995 + return atomic64_add_return_unchecked(1, v);
7996 +}
7997 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
7998
7999 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
8000 @@ -186,6 +348,11 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
8001 return cmpxchg(&v->counter, old, new);
8002 }
8003
8004 +static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
8005 +{
8006 + return cmpxchg(&v->counter, old, new);
8007 +}
8008 +
8009 static inline long atomic64_xchg(atomic64_t *v, long new)
8010 {
8011 return xchg(&v->counter, new);
8012 @@ -202,17 +369,30 @@ static inline long atomic64_xchg(atomic64_t *v, long new)
8013 */
8014 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
8015 {
8016 - long c, old;
8017 + long c, old, new;
8018 c = atomic64_read(v);
8019 for (;;) {
8020 - if (unlikely(c == (u)))
8021 + if (unlikely(c == u))
8022 break;
8023 - old = atomic64_cmpxchg((v), c, c + (a));
8024 +
8025 + asm volatile("add %2,%0\n"
8026 +
8027 +#ifdef CONFIG_PAX_REFCOUNT
8028 + "jno 0f\n"
8029 + "sub %2,%0\n"
8030 + "int $4\n0:\n"
8031 + _ASM_EXTABLE(0b, 0b)
8032 +#endif
8033 +
8034 + : "=r" (new)
8035 + : "0" (c), "ir" (a));
8036 +
8037 + old = atomic64_cmpxchg(v, c, new);
8038 if (likely(old == c))
8039 break;
8040 c = old;
8041 }
8042 - return c != (u);
8043 + return c != u;
8044 }
8045
8046 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
8047 diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
8048 index 1775d6e..b65017f 100644
8049 --- a/arch/x86/include/asm/bitops.h
8050 +++ b/arch/x86/include/asm/bitops.h
8051 @@ -38,7 +38,7 @@
8052 * a mask operation on a byte.
8053 */
8054 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
8055 -#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
8056 +#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
8057 #define CONST_MASK(nr) (1 << ((nr) & 7))
8058
8059 /**
8060 diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
8061 index 5e1a2ee..c9f9533 100644
8062 --- a/arch/x86/include/asm/boot.h
8063 +++ b/arch/x86/include/asm/boot.h
8064 @@ -11,10 +11,15 @@
8065 #include <asm/pgtable_types.h>
8066
8067 /* Physical address where kernel should be loaded. */
8068 -#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
8069 +#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
8070 + (CONFIG_PHYSICAL_ALIGN - 1)) \
8071 & ~(CONFIG_PHYSICAL_ALIGN - 1))
8072
8073 +#ifndef __ASSEMBLY__
8074 +extern unsigned char __LOAD_PHYSICAL_ADDR[];
8075 +#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
8076 +#endif
8077 +
8078 /* Minimum kernel alignment, as a power of two */
8079 #ifdef CONFIG_X86_64
8080 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
8081 diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
8082 index 48f99f1..d78ebf9 100644
8083 --- a/arch/x86/include/asm/cache.h
8084 +++ b/arch/x86/include/asm/cache.h
8085 @@ -5,12 +5,13 @@
8086
8087 /* L1 cache line size */
8088 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
8089 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
8090 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
8091
8092 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
8093 +#define __read_only __attribute__((__section__(".data..read_only")))
8094
8095 #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
8096 -#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
8097 +#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
8098
8099 #ifdef CONFIG_X86_VSMP
8100 #ifdef CONFIG_SMP
8101 diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
8102 index 4e12668..501d239 100644
8103 --- a/arch/x86/include/asm/cacheflush.h
8104 +++ b/arch/x86/include/asm/cacheflush.h
8105 @@ -26,7 +26,7 @@ static inline unsigned long get_page_memtype(struct page *pg)
8106 unsigned long pg_flags = pg->flags & _PGMT_MASK;
8107
8108 if (pg_flags == _PGMT_DEFAULT)
8109 - return -1;
8110 + return ~0UL;
8111 else if (pg_flags == _PGMT_WC)
8112 return _PAGE_CACHE_WC;
8113 else if (pg_flags == _PGMT_UC_MINUS)
8114 diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
8115 index 46fc474..b02b0f9 100644
8116 --- a/arch/x86/include/asm/checksum_32.h
8117 +++ b/arch/x86/include/asm/checksum_32.h
8118 @@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
8119 int len, __wsum sum,
8120 int *src_err_ptr, int *dst_err_ptr);
8121
8122 +asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
8123 + int len, __wsum sum,
8124 + int *src_err_ptr, int *dst_err_ptr);
8125 +
8126 +asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
8127 + int len, __wsum sum,
8128 + int *src_err_ptr, int *dst_err_ptr);
8129 +
8130 /*
8131 * Note: when you get a NULL pointer exception here this means someone
8132 * passed in an incorrect kernel address to one of these functions.
8133 @@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src,
8134 int *err_ptr)
8135 {
8136 might_sleep();
8137 - return csum_partial_copy_generic((__force void *)src, dst,
8138 + return csum_partial_copy_generic_from_user((__force void *)src, dst,
8139 len, sum, err_ptr, NULL);
8140 }
8141
8142 @@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_user(const void *src,
8143 {
8144 might_sleep();
8145 if (access_ok(VERIFY_WRITE, dst, len))
8146 - return csum_partial_copy_generic(src, (__force void *)dst,
8147 + return csum_partial_copy_generic_to_user(src, (__force void *)dst,
8148 len, sum, NULL, err_ptr);
8149
8150 if (len)
8151 diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
8152 index 5d3acdf..6447a02 100644
8153 --- a/arch/x86/include/asm/cmpxchg.h
8154 +++ b/arch/x86/include/asm/cmpxchg.h
8155 @@ -14,6 +14,8 @@ extern void __cmpxchg_wrong_size(void)
8156 __compiletime_error("Bad argument size for cmpxchg");
8157 extern void __xadd_wrong_size(void)
8158 __compiletime_error("Bad argument size for xadd");
8159 +extern void __xadd_check_overflow_wrong_size(void)
8160 + __compiletime_error("Bad argument size for xadd_check_overflow");
8161
8162 /*
8163 * Constants for operation sizes. On 32-bit, the 64-bit size it set to
8164 @@ -195,6 +197,34 @@ extern void __xadd_wrong_size(void)
8165 __ret; \
8166 })
8167
8168 +#define __xadd_check_overflow(ptr, inc, lock) \
8169 + ({ \
8170 + __typeof__ (*(ptr)) __ret = (inc); \
8171 + switch (sizeof(*(ptr))) { \
8172 + case __X86_CASE_L: \
8173 + asm volatile (lock "xaddl %0, %1\n" \
8174 + "jno 0f\n" \
8175 + "mov %0,%1\n" \
8176 + "int $4\n0:\n" \
8177 + _ASM_EXTABLE(0b, 0b) \
8178 + : "+r" (__ret), "+m" (*(ptr)) \
8179 + : : "memory", "cc"); \
8180 + break; \
8181 + case __X86_CASE_Q: \
8182 + asm volatile (lock "xaddq %q0, %1\n" \
8183 + "jno 0f\n" \
8184 + "mov %0,%1\n" \
8185 + "int $4\n0:\n" \
8186 + _ASM_EXTABLE(0b, 0b) \
8187 + : "+r" (__ret), "+m" (*(ptr)) \
8188 + : : "memory", "cc"); \
8189 + break; \
8190 + default: \
8191 + __xadd_check_overflow_wrong_size(); \
8192 + } \
8193 + __ret; \
8194 + })
8195 +
8196 /*
8197 * xadd() adds "inc" to "*ptr" and atomically returns the previous
8198 * value of "*ptr".
8199 @@ -207,4 +237,6 @@ extern void __xadd_wrong_size(void)
8200 #define xadd_sync(ptr, inc) __xadd((ptr), (inc), "lock; ")
8201 #define xadd_local(ptr, inc) __xadd((ptr), (inc), "")
8202
8203 +#define xadd_check_overflow(ptr, inc) __xadd_check_overflow((ptr), (inc), LOCK_PREFIX)
8204 +
8205 #endif /* ASM_X86_CMPXCHG_H */
8206 diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
8207 index f3444f7..051a196 100644
8208 --- a/arch/x86/include/asm/cpufeature.h
8209 +++ b/arch/x86/include/asm/cpufeature.h
8210 @@ -363,7 +363,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
8211 ".section .discard,\"aw\",@progbits\n"
8212 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
8213 ".previous\n"
8214 - ".section .altinstr_replacement,\"ax\"\n"
8215 + ".section .altinstr_replacement,\"a\"\n"
8216 "3: movb $1,%0\n"
8217 "4:\n"
8218 ".previous\n"
8219 diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
8220 index 41935fa..3b40db8 100644
8221 --- a/arch/x86/include/asm/desc.h
8222 +++ b/arch/x86/include/asm/desc.h
8223 @@ -4,6 +4,7 @@
8224 #include <asm/desc_defs.h>
8225 #include <asm/ldt.h>
8226 #include <asm/mmu.h>
8227 +#include <asm/pgtable.h>
8228
8229 #include <linux/smp.h>
8230
8231 @@ -16,6 +17,7 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
8232
8233 desc->type = (info->read_exec_only ^ 1) << 1;
8234 desc->type |= info->contents << 2;
8235 + desc->type |= info->seg_not_present ^ 1;
8236
8237 desc->s = 1;
8238 desc->dpl = 0x3;
8239 @@ -34,17 +36,12 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
8240 }
8241
8242 extern struct desc_ptr idt_descr;
8243 -extern gate_desc idt_table[];
8244 -
8245 -struct gdt_page {
8246 - struct desc_struct gdt[GDT_ENTRIES];
8247 -} __attribute__((aligned(PAGE_SIZE)));
8248 -
8249 -DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
8250 +extern gate_desc idt_table[256];
8251
8252 +extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
8253 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
8254 {
8255 - return per_cpu(gdt_page, cpu).gdt;
8256 + return cpu_gdt_table[cpu];
8257 }
8258
8259 #ifdef CONFIG_X86_64
8260 @@ -69,8 +66,14 @@ static inline void pack_gate(gate_desc *gate, unsigned char type,
8261 unsigned long base, unsigned dpl, unsigned flags,
8262 unsigned short seg)
8263 {
8264 - gate->a = (seg << 16) | (base & 0xffff);
8265 - gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
8266 + gate->gate.offset_low = base;
8267 + gate->gate.seg = seg;
8268 + gate->gate.reserved = 0;
8269 + gate->gate.type = type;
8270 + gate->gate.s = 0;
8271 + gate->gate.dpl = dpl;
8272 + gate->gate.p = 1;
8273 + gate->gate.offset_high = base >> 16;
8274 }
8275
8276 #endif
8277 @@ -115,12 +118,16 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
8278
8279 static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
8280 {
8281 + pax_open_kernel();
8282 memcpy(&idt[entry], gate, sizeof(*gate));
8283 + pax_close_kernel();
8284 }
8285
8286 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
8287 {
8288 + pax_open_kernel();
8289 memcpy(&ldt[entry], desc, 8);
8290 + pax_close_kernel();
8291 }
8292
8293 static inline void
8294 @@ -134,7 +141,9 @@ native_write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int
8295 default: size = sizeof(*gdt); break;
8296 }
8297
8298 + pax_open_kernel();
8299 memcpy(&gdt[entry], desc, size);
8300 + pax_close_kernel();
8301 }
8302
8303 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
8304 @@ -207,7 +216,9 @@ static inline void native_set_ldt(const void *addr, unsigned int entries)
8305
8306 static inline void native_load_tr_desc(void)
8307 {
8308 + pax_open_kernel();
8309 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
8310 + pax_close_kernel();
8311 }
8312
8313 static inline void native_load_gdt(const struct desc_ptr *dtr)
8314 @@ -244,8 +255,10 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
8315 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
8316 unsigned int i;
8317
8318 + pax_open_kernel();
8319 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
8320 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
8321 + pax_close_kernel();
8322 }
8323
8324 #define _LDT_empty(info) \
8325 @@ -307,7 +320,7 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
8326 desc->limit = (limit >> 16) & 0xf;
8327 }
8328
8329 -static inline void _set_gate(int gate, unsigned type, void *addr,
8330 +static inline void _set_gate(int gate, unsigned type, const void *addr,
8331 unsigned dpl, unsigned ist, unsigned seg)
8332 {
8333 gate_desc s;
8334 @@ -326,7 +339,7 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
8335 * Pentium F0 0F bugfix can have resulted in the mapped
8336 * IDT being write-protected.
8337 */
8338 -static inline void set_intr_gate(unsigned int n, void *addr)
8339 +static inline void set_intr_gate(unsigned int n, const void *addr)
8340 {
8341 BUG_ON((unsigned)n > 0xFF);
8342 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
8343 @@ -356,19 +369,19 @@ static inline void alloc_intr_gate(unsigned int n, void *addr)
8344 /*
8345 * This routine sets up an interrupt gate at directory privilege level 3.
8346 */
8347 -static inline void set_system_intr_gate(unsigned int n, void *addr)
8348 +static inline void set_system_intr_gate(unsigned int n, const void *addr)
8349 {
8350 BUG_ON((unsigned)n > 0xFF);
8351 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
8352 }
8353
8354 -static inline void set_system_trap_gate(unsigned int n, void *addr)
8355 +static inline void set_system_trap_gate(unsigned int n, const void *addr)
8356 {
8357 BUG_ON((unsigned)n > 0xFF);
8358 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
8359 }
8360
8361 -static inline void set_trap_gate(unsigned int n, void *addr)
8362 +static inline void set_trap_gate(unsigned int n, const void *addr)
8363 {
8364 BUG_ON((unsigned)n > 0xFF);
8365 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
8366 @@ -377,19 +390,31 @@ static inline void set_trap_gate(unsigned int n, void *addr)
8367 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
8368 {
8369 BUG_ON((unsigned)n > 0xFF);
8370 - _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
8371 + _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
8372 }
8373
8374 -static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
8375 +static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
8376 {
8377 BUG_ON((unsigned)n > 0xFF);
8378 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
8379 }
8380
8381 -static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
8382 +static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
8383 {
8384 BUG_ON((unsigned)n > 0xFF);
8385 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
8386 }
8387
8388 +#ifdef CONFIG_X86_32
8389 +static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
8390 +{
8391 + struct desc_struct d;
8392 +
8393 + if (likely(limit))
8394 + limit = (limit - 1UL) >> PAGE_SHIFT;
8395 + pack_descriptor(&d, base, limit, 0xFB, 0xC);
8396 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
8397 +}
8398 +#endif
8399 +
8400 #endif /* _ASM_X86_DESC_H */
8401 diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h
8402 index 278441f..b95a174 100644
8403 --- a/arch/x86/include/asm/desc_defs.h
8404 +++ b/arch/x86/include/asm/desc_defs.h
8405 @@ -31,6 +31,12 @@ struct desc_struct {
8406 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
8407 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
8408 };
8409 + struct {
8410 + u16 offset_low;
8411 + u16 seg;
8412 + unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
8413 + unsigned offset_high: 16;
8414 + } gate;
8415 };
8416 } __attribute__((packed));
8417
8418 diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h
8419 index 908b969..a1f4eb4 100644
8420 --- a/arch/x86/include/asm/e820.h
8421 +++ b/arch/x86/include/asm/e820.h
8422 @@ -69,7 +69,7 @@ struct e820map {
8423 #define ISA_START_ADDRESS 0xa0000
8424 #define ISA_END_ADDRESS 0x100000
8425
8426 -#define BIOS_BEGIN 0x000a0000
8427 +#define BIOS_BEGIN 0x000c0000
8428 #define BIOS_END 0x00100000
8429
8430 #define BIOS_ROM_BASE 0xffe00000
8431 diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
8432 index 5f962df..7289f09 100644
8433 --- a/arch/x86/include/asm/elf.h
8434 +++ b/arch/x86/include/asm/elf.h
8435 @@ -238,7 +238,25 @@ extern int force_personality32;
8436 the loader. We need to make sure that it is out of the way of the program
8437 that it will "exec", and that there is sufficient room for the brk. */
8438
8439 +#ifdef CONFIG_PAX_SEGMEXEC
8440 +#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
8441 +#else
8442 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
8443 +#endif
8444 +
8445 +#ifdef CONFIG_PAX_ASLR
8446 +#ifdef CONFIG_X86_32
8447 +#define PAX_ELF_ET_DYN_BASE 0x10000000UL
8448 +
8449 +#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
8450 +#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
8451 +#else
8452 +#define PAX_ELF_ET_DYN_BASE 0x400000UL
8453 +
8454 +#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
8455 +#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
8456 +#endif
8457 +#endif
8458
8459 /* This yields a mask that user programs can use to figure out what
8460 instruction set this CPU supports. This could be done in user space,
8461 @@ -291,9 +309,7 @@ do { \
8462
8463 #define ARCH_DLINFO \
8464 do { \
8465 - if (vdso_enabled) \
8466 - NEW_AUX_ENT(AT_SYSINFO_EHDR, \
8467 - (unsigned long)current->mm->context.vdso); \
8468 + NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
8469 } while (0)
8470
8471 #define AT_SYSINFO 32
8472 @@ -304,7 +320,7 @@ do { \
8473
8474 #endif /* !CONFIG_X86_32 */
8475
8476 -#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
8477 +#define VDSO_CURRENT_BASE (current->mm->context.vdso)
8478
8479 #define VDSO_ENTRY \
8480 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
8481 @@ -318,9 +334,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
8482 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
8483 #define compat_arch_setup_additional_pages syscall32_setup_pages
8484
8485 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
8486 -#define arch_randomize_brk arch_randomize_brk
8487 -
8488 /*
8489 * True on X86_32 or when emulating IA32 on X86_64
8490 */
8491 diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h
8492 index cc70c1c..d96d011 100644
8493 --- a/arch/x86/include/asm/emergency-restart.h
8494 +++ b/arch/x86/include/asm/emergency-restart.h
8495 @@ -15,6 +15,6 @@ enum reboot_type {
8496
8497 extern enum reboot_type reboot_type;
8498
8499 -extern void machine_emergency_restart(void);
8500 +extern void machine_emergency_restart(void) __noreturn;
8501
8502 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
8503 diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
8504 index d09bb03..4ea4194 100644
8505 --- a/arch/x86/include/asm/futex.h
8506 +++ b/arch/x86/include/asm/futex.h
8507 @@ -12,16 +12,18 @@
8508 #include <asm/system.h>
8509
8510 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
8511 + typecheck(u32 __user *, uaddr); \
8512 asm volatile("1:\t" insn "\n" \
8513 "2:\t.section .fixup,\"ax\"\n" \
8514 "3:\tmov\t%3, %1\n" \
8515 "\tjmp\t2b\n" \
8516 "\t.previous\n" \
8517 _ASM_EXTABLE(1b, 3b) \
8518 - : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
8519 + : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr))\
8520 : "i" (-EFAULT), "0" (oparg), "1" (0))
8521
8522 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
8523 + typecheck(u32 __user *, uaddr); \
8524 asm volatile("1:\tmovl %2, %0\n" \
8525 "\tmovl\t%0, %3\n" \
8526 "\t" insn "\n" \
8527 @@ -34,7 +36,7 @@
8528 _ASM_EXTABLE(1b, 4b) \
8529 _ASM_EXTABLE(2b, 4b) \
8530 : "=&a" (oldval), "=&r" (ret), \
8531 - "+m" (*uaddr), "=&r" (tem) \
8532 + "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \
8533 : "r" (oparg), "i" (-EFAULT), "1" (0))
8534
8535 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
8536 @@ -61,10 +63,10 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
8537
8538 switch (op) {
8539 case FUTEX_OP_SET:
8540 - __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
8541 + __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
8542 break;
8543 case FUTEX_OP_ADD:
8544 - __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
8545 + __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
8546 uaddr, oparg);
8547 break;
8548 case FUTEX_OP_OR:
8549 @@ -123,13 +125,13 @@ static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
8550 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
8551 return -EFAULT;
8552
8553 - asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
8554 + asm volatile("1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"
8555 "2:\t.section .fixup, \"ax\"\n"
8556 "3:\tmov %3, %0\n"
8557 "\tjmp 2b\n"
8558 "\t.previous\n"
8559 _ASM_EXTABLE(1b, 3b)
8560 - : "+r" (ret), "=a" (oldval), "+m" (*uaddr)
8561 + : "+r" (ret), "=a" (oldval), "+m" (*(u32 __user *)____m(uaddr))
8562 : "i" (-EFAULT), "r" (newval), "1" (oldval)
8563 : "memory"
8564 );
8565 diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
8566 index eb92a6e..b98b2f4 100644
8567 --- a/arch/x86/include/asm/hw_irq.h
8568 +++ b/arch/x86/include/asm/hw_irq.h
8569 @@ -136,8 +136,8 @@ extern void setup_ioapic_dest(void);
8570 extern void enable_IO_APIC(void);
8571
8572 /* Statistics */
8573 -extern atomic_t irq_err_count;
8574 -extern atomic_t irq_mis_count;
8575 +extern atomic_unchecked_t irq_err_count;
8576 +extern atomic_unchecked_t irq_mis_count;
8577
8578 /* EISA */
8579 extern void eisa_set_level_irq(unsigned int irq);
8580 diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h
8581 index c9e09ea..73888df 100644
8582 --- a/arch/x86/include/asm/i387.h
8583 +++ b/arch/x86/include/asm/i387.h
8584 @@ -92,6 +92,11 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
8585 {
8586 int err;
8587
8588 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8589 + if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
8590 + fx = (struct i387_fxsave_struct __user *)((void *)fx + PAX_USER_SHADOW_BASE);
8591 +#endif
8592 +
8593 /* See comment in fxsave() below. */
8594 #ifdef CONFIG_AS_FXSAVEQ
8595 asm volatile("1: fxrstorq %[fx]\n\t"
8596 @@ -121,6 +126,11 @@ static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
8597 {
8598 int err;
8599
8600 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8601 + if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
8602 + fx = (struct i387_fxsave_struct __user *)((void __user *)fx + PAX_USER_SHADOW_BASE);
8603 +#endif
8604 +
8605 /*
8606 * Clear the bytes not touched by the fxsave and reserved
8607 * for the SW usage.
8608 @@ -213,13 +223,8 @@ static inline void fpu_fxsave(struct fpu *fpu)
8609 #endif /* CONFIG_X86_64 */
8610
8611 /* We need a safe address that is cheap to find and that is already
8612 - in L1 during context switch. The best choices are unfortunately
8613 - different for UP and SMP */
8614 -#ifdef CONFIG_SMP
8615 -#define safe_address (__per_cpu_offset[0])
8616 -#else
8617 -#define safe_address (kstat_cpu(0).cpustat.user)
8618 -#endif
8619 + in L1 during context switch. */
8620 +#define safe_address (init_tss[smp_processor_id()].x86_tss.sp0)
8621
8622 /*
8623 * These must be called with preempt disabled
8624 @@ -312,7 +317,7 @@ static inline void kernel_fpu_begin(void)
8625 struct thread_info *me = current_thread_info();
8626 preempt_disable();
8627 if (me->status & TS_USEDFPU)
8628 - __save_init_fpu(me->task);
8629 + __save_init_fpu(current);
8630 else
8631 clts();
8632 }
8633 diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
8634 index d8e8eef..99f81ae 100644
8635 --- a/arch/x86/include/asm/io.h
8636 +++ b/arch/x86/include/asm/io.h
8637 @@ -194,6 +194,17 @@ extern void set_iounmap_nonlazy(void);
8638
8639 #include <linux/vmalloc.h>
8640
8641 +#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
8642 +static inline int valid_phys_addr_range(unsigned long addr, size_t count)
8643 +{
8644 + return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
8645 +}
8646 +
8647 +static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
8648 +{
8649 + return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
8650 +}
8651 +
8652 /*
8653 * Convert a virtual cached pointer to an uncached pointer
8654 */
8655 diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
8656 index bba3cf8..06bc8da 100644
8657 --- a/arch/x86/include/asm/irqflags.h
8658 +++ b/arch/x86/include/asm/irqflags.h
8659 @@ -141,6 +141,11 @@ static inline notrace unsigned long arch_local_irq_save(void)
8660 sti; \
8661 sysexit
8662
8663 +#define GET_CR0_INTO_RDI mov %cr0, %rdi
8664 +#define SET_RDI_INTO_CR0 mov %rdi, %cr0
8665 +#define GET_CR3_INTO_RDI mov %cr3, %rdi
8666 +#define SET_RDI_INTO_CR3 mov %rdi, %cr3
8667 +
8668 #else
8669 #define INTERRUPT_RETURN iret
8670 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
8671 diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
8672 index 5478825..839e88c 100644
8673 --- a/arch/x86/include/asm/kprobes.h
8674 +++ b/arch/x86/include/asm/kprobes.h
8675 @@ -37,13 +37,8 @@ typedef u8 kprobe_opcode_t;
8676 #define RELATIVEJUMP_SIZE 5
8677 #define RELATIVECALL_OPCODE 0xe8
8678 #define RELATIVE_ADDR_SIZE 4
8679 -#define MAX_STACK_SIZE 64
8680 -#define MIN_STACK_SIZE(ADDR) \
8681 - (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
8682 - THREAD_SIZE - (unsigned long)(ADDR))) \
8683 - ? (MAX_STACK_SIZE) \
8684 - : (((unsigned long)current_thread_info()) + \
8685 - THREAD_SIZE - (unsigned long)(ADDR)))
8686 +#define MAX_STACK_SIZE 64UL
8687 +#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
8688
8689 #define flush_insn_slot(p) do { } while (0)
8690
8691 diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
8692 index b4973f4..7c4d3fc 100644
8693 --- a/arch/x86/include/asm/kvm_host.h
8694 +++ b/arch/x86/include/asm/kvm_host.h
8695 @@ -459,7 +459,7 @@ struct kvm_arch {
8696 unsigned int n_requested_mmu_pages;
8697 unsigned int n_max_mmu_pages;
8698 unsigned int indirect_shadow_pages;
8699 - atomic_t invlpg_counter;
8700 + atomic_unchecked_t invlpg_counter;
8701 struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
8702 /*
8703 * Hash table of struct kvm_mmu_page.
8704 @@ -638,7 +638,7 @@ struct kvm_x86_ops {
8705 int (*check_intercept)(struct kvm_vcpu *vcpu,
8706 struct x86_instruction_info *info,
8707 enum x86_intercept_stage stage);
8708 -};
8709 +} __do_const;
8710
8711 struct kvm_arch_async_pf {
8712 u32 token;
8713 diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
8714 index 9cdae5d..300d20f 100644
8715 --- a/arch/x86/include/asm/local.h
8716 +++ b/arch/x86/include/asm/local.h
8717 @@ -18,26 +18,58 @@ typedef struct {
8718
8719 static inline void local_inc(local_t *l)
8720 {
8721 - asm volatile(_ASM_INC "%0"
8722 + asm volatile(_ASM_INC "%0\n"
8723 +
8724 +#ifdef CONFIG_PAX_REFCOUNT
8725 + "jno 0f\n"
8726 + _ASM_DEC "%0\n"
8727 + "int $4\n0:\n"
8728 + _ASM_EXTABLE(0b, 0b)
8729 +#endif
8730 +
8731 : "+m" (l->a.counter));
8732 }
8733
8734 static inline void local_dec(local_t *l)
8735 {
8736 - asm volatile(_ASM_DEC "%0"
8737 + asm volatile(_ASM_DEC "%0\n"
8738 +
8739 +#ifdef CONFIG_PAX_REFCOUNT
8740 + "jno 0f\n"
8741 + _ASM_INC "%0\n"
8742 + "int $4\n0:\n"
8743 + _ASM_EXTABLE(0b, 0b)
8744 +#endif
8745 +
8746 : "+m" (l->a.counter));
8747 }
8748
8749 static inline void local_add(long i, local_t *l)
8750 {
8751 - asm volatile(_ASM_ADD "%1,%0"
8752 + asm volatile(_ASM_ADD "%1,%0\n"
8753 +
8754 +#ifdef CONFIG_PAX_REFCOUNT
8755 + "jno 0f\n"
8756 + _ASM_SUB "%1,%0\n"
8757 + "int $4\n0:\n"
8758 + _ASM_EXTABLE(0b, 0b)
8759 +#endif
8760 +
8761 : "+m" (l->a.counter)
8762 : "ir" (i));
8763 }
8764
8765 static inline void local_sub(long i, local_t *l)
8766 {
8767 - asm volatile(_ASM_SUB "%1,%0"
8768 + asm volatile(_ASM_SUB "%1,%0\n"
8769 +
8770 +#ifdef CONFIG_PAX_REFCOUNT
8771 + "jno 0f\n"
8772 + _ASM_ADD "%1,%0\n"
8773 + "int $4\n0:\n"
8774 + _ASM_EXTABLE(0b, 0b)
8775 +#endif
8776 +
8777 : "+m" (l->a.counter)
8778 : "ir" (i));
8779 }
8780 @@ -55,7 +87,16 @@ static inline int local_sub_and_test(long i, local_t *l)
8781 {
8782 unsigned char c;
8783
8784 - asm volatile(_ASM_SUB "%2,%0; sete %1"
8785 + asm volatile(_ASM_SUB "%2,%0\n"
8786 +
8787 +#ifdef CONFIG_PAX_REFCOUNT
8788 + "jno 0f\n"
8789 + _ASM_ADD "%2,%0\n"
8790 + "int $4\n0:\n"
8791 + _ASM_EXTABLE(0b, 0b)
8792 +#endif
8793 +
8794 + "sete %1\n"
8795 : "+m" (l->a.counter), "=qm" (c)
8796 : "ir" (i) : "memory");
8797 return c;
8798 @@ -73,7 +114,16 @@ static inline int local_dec_and_test(local_t *l)
8799 {
8800 unsigned char c;
8801
8802 - asm volatile(_ASM_DEC "%0; sete %1"
8803 + asm volatile(_ASM_DEC "%0\n"
8804 +
8805 +#ifdef CONFIG_PAX_REFCOUNT
8806 + "jno 0f\n"
8807 + _ASM_INC "%0\n"
8808 + "int $4\n0:\n"
8809 + _ASM_EXTABLE(0b, 0b)
8810 +#endif
8811 +
8812 + "sete %1\n"
8813 : "+m" (l->a.counter), "=qm" (c)
8814 : : "memory");
8815 return c != 0;
8816 @@ -91,7 +141,16 @@ static inline int local_inc_and_test(local_t *l)
8817 {
8818 unsigned char c;
8819
8820 - asm volatile(_ASM_INC "%0; sete %1"
8821 + asm volatile(_ASM_INC "%0\n"
8822 +
8823 +#ifdef CONFIG_PAX_REFCOUNT
8824 + "jno 0f\n"
8825 + _ASM_DEC "%0\n"
8826 + "int $4\n0:\n"
8827 + _ASM_EXTABLE(0b, 0b)
8828 +#endif
8829 +
8830 + "sete %1\n"
8831 : "+m" (l->a.counter), "=qm" (c)
8832 : : "memory");
8833 return c != 0;
8834 @@ -110,7 +169,16 @@ static inline int local_add_negative(long i, local_t *l)
8835 {
8836 unsigned char c;
8837
8838 - asm volatile(_ASM_ADD "%2,%0; sets %1"
8839 + asm volatile(_ASM_ADD "%2,%0\n"
8840 +
8841 +#ifdef CONFIG_PAX_REFCOUNT
8842 + "jno 0f\n"
8843 + _ASM_SUB "%2,%0\n"
8844 + "int $4\n0:\n"
8845 + _ASM_EXTABLE(0b, 0b)
8846 +#endif
8847 +
8848 + "sets %1\n"
8849 : "+m" (l->a.counter), "=qm" (c)
8850 : "ir" (i) : "memory");
8851 return c;
8852 @@ -133,7 +201,15 @@ static inline long local_add_return(long i, local_t *l)
8853 #endif
8854 /* Modern 486+ processor */
8855 __i = i;
8856 - asm volatile(_ASM_XADD "%0, %1;"
8857 + asm volatile(_ASM_XADD "%0, %1\n"
8858 +
8859 +#ifdef CONFIG_PAX_REFCOUNT
8860 + "jno 0f\n"
8861 + _ASM_MOV "%0,%1\n"
8862 + "int $4\n0:\n"
8863 + _ASM_EXTABLE(0b, 0b)
8864 +#endif
8865 +
8866 : "+r" (i), "+m" (l->a.counter)
8867 : : "memory");
8868 return i + __i;
8869 diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h
8870 index 593e51d..fa69c9a 100644
8871 --- a/arch/x86/include/asm/mman.h
8872 +++ b/arch/x86/include/asm/mman.h
8873 @@ -5,4 +5,14 @@
8874
8875 #include <asm-generic/mman.h>
8876
8877 +#ifdef __KERNEL__
8878 +#ifndef __ASSEMBLY__
8879 +#ifdef CONFIG_X86_32
8880 +#define arch_mmap_check i386_mmap_check
8881 +int i386_mmap_check(unsigned long addr, unsigned long len,
8882 + unsigned long flags);
8883 +#endif
8884 +#endif
8885 +#endif
8886 +
8887 #endif /* _ASM_X86_MMAN_H */
8888 diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
8889 index 5f55e69..e20bfb1 100644
8890 --- a/arch/x86/include/asm/mmu.h
8891 +++ b/arch/x86/include/asm/mmu.h
8892 @@ -9,7 +9,7 @@
8893 * we put the segment information here.
8894 */
8895 typedef struct {
8896 - void *ldt;
8897 + struct desc_struct *ldt;
8898 int size;
8899
8900 #ifdef CONFIG_X86_64
8901 @@ -18,7 +18,19 @@ typedef struct {
8902 #endif
8903
8904 struct mutex lock;
8905 - void *vdso;
8906 + unsigned long vdso;
8907 +
8908 +#ifdef CONFIG_X86_32
8909 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
8910 + unsigned long user_cs_base;
8911 + unsigned long user_cs_limit;
8912 +
8913 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
8914 + cpumask_t cpu_user_cs_mask;
8915 +#endif
8916 +
8917 +#endif
8918 +#endif
8919 } mm_context_t;
8920
8921 #ifdef CONFIG_SMP
8922 diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
8923 index 6902152..399f3a2 100644
8924 --- a/arch/x86/include/asm/mmu_context.h
8925 +++ b/arch/x86/include/asm/mmu_context.h
8926 @@ -24,6 +24,18 @@ void destroy_context(struct mm_struct *mm);
8927
8928 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
8929 {
8930 +
8931 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8932 + unsigned int i;
8933 + pgd_t *pgd;
8934 +
8935 + pax_open_kernel();
8936 + pgd = get_cpu_pgd(smp_processor_id());
8937 + for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
8938 + set_pgd_batched(pgd+i, native_make_pgd(0));
8939 + pax_close_kernel();
8940 +#endif
8941 +
8942 #ifdef CONFIG_SMP
8943 if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
8944 percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
8945 @@ -34,16 +46,30 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
8946 struct task_struct *tsk)
8947 {
8948 unsigned cpu = smp_processor_id();
8949 +#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
8950 + int tlbstate = TLBSTATE_OK;
8951 +#endif
8952
8953 if (likely(prev != next)) {
8954 #ifdef CONFIG_SMP
8955 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
8956 + tlbstate = percpu_read(cpu_tlbstate.state);
8957 +#endif
8958 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
8959 percpu_write(cpu_tlbstate.active_mm, next);
8960 #endif
8961 cpumask_set_cpu(cpu, mm_cpumask(next));
8962
8963 /* Re-load page tables */
8964 +#ifdef CONFIG_PAX_PER_CPU_PGD
8965 + pax_open_kernel();
8966 + __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
8967 + __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
8968 + pax_close_kernel();
8969 + load_cr3(get_cpu_pgd(cpu));
8970 +#else
8971 load_cr3(next->pgd);
8972 +#endif
8973
8974 /* stop flush ipis for the previous mm */
8975 cpumask_clear_cpu(cpu, mm_cpumask(prev));
8976 @@ -53,9 +79,38 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
8977 */
8978 if (unlikely(prev->context.ldt != next->context.ldt))
8979 load_LDT_nolock(&next->context);
8980 - }
8981 +
8982 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
8983 + if (!(__supported_pte_mask & _PAGE_NX)) {
8984 + smp_mb__before_clear_bit();
8985 + cpu_clear(cpu, prev->context.cpu_user_cs_mask);
8986 + smp_mb__after_clear_bit();
8987 + cpu_set(cpu, next->context.cpu_user_cs_mask);
8988 + }
8989 +#endif
8990 +
8991 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
8992 + if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
8993 + prev->context.user_cs_limit != next->context.user_cs_limit))
8994 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
8995 #ifdef CONFIG_SMP
8996 + else if (unlikely(tlbstate != TLBSTATE_OK))
8997 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
8998 +#endif
8999 +#endif
9000 +
9001 + }
9002 else {
9003 +
9004 +#ifdef CONFIG_PAX_PER_CPU_PGD
9005 + pax_open_kernel();
9006 + __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
9007 + __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
9008 + pax_close_kernel();
9009 + load_cr3(get_cpu_pgd(cpu));
9010 +#endif
9011 +
9012 +#ifdef CONFIG_SMP
9013 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
9014 BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
9015
9016 @@ -64,11 +119,28 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
9017 * tlb flush IPI delivery. We must reload CR3
9018 * to make sure to use no freed page tables.
9019 */
9020 +
9021 +#ifndef CONFIG_PAX_PER_CPU_PGD
9022 load_cr3(next->pgd);
9023 +#endif
9024 +
9025 load_LDT_nolock(&next->context);
9026 +
9027 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
9028 + if (!(__supported_pte_mask & _PAGE_NX))
9029 + cpu_set(cpu, next->context.cpu_user_cs_mask);
9030 +#endif
9031 +
9032 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
9033 +#ifdef CONFIG_PAX_PAGEEXEC
9034 + if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
9035 +#endif
9036 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
9037 +#endif
9038 +
9039 }
9040 +#endif
9041 }
9042 -#endif
9043 }
9044
9045 #define activate_mm(prev, next) \
9046 diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
9047 index 9eae775..c914fea 100644
9048 --- a/arch/x86/include/asm/module.h
9049 +++ b/arch/x86/include/asm/module.h
9050 @@ -5,6 +5,7 @@
9051
9052 #ifdef CONFIG_X86_64
9053 /* X86_64 does not define MODULE_PROC_FAMILY */
9054 +#define MODULE_PROC_FAMILY ""
9055 #elif defined CONFIG_M386
9056 #define MODULE_PROC_FAMILY "386 "
9057 #elif defined CONFIG_M486
9058 @@ -59,8 +60,20 @@
9059 #error unknown processor family
9060 #endif
9061
9062 -#ifdef CONFIG_X86_32
9063 -# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
9064 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
9065 +#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS "
9066 +#elif defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR)
9067 +#define MODULE_PAX_KERNEXEC "KERNEXEC_OR "
9068 +#else
9069 +#define MODULE_PAX_KERNEXEC ""
9070 #endif
9071
9072 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9073 +#define MODULE_PAX_UDEREF "UDEREF "
9074 +#else
9075 +#define MODULE_PAX_UDEREF ""
9076 +#endif
9077 +
9078 +#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
9079 +
9080 #endif /* _ASM_X86_MODULE_H */
9081 diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
9082 index 7639dbf..e08a58c 100644
9083 --- a/arch/x86/include/asm/page_64_types.h
9084 +++ b/arch/x86/include/asm/page_64_types.h
9085 @@ -56,7 +56,7 @@ void copy_page(void *to, void *from);
9086
9087 /* duplicated to the one in bootmem.h */
9088 extern unsigned long max_pfn;
9089 -extern unsigned long phys_base;
9090 +extern const unsigned long phys_base;
9091
9092 extern unsigned long __phys_addr(unsigned long);
9093 #define __phys_reloc_hide(x) (x)
9094 diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
9095 index a7d2db9..edb023e 100644
9096 --- a/arch/x86/include/asm/paravirt.h
9097 +++ b/arch/x86/include/asm/paravirt.h
9098 @@ -667,6 +667,18 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
9099 val);
9100 }
9101
9102 +static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
9103 +{
9104 + pgdval_t val = native_pgd_val(pgd);
9105 +
9106 + if (sizeof(pgdval_t) > sizeof(long))
9107 + PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
9108 + val, (u64)val >> 32);
9109 + else
9110 + PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
9111 + val);
9112 +}
9113 +
9114 static inline void pgd_clear(pgd_t *pgdp)
9115 {
9116 set_pgd(pgdp, __pgd(0));
9117 @@ -748,6 +760,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
9118 pv_mmu_ops.set_fixmap(idx, phys, flags);
9119 }
9120
9121 +#ifdef CONFIG_PAX_KERNEXEC
9122 +static inline unsigned long pax_open_kernel(void)
9123 +{
9124 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
9125 +}
9126 +
9127 +static inline unsigned long pax_close_kernel(void)
9128 +{
9129 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
9130 +}
9131 +#else
9132 +static inline unsigned long pax_open_kernel(void) { return 0; }
9133 +static inline unsigned long pax_close_kernel(void) { return 0; }
9134 +#endif
9135 +
9136 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
9137
9138 static inline int arch_spin_is_locked(struct arch_spinlock *lock)
9139 @@ -964,7 +991,7 @@ extern void default_banner(void);
9140
9141 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
9142 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
9143 -#define PARA_INDIRECT(addr) *%cs:addr
9144 +#define PARA_INDIRECT(addr) *%ss:addr
9145 #endif
9146
9147 #define INTERRUPT_RETURN \
9148 @@ -1041,6 +1068,21 @@ extern void default_banner(void);
9149 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
9150 CLBR_NONE, \
9151 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
9152 +
9153 +#define GET_CR0_INTO_RDI \
9154 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
9155 + mov %rax,%rdi
9156 +
9157 +#define SET_RDI_INTO_CR0 \
9158 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
9159 +
9160 +#define GET_CR3_INTO_RDI \
9161 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
9162 + mov %rax,%rdi
9163 +
9164 +#define SET_RDI_INTO_CR3 \
9165 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
9166 +
9167 #endif /* CONFIG_X86_32 */
9168
9169 #endif /* __ASSEMBLY__ */
9170 diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
9171 index 8e8b9a4..f07d725 100644
9172 --- a/arch/x86/include/asm/paravirt_types.h
9173 +++ b/arch/x86/include/asm/paravirt_types.h
9174 @@ -84,20 +84,20 @@ struct pv_init_ops {
9175 */
9176 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
9177 unsigned long addr, unsigned len);
9178 -};
9179 +} __no_const;
9180
9181
9182 struct pv_lazy_ops {
9183 /* Set deferred update mode, used for batching operations. */
9184 void (*enter)(void);
9185 void (*leave)(void);
9186 -};
9187 +} __no_const;
9188
9189 struct pv_time_ops {
9190 unsigned long long (*sched_clock)(void);
9191 unsigned long long (*steal_clock)(int cpu);
9192 unsigned long (*get_tsc_khz)(void);
9193 -};
9194 +} __no_const;
9195
9196 struct pv_cpu_ops {
9197 /* hooks for various privileged instructions */
9198 @@ -193,7 +193,7 @@ struct pv_cpu_ops {
9199
9200 void (*start_context_switch)(struct task_struct *prev);
9201 void (*end_context_switch)(struct task_struct *next);
9202 -};
9203 +} __no_const;
9204
9205 struct pv_irq_ops {
9206 /*
9207 @@ -224,7 +224,7 @@ struct pv_apic_ops {
9208 unsigned long start_eip,
9209 unsigned long start_esp);
9210 #endif
9211 -};
9212 +} __no_const;
9213
9214 struct pv_mmu_ops {
9215 unsigned long (*read_cr2)(void);
9216 @@ -313,6 +313,7 @@ struct pv_mmu_ops {
9217 struct paravirt_callee_save make_pud;
9218
9219 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
9220 + void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
9221 #endif /* PAGETABLE_LEVELS == 4 */
9222 #endif /* PAGETABLE_LEVELS >= 3 */
9223
9224 @@ -324,6 +325,12 @@ struct pv_mmu_ops {
9225 an mfn. We can tell which is which from the index. */
9226 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
9227 phys_addr_t phys, pgprot_t flags);
9228 +
9229 +#ifdef CONFIG_PAX_KERNEXEC
9230 + unsigned long (*pax_open_kernel)(void);
9231 + unsigned long (*pax_close_kernel)(void);
9232 +#endif
9233 +
9234 };
9235
9236 struct arch_spinlock;
9237 @@ -334,7 +341,7 @@ struct pv_lock_ops {
9238 void (*spin_lock_flags)(struct arch_spinlock *lock, unsigned long flags);
9239 int (*spin_trylock)(struct arch_spinlock *lock);
9240 void (*spin_unlock)(struct arch_spinlock *lock);
9241 -};
9242 +} __no_const;
9243
9244 /* This contains all the paravirt structures: we get a convenient
9245 * number for each function using the offset which we use to indicate
9246 diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
9247 index b4389a4..b7ff22c 100644
9248 --- a/arch/x86/include/asm/pgalloc.h
9249 +++ b/arch/x86/include/asm/pgalloc.h
9250 @@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm,
9251 pmd_t *pmd, pte_t *pte)
9252 {
9253 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
9254 + set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
9255 +}
9256 +
9257 +static inline void pmd_populate_user(struct mm_struct *mm,
9258 + pmd_t *pmd, pte_t *pte)
9259 +{
9260 + paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
9261 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
9262 }
9263
9264 diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
9265 index 98391db..8f6984e 100644
9266 --- a/arch/x86/include/asm/pgtable-2level.h
9267 +++ b/arch/x86/include/asm/pgtable-2level.h
9268 @@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)
9269
9270 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
9271 {
9272 + pax_open_kernel();
9273 *pmdp = pmd;
9274 + pax_close_kernel();
9275 }
9276
9277 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
9278 diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
9279 index effff47..f9e4035 100644
9280 --- a/arch/x86/include/asm/pgtable-3level.h
9281 +++ b/arch/x86/include/asm/pgtable-3level.h
9282 @@ -38,12 +38,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
9283
9284 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
9285 {
9286 + pax_open_kernel();
9287 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
9288 + pax_close_kernel();
9289 }
9290
9291 static inline void native_set_pud(pud_t *pudp, pud_t pud)
9292 {
9293 + pax_open_kernel();
9294 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
9295 + pax_close_kernel();
9296 }
9297
9298 /*
9299 diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
9300 index 18601c8..3d716d1 100644
9301 --- a/arch/x86/include/asm/pgtable.h
9302 +++ b/arch/x86/include/asm/pgtable.h
9303 @@ -44,6 +44,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
9304
9305 #ifndef __PAGETABLE_PUD_FOLDED
9306 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
9307 +#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
9308 #define pgd_clear(pgd) native_pgd_clear(pgd)
9309 #endif
9310
9311 @@ -81,12 +82,51 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
9312
9313 #define arch_end_context_switch(prev) do {} while(0)
9314
9315 +#define pax_open_kernel() native_pax_open_kernel()
9316 +#define pax_close_kernel() native_pax_close_kernel()
9317 #endif /* CONFIG_PARAVIRT */
9318
9319 +#define __HAVE_ARCH_PAX_OPEN_KERNEL
9320 +#define __HAVE_ARCH_PAX_CLOSE_KERNEL
9321 +
9322 +#ifdef CONFIG_PAX_KERNEXEC
9323 +static inline unsigned long native_pax_open_kernel(void)
9324 +{
9325 + unsigned long cr0;
9326 +
9327 + preempt_disable();
9328 + barrier();
9329 + cr0 = read_cr0() ^ X86_CR0_WP;
9330 + BUG_ON(unlikely(cr0 & X86_CR0_WP));
9331 + write_cr0(cr0);
9332 + return cr0 ^ X86_CR0_WP;
9333 +}
9334 +
9335 +static inline unsigned long native_pax_close_kernel(void)
9336 +{
9337 + unsigned long cr0;
9338 +
9339 + cr0 = read_cr0() ^ X86_CR0_WP;
9340 + BUG_ON(unlikely(!(cr0 & X86_CR0_WP)));
9341 + write_cr0(cr0);
9342 + barrier();
9343 + preempt_enable_no_resched();
9344 + return cr0 ^ X86_CR0_WP;
9345 +}
9346 +#else
9347 +static inline unsigned long native_pax_open_kernel(void) { return 0; }
9348 +static inline unsigned long native_pax_close_kernel(void) { return 0; }
9349 +#endif
9350 +
9351 /*
9352 * The following only work if pte_present() is true.
9353 * Undefined behaviour if not..
9354 */
9355 +static inline int pte_user(pte_t pte)
9356 +{
9357 + return pte_val(pte) & _PAGE_USER;
9358 +}
9359 +
9360 static inline int pte_dirty(pte_t pte)
9361 {
9362 return pte_flags(pte) & _PAGE_DIRTY;
9363 @@ -196,9 +236,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
9364 return pte_clear_flags(pte, _PAGE_RW);
9365 }
9366
9367 +static inline pte_t pte_mkread(pte_t pte)
9368 +{
9369 + return __pte(pte_val(pte) | _PAGE_USER);
9370 +}
9371 +
9372 static inline pte_t pte_mkexec(pte_t pte)
9373 {
9374 - return pte_clear_flags(pte, _PAGE_NX);
9375 +#ifdef CONFIG_X86_PAE
9376 + if (__supported_pte_mask & _PAGE_NX)
9377 + return pte_clear_flags(pte, _PAGE_NX);
9378 + else
9379 +#endif
9380 + return pte_set_flags(pte, _PAGE_USER);
9381 +}
9382 +
9383 +static inline pte_t pte_exprotect(pte_t pte)
9384 +{
9385 +#ifdef CONFIG_X86_PAE
9386 + if (__supported_pte_mask & _PAGE_NX)
9387 + return pte_set_flags(pte, _PAGE_NX);
9388 + else
9389 +#endif
9390 + return pte_clear_flags(pte, _PAGE_USER);
9391 }
9392
9393 static inline pte_t pte_mkdirty(pte_t pte)
9394 @@ -390,6 +450,15 @@ pte_t *populate_extra_pte(unsigned long vaddr);
9395 #endif
9396
9397 #ifndef __ASSEMBLY__
9398 +
9399 +#ifdef CONFIG_PAX_PER_CPU_PGD
9400 +extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
9401 +static inline pgd_t *get_cpu_pgd(unsigned int cpu)
9402 +{
9403 + return cpu_pgd[cpu];
9404 +}
9405 +#endif
9406 +
9407 #include <linux/mm_types.h>
9408
9409 static inline int pte_none(pte_t pte)
9410 @@ -560,7 +629,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
9411
9412 static inline int pgd_bad(pgd_t pgd)
9413 {
9414 - return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
9415 + return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
9416 }
9417
9418 static inline int pgd_none(pgd_t pgd)
9419 @@ -583,7 +652,12 @@ static inline int pgd_none(pgd_t pgd)
9420 * pgd_offset() returns a (pgd_t *)
9421 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
9422 */
9423 -#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
9424 +#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
9425 +
9426 +#ifdef CONFIG_PAX_PER_CPU_PGD
9427 +#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
9428 +#endif
9429 +
9430 /*
9431 * a shortcut which implies the use of the kernel's pgd, instead
9432 * of a process's
9433 @@ -594,6 +668,20 @@ static inline int pgd_none(pgd_t pgd)
9434 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
9435 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
9436
9437 +#ifdef CONFIG_X86_32
9438 +#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
9439 +#else
9440 +#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
9441 +#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
9442 +
9443 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9444 +#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
9445 +#else
9446 +#define PAX_USER_SHADOW_BASE (_AC(0,UL))
9447 +#endif
9448 +
9449 +#endif
9450 +
9451 #ifndef __ASSEMBLY__
9452
9453 extern int direct_gbpages;
9454 @@ -758,11 +846,23 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
9455 * dst and src can be on the same page, but the range must not overlap,
9456 * and must not cross a page boundary.
9457 */
9458 -static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
9459 +static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
9460 {
9461 - memcpy(dst, src, count * sizeof(pgd_t));
9462 + pax_open_kernel();
9463 + while (count--)
9464 + *dst++ = *src++;
9465 + pax_close_kernel();
9466 }
9467
9468 +#ifdef CONFIG_PAX_PER_CPU_PGD
9469 +extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count);
9470 +#endif
9471 +
9472 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
9473 +extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count);
9474 +#else
9475 +static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count) {}
9476 +#endif
9477
9478 #include <asm-generic/pgtable.h>
9479 #endif /* __ASSEMBLY__ */
9480 diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
9481 index 0c92113..34a77c6 100644
9482 --- a/arch/x86/include/asm/pgtable_32.h
9483 +++ b/arch/x86/include/asm/pgtable_32.h
9484 @@ -25,9 +25,6 @@
9485 struct mm_struct;
9486 struct vm_area_struct;
9487
9488 -extern pgd_t swapper_pg_dir[1024];
9489 -extern pgd_t initial_page_table[1024];
9490 -
9491 static inline void pgtable_cache_init(void) { }
9492 static inline void check_pgt_cache(void) { }
9493 void paging_init(void);
9494 @@ -48,6 +45,12 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
9495 # include <asm/pgtable-2level.h>
9496 #endif
9497
9498 +extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
9499 +extern pgd_t initial_page_table[PTRS_PER_PGD];
9500 +#ifdef CONFIG_X86_PAE
9501 +extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
9502 +#endif
9503 +
9504 #if defined(CONFIG_HIGHPTE)
9505 #define pte_offset_map(dir, address) \
9506 ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
9507 @@ -62,7 +65,9 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
9508 /* Clear a kernel PTE and flush it from the TLB */
9509 #define kpte_clear_flush(ptep, vaddr) \
9510 do { \
9511 + pax_open_kernel(); \
9512 pte_clear(&init_mm, (vaddr), (ptep)); \
9513 + pax_close_kernel(); \
9514 __flush_tlb_one((vaddr)); \
9515 } while (0)
9516
9517 @@ -74,6 +79,9 @@ do { \
9518
9519 #endif /* !__ASSEMBLY__ */
9520
9521 +#define HAVE_ARCH_UNMAPPED_AREA
9522 +#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
9523 +
9524 /*
9525 * kern_addr_valid() is (1) for FLATMEM and (0) for
9526 * SPARSEMEM and DISCONTIGMEM
9527 diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
9528 index ed5903b..c7fe163 100644
9529 --- a/arch/x86/include/asm/pgtable_32_types.h
9530 +++ b/arch/x86/include/asm/pgtable_32_types.h
9531 @@ -8,7 +8,7 @@
9532 */
9533 #ifdef CONFIG_X86_PAE
9534 # include <asm/pgtable-3level_types.h>
9535 -# define PMD_SIZE (1UL << PMD_SHIFT)
9536 +# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
9537 # define PMD_MASK (~(PMD_SIZE - 1))
9538 #else
9539 # include <asm/pgtable-2level_types.h>
9540 @@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
9541 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
9542 #endif
9543
9544 +#ifdef CONFIG_PAX_KERNEXEC
9545 +#ifndef __ASSEMBLY__
9546 +extern unsigned char MODULES_EXEC_VADDR[];
9547 +extern unsigned char MODULES_EXEC_END[];
9548 +#endif
9549 +#include <asm/boot.h>
9550 +#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
9551 +#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
9552 +#else
9553 +#define ktla_ktva(addr) (addr)
9554 +#define ktva_ktla(addr) (addr)
9555 +#endif
9556 +
9557 #define MODULES_VADDR VMALLOC_START
9558 #define MODULES_END VMALLOC_END
9559 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
9560 diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
9561 index 975f709..107976d 100644
9562 --- a/arch/x86/include/asm/pgtable_64.h
9563 +++ b/arch/x86/include/asm/pgtable_64.h
9564 @@ -16,10 +16,14 @@
9565
9566 extern pud_t level3_kernel_pgt[512];
9567 extern pud_t level3_ident_pgt[512];
9568 +extern pud_t level3_vmalloc_start_pgt[512];
9569 +extern pud_t level3_vmalloc_end_pgt[512];
9570 +extern pud_t level3_vmemmap_pgt[512];
9571 +extern pud_t level2_vmemmap_pgt[512];
9572 extern pmd_t level2_kernel_pgt[512];
9573 extern pmd_t level2_fixmap_pgt[512];
9574 -extern pmd_t level2_ident_pgt[512];
9575 -extern pgd_t init_level4_pgt[];
9576 +extern pmd_t level2_ident_pgt[512*2];
9577 +extern pgd_t init_level4_pgt[512];
9578
9579 #define swapper_pg_dir init_level4_pgt
9580
9581 @@ -61,7 +65,9 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
9582
9583 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
9584 {
9585 + pax_open_kernel();
9586 *pmdp = pmd;
9587 + pax_close_kernel();
9588 }
9589
9590 static inline void native_pmd_clear(pmd_t *pmd)
9591 @@ -107,6 +113,13 @@ static inline void native_pud_clear(pud_t *pud)
9592
9593 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
9594 {
9595 + pax_open_kernel();
9596 + *pgdp = pgd;
9597 + pax_close_kernel();
9598 +}
9599 +
9600 +static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
9601 +{
9602 *pgdp = pgd;
9603 }
9604
9605 diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
9606 index 766ea16..5b96cb3 100644
9607 --- a/arch/x86/include/asm/pgtable_64_types.h
9608 +++ b/arch/x86/include/asm/pgtable_64_types.h
9609 @@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
9610 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
9611 #define MODULES_END _AC(0xffffffffff000000, UL)
9612 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
9613 +#define MODULES_EXEC_VADDR MODULES_VADDR
9614 +#define MODULES_EXEC_END MODULES_END
9615 +
9616 +#define ktla_ktva(addr) (addr)
9617 +#define ktva_ktla(addr) (addr)
9618
9619 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
9620 diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
9621 index 013286a..8b42f4f 100644
9622 --- a/arch/x86/include/asm/pgtable_types.h
9623 +++ b/arch/x86/include/asm/pgtable_types.h
9624 @@ -16,13 +16,12 @@
9625 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
9626 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
9627 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
9628 -#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
9629 +#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
9630 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
9631 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
9632 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
9633 -#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
9634 -#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
9635 -#define _PAGE_BIT_SPLITTING _PAGE_BIT_UNUSED1 /* only valid on a PSE pmd */
9636 +#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
9637 +#define _PAGE_BIT_SPLITTING _PAGE_BIT_SPECIAL /* only valid on a PSE pmd */
9638 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
9639
9640 /* If _PAGE_BIT_PRESENT is clear, we use these: */
9641 @@ -40,7 +39,6 @@
9642 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
9643 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
9644 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
9645 -#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
9646 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
9647 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
9648 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
9649 @@ -57,8 +55,10 @@
9650
9651 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
9652 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
9653 -#else
9654 +#elif defined(CONFIG_KMEMCHECK)
9655 #define _PAGE_NX (_AT(pteval_t, 0))
9656 +#else
9657 +#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
9658 #endif
9659
9660 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
9661 @@ -96,6 +96,9 @@
9662 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
9663 _PAGE_ACCESSED)
9664
9665 +#define PAGE_READONLY_NOEXEC PAGE_READONLY
9666 +#define PAGE_SHARED_NOEXEC PAGE_SHARED
9667 +
9668 #define __PAGE_KERNEL_EXEC \
9669 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
9670 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
9671 @@ -106,7 +109,7 @@
9672 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
9673 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
9674 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
9675 -#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
9676 +#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
9677 #define __PAGE_KERNEL_VVAR (__PAGE_KERNEL_RO | _PAGE_USER)
9678 #define __PAGE_KERNEL_VVAR_NOCACHE (__PAGE_KERNEL_VVAR | _PAGE_PCD | _PAGE_PWT)
9679 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
9680 @@ -168,8 +171,8 @@
9681 * bits are combined, this will alow user to access the high address mapped
9682 * VDSO in the presence of CONFIG_COMPAT_VDSO
9683 */
9684 -#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
9685 -#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
9686 +#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
9687 +#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
9688 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
9689 #endif
9690
9691 @@ -207,7 +210,17 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
9692 {
9693 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
9694 }
9695 +#endif
9696
9697 +#if PAGETABLE_LEVELS == 3
9698 +#include <asm-generic/pgtable-nopud.h>
9699 +#endif
9700 +
9701 +#if PAGETABLE_LEVELS == 2
9702 +#include <asm-generic/pgtable-nopmd.h>
9703 +#endif
9704 +
9705 +#ifndef __ASSEMBLY__
9706 #if PAGETABLE_LEVELS > 3
9707 typedef struct { pudval_t pud; } pud_t;
9708
9709 @@ -221,8 +234,6 @@ static inline pudval_t native_pud_val(pud_t pud)
9710 return pud.pud;
9711 }
9712 #else
9713 -#include <asm-generic/pgtable-nopud.h>
9714 -
9715 static inline pudval_t native_pud_val(pud_t pud)
9716 {
9717 return native_pgd_val(pud.pgd);
9718 @@ -242,8 +253,6 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
9719 return pmd.pmd;
9720 }
9721 #else
9722 -#include <asm-generic/pgtable-nopmd.h>
9723 -
9724 static inline pmdval_t native_pmd_val(pmd_t pmd)
9725 {
9726 return native_pgd_val(pmd.pud.pgd);
9727 @@ -283,7 +292,6 @@ typedef struct page *pgtable_t;
9728
9729 extern pteval_t __supported_pte_mask;
9730 extern void set_nx(void);
9731 -extern int nx_enabled;
9732
9733 #define pgprot_writecombine pgprot_writecombine
9734 extern pgprot_t pgprot_writecombine(pgprot_t prot);
9735 diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
9736 index b650435..eefa566 100644
9737 --- a/arch/x86/include/asm/processor.h
9738 +++ b/arch/x86/include/asm/processor.h
9739 @@ -268,7 +268,7 @@ struct tss_struct {
9740
9741 } ____cacheline_aligned;
9742
9743 -DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
9744 +extern struct tss_struct init_tss[NR_CPUS];
9745
9746 /*
9747 * Save the original ist values for checking stack pointers during debugging
9748 @@ -860,11 +860,18 @@ static inline void spin_lock_prefetch(const void *x)
9749 */
9750 #define TASK_SIZE PAGE_OFFSET
9751 #define TASK_SIZE_MAX TASK_SIZE
9752 +
9753 +#ifdef CONFIG_PAX_SEGMEXEC
9754 +#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
9755 +#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
9756 +#else
9757 #define STACK_TOP TASK_SIZE
9758 -#define STACK_TOP_MAX STACK_TOP
9759 +#endif
9760 +
9761 +#define STACK_TOP_MAX TASK_SIZE
9762
9763 #define INIT_THREAD { \
9764 - .sp0 = sizeof(init_stack) + (long)&init_stack, \
9765 + .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
9766 .vm86_info = NULL, \
9767 .sysenter_cs = __KERNEL_CS, \
9768 .io_bitmap_ptr = NULL, \
9769 @@ -878,7 +885,7 @@ static inline void spin_lock_prefetch(const void *x)
9770 */
9771 #define INIT_TSS { \
9772 .x86_tss = { \
9773 - .sp0 = sizeof(init_stack) + (long)&init_stack, \
9774 + .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
9775 .ss0 = __KERNEL_DS, \
9776 .ss1 = __KERNEL_CS, \
9777 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
9778 @@ -889,11 +896,7 @@ static inline void spin_lock_prefetch(const void *x)
9779 extern unsigned long thread_saved_pc(struct task_struct *tsk);
9780
9781 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
9782 -#define KSTK_TOP(info) \
9783 -({ \
9784 - unsigned long *__ptr = (unsigned long *)(info); \
9785 - (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
9786 -})
9787 +#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
9788
9789 /*
9790 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
9791 @@ -908,7 +911,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
9792 #define task_pt_regs(task) \
9793 ({ \
9794 struct pt_regs *__regs__; \
9795 - __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
9796 + __regs__ = (struct pt_regs *)((task)->thread.sp0); \
9797 __regs__ - 1; \
9798 })
9799
9800 @@ -918,13 +921,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
9801 /*
9802 * User space process size. 47bits minus one guard page.
9803 */
9804 -#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
9805 +#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
9806
9807 /* This decides where the kernel will search for a free chunk of vm
9808 * space during mmap's.
9809 */
9810 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
9811 - 0xc0000000 : 0xFFFFe000)
9812 + 0xc0000000 : 0xFFFFf000)
9813
9814 #define TASK_SIZE (test_thread_flag(TIF_IA32) ? \
9815 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
9816 @@ -935,11 +938,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
9817 #define STACK_TOP_MAX TASK_SIZE_MAX
9818
9819 #define INIT_THREAD { \
9820 - .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
9821 + .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
9822 }
9823
9824 #define INIT_TSS { \
9825 - .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
9826 + .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
9827 }
9828
9829 /*
9830 @@ -961,6 +964,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
9831 */
9832 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
9833
9834 +#ifdef CONFIG_PAX_SEGMEXEC
9835 +#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
9836 +#endif
9837 +
9838 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
9839
9840 /* Get/set a process' ability to use the timestamp counter instruction */
9841 diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
9842 index 3566454..4bdfb8c 100644
9843 --- a/arch/x86/include/asm/ptrace.h
9844 +++ b/arch/x86/include/asm/ptrace.h
9845 @@ -156,28 +156,29 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
9846 }
9847
9848 /*
9849 - * user_mode_vm(regs) determines whether a register set came from user mode.
9850 + * user_mode(regs) determines whether a register set came from user mode.
9851 * This is true if V8086 mode was enabled OR if the register set was from
9852 * protected mode with RPL-3 CS value. This tricky test checks that with
9853 * one comparison. Many places in the kernel can bypass this full check
9854 - * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
9855 + * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
9856 + * be used.
9857 */
9858 -static inline int user_mode(struct pt_regs *regs)
9859 +static inline int user_mode_novm(struct pt_regs *regs)
9860 {
9861 #ifdef CONFIG_X86_32
9862 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
9863 #else
9864 - return !!(regs->cs & 3);
9865 + return !!(regs->cs & SEGMENT_RPL_MASK);
9866 #endif
9867 }
9868
9869 -static inline int user_mode_vm(struct pt_regs *regs)
9870 +static inline int user_mode(struct pt_regs *regs)
9871 {
9872 #ifdef CONFIG_X86_32
9873 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
9874 USER_RPL;
9875 #else
9876 - return user_mode(regs);
9877 + return user_mode_novm(regs);
9878 #endif
9879 }
9880
9881 @@ -193,15 +194,16 @@ static inline int v8086_mode(struct pt_regs *regs)
9882 #ifdef CONFIG_X86_64
9883 static inline bool user_64bit_mode(struct pt_regs *regs)
9884 {
9885 + unsigned long cs = regs->cs & 0xffff;
9886 #ifndef CONFIG_PARAVIRT
9887 /*
9888 * On non-paravirt systems, this is the only long mode CPL 3
9889 * selector. We do not allow long mode selectors in the LDT.
9890 */
9891 - return regs->cs == __USER_CS;
9892 + return cs == __USER_CS;
9893 #else
9894 /* Headers are too twisted for this to go in paravirt.h. */
9895 - return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs;
9896 + return cs == __USER_CS || cs == pv_info.extra_user_64bit_cs;
9897 #endif
9898 }
9899 #endif
9900 diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
9901 index 92f29706..a79cbbb 100644
9902 --- a/arch/x86/include/asm/reboot.h
9903 +++ b/arch/x86/include/asm/reboot.h
9904 @@ -6,19 +6,19 @@
9905 struct pt_regs;
9906
9907 struct machine_ops {
9908 - void (*restart)(char *cmd);
9909 - void (*halt)(void);
9910 - void (*power_off)(void);
9911 + void (* __noreturn restart)(char *cmd);
9912 + void (* __noreturn halt)(void);
9913 + void (* __noreturn power_off)(void);
9914 void (*shutdown)(void);
9915 void (*crash_shutdown)(struct pt_regs *);
9916 - void (*emergency_restart)(void);
9917 -};
9918 + void (* __noreturn emergency_restart)(void);
9919 +} __no_const;
9920
9921 extern struct machine_ops machine_ops;
9922
9923 void native_machine_crash_shutdown(struct pt_regs *regs);
9924 void native_machine_shutdown(void);
9925 -void machine_real_restart(unsigned int type);
9926 +void machine_real_restart(unsigned int type) __noreturn;
9927 /* These must match dispatch_table in reboot_32.S */
9928 #define MRR_BIOS 0
9929 #define MRR_APM 1
9930 diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
9931 index 2dbe4a7..ce1db00 100644
9932 --- a/arch/x86/include/asm/rwsem.h
9933 +++ b/arch/x86/include/asm/rwsem.h
9934 @@ -64,6 +64,14 @@ static inline void __down_read(struct rw_semaphore *sem)
9935 {
9936 asm volatile("# beginning down_read\n\t"
9937 LOCK_PREFIX _ASM_INC "(%1)\n\t"
9938 +
9939 +#ifdef CONFIG_PAX_REFCOUNT
9940 + "jno 0f\n"
9941 + LOCK_PREFIX _ASM_DEC "(%1)\n"
9942 + "int $4\n0:\n"
9943 + _ASM_EXTABLE(0b, 0b)
9944 +#endif
9945 +
9946 /* adds 0x00000001 */
9947 " jns 1f\n"
9948 " call call_rwsem_down_read_failed\n"
9949 @@ -85,6 +93,14 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
9950 "1:\n\t"
9951 " mov %1,%2\n\t"
9952 " add %3,%2\n\t"
9953 +
9954 +#ifdef CONFIG_PAX_REFCOUNT
9955 + "jno 0f\n"
9956 + "sub %3,%2\n"
9957 + "int $4\n0:\n"
9958 + _ASM_EXTABLE(0b, 0b)
9959 +#endif
9960 +
9961 " jle 2f\n\t"
9962 LOCK_PREFIX " cmpxchg %2,%0\n\t"
9963 " jnz 1b\n\t"
9964 @@ -104,6 +120,14 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
9965 long tmp;
9966 asm volatile("# beginning down_write\n\t"
9967 LOCK_PREFIX " xadd %1,(%2)\n\t"
9968 +
9969 +#ifdef CONFIG_PAX_REFCOUNT
9970 + "jno 0f\n"
9971 + "mov %1,(%2)\n"
9972 + "int $4\n0:\n"
9973 + _ASM_EXTABLE(0b, 0b)
9974 +#endif
9975 +
9976 /* adds 0xffff0001, returns the old value */
9977 " test %1,%1\n\t"
9978 /* was the count 0 before? */
9979 @@ -141,6 +165,14 @@ static inline void __up_read(struct rw_semaphore *sem)
9980 long tmp;
9981 asm volatile("# beginning __up_read\n\t"
9982 LOCK_PREFIX " xadd %1,(%2)\n\t"
9983 +
9984 +#ifdef CONFIG_PAX_REFCOUNT
9985 + "jno 0f\n"
9986 + "mov %1,(%2)\n"
9987 + "int $4\n0:\n"
9988 + _ASM_EXTABLE(0b, 0b)
9989 +#endif
9990 +
9991 /* subtracts 1, returns the old value */
9992 " jns 1f\n\t"
9993 " call call_rwsem_wake\n" /* expects old value in %edx */
9994 @@ -159,6 +191,14 @@ static inline void __up_write(struct rw_semaphore *sem)
9995 long tmp;
9996 asm volatile("# beginning __up_write\n\t"
9997 LOCK_PREFIX " xadd %1,(%2)\n\t"
9998 +
9999 +#ifdef CONFIG_PAX_REFCOUNT
10000 + "jno 0f\n"
10001 + "mov %1,(%2)\n"
10002 + "int $4\n0:\n"
10003 + _ASM_EXTABLE(0b, 0b)
10004 +#endif
10005 +
10006 /* subtracts 0xffff0001, returns the old value */
10007 " jns 1f\n\t"
10008 " call call_rwsem_wake\n" /* expects old value in %edx */
10009 @@ -176,6 +216,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
10010 {
10011 asm volatile("# beginning __downgrade_write\n\t"
10012 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
10013 +
10014 +#ifdef CONFIG_PAX_REFCOUNT
10015 + "jno 0f\n"
10016 + LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
10017 + "int $4\n0:\n"
10018 + _ASM_EXTABLE(0b, 0b)
10019 +#endif
10020 +
10021 /*
10022 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
10023 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
10024 @@ -194,7 +242,15 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
10025 */
10026 static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
10027 {
10028 - asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
10029 + asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
10030 +
10031 +#ifdef CONFIG_PAX_REFCOUNT
10032 + "jno 0f\n"
10033 + LOCK_PREFIX _ASM_SUB "%1,%0\n"
10034 + "int $4\n0:\n"
10035 + _ASM_EXTABLE(0b, 0b)
10036 +#endif
10037 +
10038 : "+m" (sem->count)
10039 : "er" (delta));
10040 }
10041 @@ -204,7 +260,7 @@ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
10042 */
10043 static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
10044 {
10045 - return delta + xadd(&sem->count, delta);
10046 + return delta + xadd_check_overflow(&sem->count, delta);
10047 }
10048
10049 #endif /* __KERNEL__ */
10050 diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
10051 index 5e64171..f58957e 100644
10052 --- a/arch/x86/include/asm/segment.h
10053 +++ b/arch/x86/include/asm/segment.h
10054 @@ -64,10 +64,15 @@
10055 * 26 - ESPFIX small SS
10056 * 27 - per-cpu [ offset to per-cpu data area ]
10057 * 28 - stack_canary-20 [ for stack protector ]
10058 - * 29 - unused
10059 - * 30 - unused
10060 + * 29 - PCI BIOS CS
10061 + * 30 - PCI BIOS DS
10062 * 31 - TSS for double fault handler
10063 */
10064 +#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
10065 +#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
10066 +#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
10067 +#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
10068 +
10069 #define GDT_ENTRY_TLS_MIN 6
10070 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
10071
10072 @@ -79,6 +84,8 @@
10073
10074 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0)
10075
10076 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
10077 +
10078 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1)
10079
10080 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4)
10081 @@ -104,6 +111,12 @@
10082 #define __KERNEL_STACK_CANARY 0
10083 #endif
10084
10085 +#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE+17)
10086 +#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
10087 +
10088 +#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE+18)
10089 +#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
10090 +
10091 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
10092
10093 /*
10094 @@ -141,7 +154,7 @@
10095 */
10096
10097 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
10098 -#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
10099 +#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
10100
10101
10102 #else
10103 @@ -165,6 +178,8 @@
10104 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS*8+3)
10105 #define __USER32_DS __USER_DS
10106
10107 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
10108 +
10109 #define GDT_ENTRY_TSS 8 /* needs two entries */
10110 #define GDT_ENTRY_LDT 10 /* needs two entries */
10111 #define GDT_ENTRY_TLS_MIN 12
10112 @@ -185,6 +200,7 @@
10113 #endif
10114
10115 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
10116 +#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
10117 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
10118 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3)
10119 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3)
10120 diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
10121 index 73b11bc..d4a3b63 100644
10122 --- a/arch/x86/include/asm/smp.h
10123 +++ b/arch/x86/include/asm/smp.h
10124 @@ -36,7 +36,7 @@ DECLARE_PER_CPU(cpumask_var_t, cpu_core_map);
10125 /* cpus sharing the last level cache: */
10126 DECLARE_PER_CPU(cpumask_var_t, cpu_llc_shared_map);
10127 DECLARE_PER_CPU(u16, cpu_llc_id);
10128 -DECLARE_PER_CPU(int, cpu_number);
10129 +DECLARE_PER_CPU(unsigned int, cpu_number);
10130
10131 static inline struct cpumask *cpu_sibling_mask(int cpu)
10132 {
10133 @@ -77,7 +77,7 @@ struct smp_ops {
10134
10135 void (*send_call_func_ipi)(const struct cpumask *mask);
10136 void (*send_call_func_single_ipi)(int cpu);
10137 -};
10138 +} __no_const;
10139
10140 /* Globals due to paravirt */
10141 extern void set_cpu_sibling_map(int cpu);
10142 @@ -192,14 +192,8 @@ extern unsigned disabled_cpus __cpuinitdata;
10143 extern int safe_smp_processor_id(void);
10144
10145 #elif defined(CONFIG_X86_64_SMP)
10146 -#define raw_smp_processor_id() (percpu_read(cpu_number))
10147 -
10148 -#define stack_smp_processor_id() \
10149 -({ \
10150 - struct thread_info *ti; \
10151 - __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
10152 - ti->cpu; \
10153 -})
10154 +#define raw_smp_processor_id() (percpu_read(cpu_number))
10155 +#define stack_smp_processor_id() raw_smp_processor_id()
10156 #define safe_smp_processor_id() smp_processor_id()
10157
10158 #endif
10159 diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
10160 index 972c260..43ab1fd 100644
10161 --- a/arch/x86/include/asm/spinlock.h
10162 +++ b/arch/x86/include/asm/spinlock.h
10163 @@ -188,6 +188,14 @@ static inline int arch_write_can_lock(arch_rwlock_t *lock)
10164 static inline void arch_read_lock(arch_rwlock_t *rw)
10165 {
10166 asm volatile(LOCK_PREFIX READ_LOCK_SIZE(dec) " (%0)\n\t"
10167 +
10168 +#ifdef CONFIG_PAX_REFCOUNT
10169 + "jno 0f\n"
10170 + LOCK_PREFIX READ_LOCK_SIZE(inc) " (%0)\n"
10171 + "int $4\n0:\n"
10172 + _ASM_EXTABLE(0b, 0b)
10173 +#endif
10174 +
10175 "jns 1f\n"
10176 "call __read_lock_failed\n\t"
10177 "1:\n"
10178 @@ -197,6 +205,14 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
10179 static inline void arch_write_lock(arch_rwlock_t *rw)
10180 {
10181 asm volatile(LOCK_PREFIX WRITE_LOCK_SUB(%1) "(%0)\n\t"
10182 +
10183 +#ifdef CONFIG_PAX_REFCOUNT
10184 + "jno 0f\n"
10185 + LOCK_PREFIX WRITE_LOCK_ADD(%1) "(%0)\n"
10186 + "int $4\n0:\n"
10187 + _ASM_EXTABLE(0b, 0b)
10188 +#endif
10189 +
10190 "jz 1f\n"
10191 "call __write_lock_failed\n\t"
10192 "1:\n"
10193 @@ -226,13 +242,29 @@ static inline int arch_write_trylock(arch_rwlock_t *lock)
10194
10195 static inline void arch_read_unlock(arch_rwlock_t *rw)
10196 {
10197 - asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0"
10198 + asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0\n"
10199 +
10200 +#ifdef CONFIG_PAX_REFCOUNT
10201 + "jno 0f\n"
10202 + LOCK_PREFIX READ_LOCK_SIZE(dec) " %0\n"
10203 + "int $4\n0:\n"
10204 + _ASM_EXTABLE(0b, 0b)
10205 +#endif
10206 +
10207 :"+m" (rw->lock) : : "memory");
10208 }
10209
10210 static inline void arch_write_unlock(arch_rwlock_t *rw)
10211 {
10212 - asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0"
10213 + asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0\n"
10214 +
10215 +#ifdef CONFIG_PAX_REFCOUNT
10216 + "jno 0f\n"
10217 + LOCK_PREFIX WRITE_LOCK_SUB(%1) "%0\n"
10218 + "int $4\n0:\n"
10219 + _ASM_EXTABLE(0b, 0b)
10220 +#endif
10221 +
10222 : "+m" (rw->write) : "i" (RW_LOCK_BIAS) : "memory");
10223 }
10224
10225 diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
10226 index 1575177..cb23f52 100644
10227 --- a/arch/x86/include/asm/stackprotector.h
10228 +++ b/arch/x86/include/asm/stackprotector.h
10229 @@ -48,7 +48,7 @@
10230 * head_32 for boot CPU and setup_per_cpu_areas() for others.
10231 */
10232 #define GDT_STACK_CANARY_INIT \
10233 - [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
10234 + [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
10235
10236 /*
10237 * Initialize the stackprotector canary value.
10238 @@ -113,7 +113,7 @@ static inline void setup_stack_canary_segment(int cpu)
10239
10240 static inline void load_stack_canary_segment(void)
10241 {
10242 -#ifdef CONFIG_X86_32
10243 +#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
10244 asm volatile ("mov %0, %%gs" : : "r" (0));
10245 #endif
10246 }
10247 diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
10248 index 70bbe39..4ae2bd4 100644
10249 --- a/arch/x86/include/asm/stacktrace.h
10250 +++ b/arch/x86/include/asm/stacktrace.h
10251 @@ -11,28 +11,20 @@
10252
10253 extern int kstack_depth_to_print;
10254
10255 -struct thread_info;
10256 +struct task_struct;
10257 struct stacktrace_ops;
10258
10259 -typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
10260 - unsigned long *stack,
10261 - unsigned long bp,
10262 - const struct stacktrace_ops *ops,
10263 - void *data,
10264 - unsigned long *end,
10265 - int *graph);
10266 +typedef unsigned long walk_stack_t(struct task_struct *task,
10267 + void *stack_start,
10268 + unsigned long *stack,
10269 + unsigned long bp,
10270 + const struct stacktrace_ops *ops,
10271 + void *data,
10272 + unsigned long *end,
10273 + int *graph);
10274
10275 -extern unsigned long
10276 -print_context_stack(struct thread_info *tinfo,
10277 - unsigned long *stack, unsigned long bp,
10278 - const struct stacktrace_ops *ops, void *data,
10279 - unsigned long *end, int *graph);
10280 -
10281 -extern unsigned long
10282 -print_context_stack_bp(struct thread_info *tinfo,
10283 - unsigned long *stack, unsigned long bp,
10284 - const struct stacktrace_ops *ops, void *data,
10285 - unsigned long *end, int *graph);
10286 +extern walk_stack_t print_context_stack;
10287 +extern walk_stack_t print_context_stack_bp;
10288
10289 /* Generic stack tracer with callbacks */
10290
10291 @@ -40,7 +32,7 @@ struct stacktrace_ops {
10292 void (*address)(void *data, unsigned long address, int reliable);
10293 /* On negative return stop dumping */
10294 int (*stack)(void *data, char *name);
10295 - walk_stack_t walk_stack;
10296 + walk_stack_t *walk_stack;
10297 };
10298
10299 void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
10300 diff --git a/arch/x86/include/asm/sys_ia32.h b/arch/x86/include/asm/sys_ia32.h
10301 index cb23852..2dde194 100644
10302 --- a/arch/x86/include/asm/sys_ia32.h
10303 +++ b/arch/x86/include/asm/sys_ia32.h
10304 @@ -40,7 +40,7 @@ asmlinkage long sys32_rt_sigprocmask(int, compat_sigset_t __user *,
10305 compat_sigset_t __user *, unsigned int);
10306 asmlinkage long sys32_alarm(unsigned int);
10307
10308 -asmlinkage long sys32_waitpid(compat_pid_t, unsigned int *, int);
10309 +asmlinkage long sys32_waitpid(compat_pid_t, unsigned int __user *, int);
10310 asmlinkage long sys32_sysfs(int, u32, u32);
10311
10312 asmlinkage long sys32_sched_rr_get_interval(compat_pid_t,
10313 diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h
10314 index 2d2f01c..f985723 100644
10315 --- a/arch/x86/include/asm/system.h
10316 +++ b/arch/x86/include/asm/system.h
10317 @@ -129,7 +129,7 @@ do { \
10318 "call __switch_to\n\t" \
10319 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
10320 __switch_canary \
10321 - "movq %P[thread_info](%%rsi),%%r8\n\t" \
10322 + "movq "__percpu_arg([thread_info])",%%r8\n\t" \
10323 "movq %%rax,%%rdi\n\t" \
10324 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
10325 "jnz ret_from_fork\n\t" \
10326 @@ -140,7 +140,7 @@ do { \
10327 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
10328 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
10329 [_tif_fork] "i" (_TIF_FORK), \
10330 - [thread_info] "i" (offsetof(struct task_struct, stack)), \
10331 + [thread_info] "m" (current_tinfo), \
10332 [current_task] "m" (current_task) \
10333 __switch_canary_iparam \
10334 : "memory", "cc" __EXTRA_CLOBBER)
10335 @@ -200,7 +200,7 @@ static inline unsigned long get_limit(unsigned long segment)
10336 {
10337 unsigned long __limit;
10338 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
10339 - return __limit + 1;
10340 + return __limit;
10341 }
10342
10343 static inline void native_clts(void)
10344 @@ -397,13 +397,13 @@ void enable_hlt(void);
10345
10346 void cpu_idle_wait(void);
10347
10348 -extern unsigned long arch_align_stack(unsigned long sp);
10349 +#define arch_align_stack(x) ((x) & ~0xfUL)
10350 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
10351
10352 void default_idle(void);
10353 bool set_pm_idle_to_default(void);
10354
10355 -void stop_this_cpu(void *dummy);
10356 +void stop_this_cpu(void *dummy) __noreturn;
10357
10358 /*
10359 * Force strict CPU ordering.
10360 diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
10361 index a1fe5c1..ee326d8 100644
10362 --- a/arch/x86/include/asm/thread_info.h
10363 +++ b/arch/x86/include/asm/thread_info.h
10364 @@ -10,6 +10,7 @@
10365 #include <linux/compiler.h>
10366 #include <asm/page.h>
10367 #include <asm/types.h>
10368 +#include <asm/percpu.h>
10369
10370 /*
10371 * low level task data that entry.S needs immediate access to
10372 @@ -24,7 +25,6 @@ struct exec_domain;
10373 #include <linux/atomic.h>
10374
10375 struct thread_info {
10376 - struct task_struct *task; /* main task structure */
10377 struct exec_domain *exec_domain; /* execution domain */
10378 __u32 flags; /* low level flags */
10379 __u32 status; /* thread synchronous flags */
10380 @@ -34,18 +34,12 @@ struct thread_info {
10381 mm_segment_t addr_limit;
10382 struct restart_block restart_block;
10383 void __user *sysenter_return;
10384 -#ifdef CONFIG_X86_32
10385 - unsigned long previous_esp; /* ESP of the previous stack in
10386 - case of nested (IRQ) stacks
10387 - */
10388 - __u8 supervisor_stack[0];
10389 -#endif
10390 + unsigned long lowest_stack;
10391 int uaccess_err;
10392 };
10393
10394 -#define INIT_THREAD_INFO(tsk) \
10395 +#define INIT_THREAD_INFO \
10396 { \
10397 - .task = &tsk, \
10398 .exec_domain = &default_exec_domain, \
10399 .flags = 0, \
10400 .cpu = 0, \
10401 @@ -56,7 +50,7 @@ struct thread_info {
10402 }, \
10403 }
10404
10405 -#define init_thread_info (init_thread_union.thread_info)
10406 +#define init_thread_info (init_thread_union.stack)
10407 #define init_stack (init_thread_union.stack)
10408
10409 #else /* !__ASSEMBLY__ */
10410 @@ -170,45 +164,40 @@ struct thread_info {
10411 ret; \
10412 })
10413
10414 -#ifdef CONFIG_X86_32
10415 -
10416 -#define STACK_WARN (THREAD_SIZE/8)
10417 -/*
10418 - * macros/functions for gaining access to the thread information structure
10419 - *
10420 - * preempt_count needs to be 1 initially, until the scheduler is functional.
10421 - */
10422 -#ifndef __ASSEMBLY__
10423 -
10424 -
10425 -/* how to get the current stack pointer from C */
10426 -register unsigned long current_stack_pointer asm("esp") __used;
10427 -
10428 -/* how to get the thread information struct from C */
10429 -static inline struct thread_info *current_thread_info(void)
10430 -{
10431 - return (struct thread_info *)
10432 - (current_stack_pointer & ~(THREAD_SIZE - 1));
10433 -}
10434 -
10435 -#else /* !__ASSEMBLY__ */
10436 -
10437 +#ifdef __ASSEMBLY__
10438 /* how to get the thread information struct from ASM */
10439 #define GET_THREAD_INFO(reg) \
10440 - movl $-THREAD_SIZE, reg; \
10441 - andl %esp, reg
10442 + mov PER_CPU_VAR(current_tinfo), reg
10443
10444 /* use this one if reg already contains %esp */
10445 -#define GET_THREAD_INFO_WITH_ESP(reg) \
10446 - andl $-THREAD_SIZE, reg
10447 +#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
10448 +#else
10449 +/* how to get the thread information struct from C */
10450 +DECLARE_PER_CPU(struct thread_info *, current_tinfo);
10451 +
10452 +static __always_inline struct thread_info *current_thread_info(void)
10453 +{
10454 + return percpu_read_stable(current_tinfo);
10455 +}
10456 +#endif
10457 +
10458 +#ifdef CONFIG_X86_32
10459 +
10460 +#define STACK_WARN (THREAD_SIZE/8)
10461 +/*
10462 + * macros/functions for gaining access to the thread information structure
10463 + *
10464 + * preempt_count needs to be 1 initially, until the scheduler is functional.
10465 + */
10466 +#ifndef __ASSEMBLY__
10467 +
10468 +/* how to get the current stack pointer from C */
10469 +register unsigned long current_stack_pointer asm("esp") __used;
10470
10471 #endif
10472
10473 #else /* X86_32 */
10474
10475 -#include <asm/percpu.h>
10476 -#define KERNEL_STACK_OFFSET (5*8)
10477 -
10478 /*
10479 * macros/functions for gaining access to the thread information structure
10480 * preempt_count needs to be 1 initially, until the scheduler is functional.
10481 @@ -216,21 +205,8 @@ static inline struct thread_info *current_thread_info(void)
10482 #ifndef __ASSEMBLY__
10483 DECLARE_PER_CPU(unsigned long, kernel_stack);
10484
10485 -static inline struct thread_info *current_thread_info(void)
10486 -{
10487 - struct thread_info *ti;
10488 - ti = (void *)(percpu_read_stable(kernel_stack) +
10489 - KERNEL_STACK_OFFSET - THREAD_SIZE);
10490 - return ti;
10491 -}
10492 -
10493 -#else /* !__ASSEMBLY__ */
10494 -
10495 -/* how to get the thread information struct from ASM */
10496 -#define GET_THREAD_INFO(reg) \
10497 - movq PER_CPU_VAR(kernel_stack),reg ; \
10498 - subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
10499 -
10500 +/* how to get the current stack pointer from C */
10501 +register unsigned long current_stack_pointer asm("rsp") __used;
10502 #endif
10503
10504 #endif /* !X86_32 */
10505 @@ -266,5 +242,16 @@ extern void arch_task_cache_init(void);
10506 extern void free_thread_info(struct thread_info *ti);
10507 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
10508 #define arch_task_cache_init arch_task_cache_init
10509 +
10510 +#define __HAVE_THREAD_FUNCTIONS
10511 +#define task_thread_info(task) (&(task)->tinfo)
10512 +#define task_stack_page(task) ((task)->stack)
10513 +#define setup_thread_stack(p, org) do {} while (0)
10514 +#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
10515 +
10516 +#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
10517 +extern struct task_struct *alloc_task_struct_node(int node);
10518 +extern void free_task_struct(struct task_struct *);
10519 +
10520 #endif
10521 #endif /* _ASM_X86_THREAD_INFO_H */
10522 diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
10523 index 36361bf..324f262 100644
10524 --- a/arch/x86/include/asm/uaccess.h
10525 +++ b/arch/x86/include/asm/uaccess.h
10526 @@ -7,12 +7,15 @@
10527 #include <linux/compiler.h>
10528 #include <linux/thread_info.h>
10529 #include <linux/string.h>
10530 +#include <linux/sched.h>
10531 #include <asm/asm.h>
10532 #include <asm/page.h>
10533
10534 #define VERIFY_READ 0
10535 #define VERIFY_WRITE 1
10536
10537 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
10538 +
10539 /*
10540 * The fs value determines whether argument validity checking should be
10541 * performed or not. If get_fs() == USER_DS, checking is performed, with
10542 @@ -28,7 +31,12 @@
10543
10544 #define get_ds() (KERNEL_DS)
10545 #define get_fs() (current_thread_info()->addr_limit)
10546 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
10547 +void __set_fs(mm_segment_t x);
10548 +void set_fs(mm_segment_t x);
10549 +#else
10550 #define set_fs(x) (current_thread_info()->addr_limit = (x))
10551 +#endif
10552
10553 #define segment_eq(a, b) ((a).seg == (b).seg)
10554
10555 @@ -76,7 +84,33 @@
10556 * checks that the pointer is in the user space range - after calling
10557 * this function, memory access functions may still return -EFAULT.
10558 */
10559 -#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
10560 +#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
10561 +#define access_ok(type, addr, size) \
10562 +({ \
10563 + long __size = size; \
10564 + unsigned long __addr = (unsigned long)addr; \
10565 + unsigned long __addr_ao = __addr & PAGE_MASK; \
10566 + unsigned long __end_ao = __addr + __size - 1; \
10567 + bool __ret_ao = __range_not_ok(__addr, __size) == 0; \
10568 + if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
10569 + while(__addr_ao <= __end_ao) { \
10570 + char __c_ao; \
10571 + __addr_ao += PAGE_SIZE; \
10572 + if (__size > PAGE_SIZE) \
10573 + cond_resched(); \
10574 + if (__get_user(__c_ao, (char __user *)__addr)) \
10575 + break; \
10576 + if (type != VERIFY_WRITE) { \
10577 + __addr = __addr_ao; \
10578 + continue; \
10579 + } \
10580 + if (__put_user(__c_ao, (char __user *)__addr)) \
10581 + break; \
10582 + __addr = __addr_ao; \
10583 + } \
10584 + } \
10585 + __ret_ao; \
10586 +})
10587
10588 /*
10589 * The exception table consists of pairs of addresses: the first is the
10590 @@ -182,12 +216,20 @@ extern int __get_user_bad(void);
10591 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
10592 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
10593
10594 -
10595 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
10596 +#define __copyuser_seg "gs;"
10597 +#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
10598 +#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
10599 +#else
10600 +#define __copyuser_seg
10601 +#define __COPYUSER_SET_ES
10602 +#define __COPYUSER_RESTORE_ES
10603 +#endif
10604
10605 #ifdef CONFIG_X86_32
10606 #define __put_user_asm_u64(x, addr, err, errret) \
10607 - asm volatile("1: movl %%eax,0(%2)\n" \
10608 - "2: movl %%edx,4(%2)\n" \
10609 + asm volatile("1: "__copyuser_seg"movl %%eax,0(%2)\n" \
10610 + "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
10611 "3:\n" \
10612 ".section .fixup,\"ax\"\n" \
10613 "4: movl %3,%0\n" \
10614 @@ -199,8 +241,8 @@ extern int __get_user_bad(void);
10615 : "A" (x), "r" (addr), "i" (errret), "0" (err))
10616
10617 #define __put_user_asm_ex_u64(x, addr) \
10618 - asm volatile("1: movl %%eax,0(%1)\n" \
10619 - "2: movl %%edx,4(%1)\n" \
10620 + asm volatile("1: "__copyuser_seg"movl %%eax,0(%1)\n" \
10621 + "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
10622 "3:\n" \
10623 _ASM_EXTABLE(1b, 2b - 1b) \
10624 _ASM_EXTABLE(2b, 3b - 2b) \
10625 @@ -252,7 +294,7 @@ extern void __put_user_8(void);
10626 __typeof__(*(ptr)) __pu_val; \
10627 __chk_user_ptr(ptr); \
10628 might_fault(); \
10629 - __pu_val = x; \
10630 + __pu_val = (x); \
10631 switch (sizeof(*(ptr))) { \
10632 case 1: \
10633 __put_user_x(1, __pu_val, ptr, __ret_pu); \
10634 @@ -373,7 +415,7 @@ do { \
10635 } while (0)
10636
10637 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
10638 - asm volatile("1: mov"itype" %2,%"rtype"1\n" \
10639 + asm volatile("1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
10640 "2:\n" \
10641 ".section .fixup,\"ax\"\n" \
10642 "3: mov %3,%0\n" \
10643 @@ -381,7 +423,7 @@ do { \
10644 " jmp 2b\n" \
10645 ".previous\n" \
10646 _ASM_EXTABLE(1b, 3b) \
10647 - : "=r" (err), ltype(x) \
10648 + : "=r" (err), ltype (x) \
10649 : "m" (__m(addr)), "i" (errret), "0" (err))
10650
10651 #define __get_user_size_ex(x, ptr, size) \
10652 @@ -406,7 +448,7 @@ do { \
10653 } while (0)
10654
10655 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
10656 - asm volatile("1: mov"itype" %1,%"rtype"0\n" \
10657 + asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
10658 "2:\n" \
10659 _ASM_EXTABLE(1b, 2b - 1b) \
10660 : ltype(x) : "m" (__m(addr)))
10661 @@ -423,13 +465,24 @@ do { \
10662 int __gu_err; \
10663 unsigned long __gu_val; \
10664 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
10665 - (x) = (__force __typeof__(*(ptr)))__gu_val; \
10666 + (x) = (__typeof__(*(ptr)))__gu_val; \
10667 __gu_err; \
10668 })
10669
10670 /* FIXME: this hack is definitely wrong -AK */
10671 struct __large_struct { unsigned long buf[100]; };
10672 -#define __m(x) (*(struct __large_struct __user *)(x))
10673 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10674 +#define ____m(x) \
10675 +({ \
10676 + unsigned long ____x = (unsigned long)(x); \
10677 + if (____x < PAX_USER_SHADOW_BASE) \
10678 + ____x += PAX_USER_SHADOW_BASE; \
10679 + (void __user *)____x; \
10680 +})
10681 +#else
10682 +#define ____m(x) (x)
10683 +#endif
10684 +#define __m(x) (*(struct __large_struct __user *)____m(x))
10685
10686 /*
10687 * Tell gcc we read from memory instead of writing: this is because
10688 @@ -437,7 +490,7 @@ struct __large_struct { unsigned long buf[100]; };
10689 * aliasing issues.
10690 */
10691 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
10692 - asm volatile("1: mov"itype" %"rtype"1,%2\n" \
10693 + asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
10694 "2:\n" \
10695 ".section .fixup,\"ax\"\n" \
10696 "3: mov %3,%0\n" \
10697 @@ -445,10 +498,10 @@ struct __large_struct { unsigned long buf[100]; };
10698 ".previous\n" \
10699 _ASM_EXTABLE(1b, 3b) \
10700 : "=r"(err) \
10701 - : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
10702 + : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
10703
10704 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
10705 - asm volatile("1: mov"itype" %"rtype"0,%1\n" \
10706 + asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
10707 "2:\n" \
10708 _ASM_EXTABLE(1b, 2b - 1b) \
10709 : : ltype(x), "m" (__m(addr)))
10710 @@ -487,8 +540,12 @@ struct __large_struct { unsigned long buf[100]; };
10711 * On error, the variable @x is set to zero.
10712 */
10713
10714 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10715 +#define __get_user(x, ptr) get_user((x), (ptr))
10716 +#else
10717 #define __get_user(x, ptr) \
10718 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
10719 +#endif
10720
10721 /**
10722 * __put_user: - Write a simple value into user space, with less checking.
10723 @@ -510,8 +567,12 @@ struct __large_struct { unsigned long buf[100]; };
10724 * Returns zero on success, or -EFAULT on error.
10725 */
10726
10727 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10728 +#define __put_user(x, ptr) put_user((x), (ptr))
10729 +#else
10730 #define __put_user(x, ptr) \
10731 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
10732 +#endif
10733
10734 #define __get_user_unaligned __get_user
10735 #define __put_user_unaligned __put_user
10736 @@ -529,7 +590,7 @@ struct __large_struct { unsigned long buf[100]; };
10737 #define get_user_ex(x, ptr) do { \
10738 unsigned long __gue_val; \
10739 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
10740 - (x) = (__force __typeof__(*(ptr)))__gue_val; \
10741 + (x) = (__typeof__(*(ptr)))__gue_val; \
10742 } while (0)
10743
10744 #ifdef CONFIG_X86_WP_WORKS_OK
10745 diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
10746 index 566e803..b9521e9 100644
10747 --- a/arch/x86/include/asm/uaccess_32.h
10748 +++ b/arch/x86/include/asm/uaccess_32.h
10749 @@ -43,6 +43,9 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
10750 static __always_inline unsigned long __must_check
10751 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
10752 {
10753 + if ((long)n < 0)
10754 + return n;
10755 +
10756 if (__builtin_constant_p(n)) {
10757 unsigned long ret;
10758
10759 @@ -61,6 +64,8 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
10760 return ret;
10761 }
10762 }
10763 + if (!__builtin_constant_p(n))
10764 + check_object_size(from, n, true);
10765 return __copy_to_user_ll(to, from, n);
10766 }
10767
10768 @@ -82,12 +87,16 @@ static __always_inline unsigned long __must_check
10769 __copy_to_user(void __user *to, const void *from, unsigned long n)
10770 {
10771 might_fault();
10772 +
10773 return __copy_to_user_inatomic(to, from, n);
10774 }
10775
10776 static __always_inline unsigned long
10777 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
10778 {
10779 + if ((long)n < 0)
10780 + return n;
10781 +
10782 /* Avoid zeroing the tail if the copy fails..
10783 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
10784 * but as the zeroing behaviour is only significant when n is not
10785 @@ -137,6 +146,10 @@ static __always_inline unsigned long
10786 __copy_from_user(void *to, const void __user *from, unsigned long n)
10787 {
10788 might_fault();
10789 +
10790 + if ((long)n < 0)
10791 + return n;
10792 +
10793 if (__builtin_constant_p(n)) {
10794 unsigned long ret;
10795
10796 @@ -152,6 +165,8 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
10797 return ret;
10798 }
10799 }
10800 + if (!__builtin_constant_p(n))
10801 + check_object_size(to, n, false);
10802 return __copy_from_user_ll(to, from, n);
10803 }
10804
10805 @@ -159,6 +174,10 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
10806 const void __user *from, unsigned long n)
10807 {
10808 might_fault();
10809 +
10810 + if ((long)n < 0)
10811 + return n;
10812 +
10813 if (__builtin_constant_p(n)) {
10814 unsigned long ret;
10815
10816 @@ -181,15 +200,19 @@ static __always_inline unsigned long
10817 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
10818 unsigned long n)
10819 {
10820 - return __copy_from_user_ll_nocache_nozero(to, from, n);
10821 + if ((long)n < 0)
10822 + return n;
10823 +
10824 + return __copy_from_user_ll_nocache_nozero(to, from, n);
10825 }
10826
10827 -unsigned long __must_check copy_to_user(void __user *to,
10828 - const void *from, unsigned long n);
10829 -unsigned long __must_check _copy_from_user(void *to,
10830 - const void __user *from,
10831 - unsigned long n);
10832 -
10833 +extern void copy_to_user_overflow(void)
10834 +#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
10835 + __compiletime_error("copy_to_user() buffer size is not provably correct")
10836 +#else
10837 + __compiletime_warning("copy_to_user() buffer size is not provably correct")
10838 +#endif
10839 +;
10840
10841 extern void copy_from_user_overflow(void)
10842 #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
10843 @@ -199,17 +222,61 @@ extern void copy_from_user_overflow(void)
10844 #endif
10845 ;
10846
10847 -static inline unsigned long __must_check copy_from_user(void *to,
10848 - const void __user *from,
10849 - unsigned long n)
10850 +/**
10851 + * copy_to_user: - Copy a block of data into user space.
10852 + * @to: Destination address, in user space.
10853 + * @from: Source address, in kernel space.
10854 + * @n: Number of bytes to copy.
10855 + *
10856 + * Context: User context only. This function may sleep.
10857 + *
10858 + * Copy data from kernel space to user space.
10859 + *
10860 + * Returns number of bytes that could not be copied.
10861 + * On success, this will be zero.
10862 + */
10863 +static inline unsigned long __must_check
10864 +copy_to_user(void __user *to, const void *from, unsigned long n)
10865 +{
10866 + int sz = __compiletime_object_size(from);
10867 +
10868 + if (unlikely(sz != -1 && sz < n))
10869 + copy_to_user_overflow();
10870 + else if (access_ok(VERIFY_WRITE, to, n))
10871 + n = __copy_to_user(to, from, n);
10872 + return n;
10873 +}
10874 +
10875 +/**
10876 + * copy_from_user: - Copy a block of data from user space.
10877 + * @to: Destination address, in kernel space.
10878 + * @from: Source address, in user space.
10879 + * @n: Number of bytes to copy.
10880 + *
10881 + * Context: User context only. This function may sleep.
10882 + *
10883 + * Copy data from user space to kernel space.
10884 + *
10885 + * Returns number of bytes that could not be copied.
10886 + * On success, this will be zero.
10887 + *
10888 + * If some data could not be copied, this function will pad the copied
10889 + * data to the requested size using zero bytes.
10890 + */
10891 +static inline unsigned long __must_check
10892 +copy_from_user(void *to, const void __user *from, unsigned long n)
10893 {
10894 int sz = __compiletime_object_size(to);
10895
10896 - if (likely(sz == -1 || sz >= n))
10897 - n = _copy_from_user(to, from, n);
10898 - else
10899 + if (unlikely(sz != -1 && sz < n))
10900 copy_from_user_overflow();
10901 -
10902 + else if (access_ok(VERIFY_READ, from, n))
10903 + n = __copy_from_user(to, from, n);
10904 + else if ((long)n > 0) {
10905 + if (!__builtin_constant_p(n))
10906 + check_object_size(to, n, false);
10907 + memset(to, 0, n);
10908 + }
10909 return n;
10910 }
10911
10912 diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
10913 index 1c66d30..23ab77d 100644
10914 --- a/arch/x86/include/asm/uaccess_64.h
10915 +++ b/arch/x86/include/asm/uaccess_64.h
10916 @@ -10,6 +10,9 @@
10917 #include <asm/alternative.h>
10918 #include <asm/cpufeature.h>
10919 #include <asm/page.h>
10920 +#include <asm/pgtable.h>
10921 +
10922 +#define set_fs(x) (current_thread_info()->addr_limit = (x))
10923
10924 /*
10925 * Copy To/From Userspace
10926 @@ -17,12 +20,12 @@
10927
10928 /* Handles exceptions in both to and from, but doesn't do access_ok */
10929 __must_check unsigned long
10930 -copy_user_generic_string(void *to, const void *from, unsigned len);
10931 +copy_user_generic_string(void *to, const void *from, unsigned long len);
10932 __must_check unsigned long
10933 -copy_user_generic_unrolled(void *to, const void *from, unsigned len);
10934 +copy_user_generic_unrolled(void *to, const void *from, unsigned long len);
10935
10936 static __always_inline __must_check unsigned long
10937 -copy_user_generic(void *to, const void *from, unsigned len)
10938 +copy_user_generic(void *to, const void *from, unsigned long len)
10939 {
10940 unsigned ret;
10941
10942 @@ -36,138 +39,222 @@ copy_user_generic(void *to, const void *from, unsigned len)
10943 return ret;
10944 }
10945
10946 +static __always_inline __must_check unsigned long
10947 +__copy_to_user(void __user *to, const void *from, unsigned long len);
10948 +static __always_inline __must_check unsigned long
10949 +__copy_from_user(void *to, const void __user *from, unsigned long len);
10950 __must_check unsigned long
10951 -_copy_to_user(void __user *to, const void *from, unsigned len);
10952 -__must_check unsigned long
10953 -_copy_from_user(void *to, const void __user *from, unsigned len);
10954 -__must_check unsigned long
10955 -copy_in_user(void __user *to, const void __user *from, unsigned len);
10956 +copy_in_user(void __user *to, const void __user *from, unsigned long len);
10957
10958 static inline unsigned long __must_check copy_from_user(void *to,
10959 const void __user *from,
10960 unsigned long n)
10961 {
10962 - int sz = __compiletime_object_size(to);
10963 -
10964 might_fault();
10965 - if (likely(sz == -1 || sz >= n))
10966 - n = _copy_from_user(to, from, n);
10967 -#ifdef CONFIG_DEBUG_VM
10968 - else
10969 - WARN(1, "Buffer overflow detected!\n");
10970 -#endif
10971 +
10972 + if (access_ok(VERIFY_READ, from, n))
10973 + n = __copy_from_user(to, from, n);
10974 + else if (n < INT_MAX) {
10975 + if (!__builtin_constant_p(n))
10976 + check_object_size(to, n, false);
10977 + memset(to, 0, n);
10978 + }
10979 return n;
10980 }
10981
10982 static __always_inline __must_check
10983 -int copy_to_user(void __user *dst, const void *src, unsigned size)
10984 +int copy_to_user(void __user *dst, const void *src, unsigned long size)
10985 {
10986 might_fault();
10987
10988 - return _copy_to_user(dst, src, size);
10989 + if (access_ok(VERIFY_WRITE, dst, size))
10990 + size = __copy_to_user(dst, src, size);
10991 + return size;
10992 }
10993
10994 static __always_inline __must_check
10995 -int __copy_from_user(void *dst, const void __user *src, unsigned size)
10996 +unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size)
10997 {
10998 - int ret = 0;
10999 + int sz = __compiletime_object_size(dst);
11000 + unsigned ret = 0;
11001
11002 might_fault();
11003 - if (!__builtin_constant_p(size))
11004 - return copy_user_generic(dst, (__force void *)src, size);
11005 +
11006 + if (size > INT_MAX)
11007 + return size;
11008 +
11009 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11010 + if (!__access_ok(VERIFY_READ, src, size))
11011 + return size;
11012 +#endif
11013 +
11014 + if (unlikely(sz != -1 && sz < size)) {
11015 +#ifdef CONFIG_DEBUG_VM
11016 + WARN(1, "Buffer overflow detected!\n");
11017 +#endif
11018 + return size;
11019 + }
11020 +
11021 + if (!__builtin_constant_p(size)) {
11022 + check_object_size(dst, size, false);
11023 +
11024 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11025 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
11026 + src += PAX_USER_SHADOW_BASE;
11027 +#endif
11028 +
11029 + return copy_user_generic(dst, (__force_kernel const void *)src, size);
11030 + }
11031 switch (size) {
11032 - case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
11033 + case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
11034 ret, "b", "b", "=q", 1);
11035 return ret;
11036 - case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
11037 + case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
11038 ret, "w", "w", "=r", 2);
11039 return ret;
11040 - case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
11041 + case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
11042 ret, "l", "k", "=r", 4);
11043 return ret;
11044 - case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
11045 + case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
11046 ret, "q", "", "=r", 8);
11047 return ret;
11048 case 10:
11049 - __get_user_asm(*(u64 *)dst, (u64 __user *)src,
11050 + __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
11051 ret, "q", "", "=r", 10);
11052 if (unlikely(ret))
11053 return ret;
11054 __get_user_asm(*(u16 *)(8 + (char *)dst),
11055 - (u16 __user *)(8 + (char __user *)src),
11056 + (const u16 __user *)(8 + (const char __user *)src),
11057 ret, "w", "w", "=r", 2);
11058 return ret;
11059 case 16:
11060 - __get_user_asm(*(u64 *)dst, (u64 __user *)src,
11061 + __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
11062 ret, "q", "", "=r", 16);
11063 if (unlikely(ret))
11064 return ret;
11065 __get_user_asm(*(u64 *)(8 + (char *)dst),
11066 - (u64 __user *)(8 + (char __user *)src),
11067 + (const u64 __user *)(8 + (const char __user *)src),
11068 ret, "q", "", "=r", 8);
11069 return ret;
11070 default:
11071 - return copy_user_generic(dst, (__force void *)src, size);
11072 +
11073 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11074 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
11075 + src += PAX_USER_SHADOW_BASE;
11076 +#endif
11077 +
11078 + return copy_user_generic(dst, (__force_kernel const void *)src, size);
11079 }
11080 }
11081
11082 static __always_inline __must_check
11083 -int __copy_to_user(void __user *dst, const void *src, unsigned size)
11084 +unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size)
11085 {
11086 - int ret = 0;
11087 + int sz = __compiletime_object_size(src);
11088 + unsigned ret = 0;
11089
11090 might_fault();
11091 - if (!__builtin_constant_p(size))
11092 - return copy_user_generic((__force void *)dst, src, size);
11093 +
11094 + if (size > INT_MAX)
11095 + return size;
11096 +
11097 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11098 + if (!__access_ok(VERIFY_WRITE, dst, size))
11099 + return size;
11100 +#endif
11101 +
11102 + if (unlikely(sz != -1 && sz < size)) {
11103 +#ifdef CONFIG_DEBUG_VM
11104 + WARN(1, "Buffer overflow detected!\n");
11105 +#endif
11106 + return size;
11107 + }
11108 +
11109 + if (!__builtin_constant_p(size)) {
11110 + check_object_size(src, size, true);
11111 +
11112 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11113 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
11114 + dst += PAX_USER_SHADOW_BASE;
11115 +#endif
11116 +
11117 + return copy_user_generic((__force_kernel void *)dst, src, size);
11118 + }
11119 switch (size) {
11120 - case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
11121 + case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
11122 ret, "b", "b", "iq", 1);
11123 return ret;
11124 - case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
11125 + case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
11126 ret, "w", "w", "ir", 2);
11127 return ret;
11128 - case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
11129 + case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
11130 ret, "l", "k", "ir", 4);
11131 return ret;
11132 - case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
11133 + case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
11134 ret, "q", "", "er", 8);
11135 return ret;
11136 case 10:
11137 - __put_user_asm(*(u64 *)src, (u64 __user *)dst,
11138 + __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
11139 ret, "q", "", "er", 10);
11140 if (unlikely(ret))
11141 return ret;
11142 asm("":::"memory");
11143 - __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
11144 + __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
11145 ret, "w", "w", "ir", 2);
11146 return ret;
11147 case 16:
11148 - __put_user_asm(*(u64 *)src, (u64 __user *)dst,
11149 + __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
11150 ret, "q", "", "er", 16);
11151 if (unlikely(ret))
11152 return ret;
11153 asm("":::"memory");
11154 - __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
11155 + __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
11156 ret, "q", "", "er", 8);
11157 return ret;
11158 default:
11159 - return copy_user_generic((__force void *)dst, src, size);
11160 +
11161 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11162 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
11163 + dst += PAX_USER_SHADOW_BASE;
11164 +#endif
11165 +
11166 + return copy_user_generic((__force_kernel void *)dst, src, size);
11167 }
11168 }
11169
11170 static __always_inline __must_check
11171 -int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
11172 +unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned long size)
11173 {
11174 - int ret = 0;
11175 + unsigned ret = 0;
11176
11177 might_fault();
11178 - if (!__builtin_constant_p(size))
11179 - return copy_user_generic((__force void *)dst,
11180 - (__force void *)src, size);
11181 +
11182 + if (size > INT_MAX)
11183 + return size;
11184 +
11185 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11186 + if (!__access_ok(VERIFY_READ, src, size))
11187 + return size;
11188 + if (!__access_ok(VERIFY_WRITE, dst, size))
11189 + return size;
11190 +#endif
11191 +
11192 + if (!__builtin_constant_p(size)) {
11193 +
11194 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11195 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
11196 + src += PAX_USER_SHADOW_BASE;
11197 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
11198 + dst += PAX_USER_SHADOW_BASE;
11199 +#endif
11200 +
11201 + return copy_user_generic((__force_kernel void *)dst,
11202 + (__force_kernel const void *)src, size);
11203 + }
11204 switch (size) {
11205 case 1: {
11206 u8 tmp;
11207 - __get_user_asm(tmp, (u8 __user *)src,
11208 + __get_user_asm(tmp, (const u8 __user *)src,
11209 ret, "b", "b", "=q", 1);
11210 if (likely(!ret))
11211 __put_user_asm(tmp, (u8 __user *)dst,
11212 @@ -176,7 +263,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
11213 }
11214 case 2: {
11215 u16 tmp;
11216 - __get_user_asm(tmp, (u16 __user *)src,
11217 + __get_user_asm(tmp, (const u16 __user *)src,
11218 ret, "w", "w", "=r", 2);
11219 if (likely(!ret))
11220 __put_user_asm(tmp, (u16 __user *)dst,
11221 @@ -186,7 +273,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
11222
11223 case 4: {
11224 u32 tmp;
11225 - __get_user_asm(tmp, (u32 __user *)src,
11226 + __get_user_asm(tmp, (const u32 __user *)src,
11227 ret, "l", "k", "=r", 4);
11228 if (likely(!ret))
11229 __put_user_asm(tmp, (u32 __user *)dst,
11230 @@ -195,7 +282,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
11231 }
11232 case 8: {
11233 u64 tmp;
11234 - __get_user_asm(tmp, (u64 __user *)src,
11235 + __get_user_asm(tmp, (const u64 __user *)src,
11236 ret, "q", "", "=r", 8);
11237 if (likely(!ret))
11238 __put_user_asm(tmp, (u64 __user *)dst,
11239 @@ -203,8 +290,16 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
11240 return ret;
11241 }
11242 default:
11243 - return copy_user_generic((__force void *)dst,
11244 - (__force void *)src, size);
11245 +
11246 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11247 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
11248 + src += PAX_USER_SHADOW_BASE;
11249 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
11250 + dst += PAX_USER_SHADOW_BASE;
11251 +#endif
11252 +
11253 + return copy_user_generic((__force_kernel void *)dst,
11254 + (__force_kernel const void *)src, size);
11255 }
11256 }
11257
11258 @@ -219,35 +314,72 @@ __must_check unsigned long clear_user(void __user *mem, unsigned long len);
11259 __must_check unsigned long __clear_user(void __user *mem, unsigned long len);
11260
11261 static __must_check __always_inline int
11262 -__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
11263 +__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size)
11264 {
11265 - return copy_user_generic(dst, (__force const void *)src, size);
11266 + if (size > INT_MAX)
11267 + return size;
11268 +
11269 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11270 + if (!__access_ok(VERIFY_READ, src, size))
11271 + return size;
11272 +
11273 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
11274 + src += PAX_USER_SHADOW_BASE;
11275 +#endif
11276 +
11277 + return copy_user_generic(dst, (__force_kernel const void *)src, size);
11278 }
11279
11280 -static __must_check __always_inline int
11281 -__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
11282 +static __must_check __always_inline unsigned long
11283 +__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size)
11284 {
11285 - return copy_user_generic((__force void *)dst, src, size);
11286 + if (size > INT_MAX)
11287 + return size;
11288 +
11289 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11290 + if (!__access_ok(VERIFY_WRITE, dst, size))
11291 + return size;
11292 +
11293 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
11294 + dst += PAX_USER_SHADOW_BASE;
11295 +#endif
11296 +
11297 + return copy_user_generic((__force_kernel void *)dst, src, size);
11298 }
11299
11300 -extern long __copy_user_nocache(void *dst, const void __user *src,
11301 - unsigned size, int zerorest);
11302 +extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
11303 + unsigned long size, int zerorest);
11304
11305 -static inline int
11306 -__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
11307 +static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned long size)
11308 {
11309 might_sleep();
11310 +
11311 + if (size > INT_MAX)
11312 + return size;
11313 +
11314 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11315 + if (!__access_ok(VERIFY_READ, src, size))
11316 + return size;
11317 +#endif
11318 +
11319 return __copy_user_nocache(dst, src, size, 1);
11320 }
11321
11322 -static inline int
11323 -__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
11324 - unsigned size)
11325 +static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
11326 + unsigned long size)
11327 {
11328 + if (size > INT_MAX)
11329 + return size;
11330 +
11331 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11332 + if (!__access_ok(VERIFY_READ, src, size))
11333 + return size;
11334 +#endif
11335 +
11336 return __copy_user_nocache(dst, src, size, 0);
11337 }
11338
11339 -unsigned long
11340 -copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
11341 +extern unsigned long
11342 +copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest);
11343
11344 #endif /* _ASM_X86_UACCESS_64_H */
11345 diff --git a/arch/x86/include/asm/vdso.h b/arch/x86/include/asm/vdso.h
11346 index bb05228..d763d5b 100644
11347 --- a/arch/x86/include/asm/vdso.h
11348 +++ b/arch/x86/include/asm/vdso.h
11349 @@ -11,7 +11,7 @@ extern const char VDSO32_PRELINK[];
11350 #define VDSO32_SYMBOL(base, name) \
11351 ({ \
11352 extern const char VDSO32_##name[]; \
11353 - (void *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
11354 + (void __user *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
11355 })
11356 #endif
11357
11358 diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
11359 index 1971e65..1e3559b 100644
11360 --- a/arch/x86/include/asm/x86_init.h
11361 +++ b/arch/x86/include/asm/x86_init.h
11362 @@ -28,7 +28,7 @@ struct x86_init_mpparse {
11363 void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name);
11364 void (*find_smp_config)(void);
11365 void (*get_smp_config)(unsigned int early);
11366 -};
11367 +} __no_const;
11368
11369 /**
11370 * struct x86_init_resources - platform specific resource related ops
11371 @@ -42,7 +42,7 @@ struct x86_init_resources {
11372 void (*probe_roms)(void);
11373 void (*reserve_resources)(void);
11374 char *(*memory_setup)(void);
11375 -};
11376 +} __no_const;
11377
11378 /**
11379 * struct x86_init_irqs - platform specific interrupt setup
11380 @@ -55,7 +55,7 @@ struct x86_init_irqs {
11381 void (*pre_vector_init)(void);
11382 void (*intr_init)(void);
11383 void (*trap_init)(void);
11384 -};
11385 +} __no_const;
11386
11387 /**
11388 * struct x86_init_oem - oem platform specific customizing functions
11389 @@ -65,7 +65,7 @@ struct x86_init_irqs {
11390 struct x86_init_oem {
11391 void (*arch_setup)(void);
11392 void (*banner)(void);
11393 -};
11394 +} __no_const;
11395
11396 /**
11397 * struct x86_init_mapping - platform specific initial kernel pagetable setup
11398 @@ -76,7 +76,7 @@ struct x86_init_oem {
11399 */
11400 struct x86_init_mapping {
11401 void (*pagetable_reserve)(u64 start, u64 end);
11402 -};
11403 +} __no_const;
11404
11405 /**
11406 * struct x86_init_paging - platform specific paging functions
11407 @@ -86,7 +86,7 @@ struct x86_init_mapping {
11408 struct x86_init_paging {
11409 void (*pagetable_setup_start)(pgd_t *base);
11410 void (*pagetable_setup_done)(pgd_t *base);
11411 -};
11412 +} __no_const;
11413
11414 /**
11415 * struct x86_init_timers - platform specific timer setup
11416 @@ -101,7 +101,7 @@ struct x86_init_timers {
11417 void (*tsc_pre_init)(void);
11418 void (*timer_init)(void);
11419 void (*wallclock_init)(void);
11420 -};
11421 +} __no_const;
11422
11423 /**
11424 * struct x86_init_iommu - platform specific iommu setup
11425 @@ -109,7 +109,7 @@ struct x86_init_timers {
11426 */
11427 struct x86_init_iommu {
11428 int (*iommu_init)(void);
11429 -};
11430 +} __no_const;
11431
11432 /**
11433 * struct x86_init_pci - platform specific pci init functions
11434 @@ -123,7 +123,7 @@ struct x86_init_pci {
11435 int (*init)(void);
11436 void (*init_irq)(void);
11437 void (*fixup_irqs)(void);
11438 -};
11439 +} __no_const;
11440
11441 /**
11442 * struct x86_init_ops - functions for platform specific setup
11443 @@ -139,7 +139,7 @@ struct x86_init_ops {
11444 struct x86_init_timers timers;
11445 struct x86_init_iommu iommu;
11446 struct x86_init_pci pci;
11447 -};
11448 +} __no_const;
11449
11450 /**
11451 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
11452 @@ -147,7 +147,7 @@ struct x86_init_ops {
11453 */
11454 struct x86_cpuinit_ops {
11455 void (*setup_percpu_clockev)(void);
11456 -};
11457 +} __no_const;
11458
11459 /**
11460 * struct x86_platform_ops - platform specific runtime functions
11461 @@ -169,7 +169,7 @@ struct x86_platform_ops {
11462 void (*nmi_init)(void);
11463 unsigned char (*get_nmi_reason)(void);
11464 int (*i8042_detect)(void);
11465 -};
11466 +} __no_const;
11467
11468 struct pci_dev;
11469
11470 @@ -177,7 +177,7 @@ struct x86_msi_ops {
11471 int (*setup_msi_irqs)(struct pci_dev *dev, int nvec, int type);
11472 void (*teardown_msi_irq)(unsigned int irq);
11473 void (*teardown_msi_irqs)(struct pci_dev *dev);
11474 -};
11475 +} __no_const;
11476
11477 extern struct x86_init_ops x86_init;
11478 extern struct x86_cpuinit_ops x86_cpuinit;
11479 diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
11480 index c6ce245..ffbdab7 100644
11481 --- a/arch/x86/include/asm/xsave.h
11482 +++ b/arch/x86/include/asm/xsave.h
11483 @@ -65,6 +65,11 @@ static inline int xsave_user(struct xsave_struct __user *buf)
11484 {
11485 int err;
11486
11487 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11488 + if ((unsigned long)buf < PAX_USER_SHADOW_BASE)
11489 + buf = (struct xsave_struct __user *)((void __user*)buf + PAX_USER_SHADOW_BASE);
11490 +#endif
11491 +
11492 /*
11493 * Clear the xsave header first, so that reserved fields are
11494 * initialized to zero.
11495 @@ -96,10 +101,15 @@ static inline int xsave_user(struct xsave_struct __user *buf)
11496 static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
11497 {
11498 int err;
11499 - struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
11500 + struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf);
11501 u32 lmask = mask;
11502 u32 hmask = mask >> 32;
11503
11504 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11505 + if ((unsigned long)xstate < PAX_USER_SHADOW_BASE)
11506 + xstate = (struct xsave_struct *)((void *)xstate + PAX_USER_SHADOW_BASE);
11507 +#endif
11508 +
11509 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
11510 "2:\n"
11511 ".section .fixup,\"ax\"\n"
11512 diff --git a/arch/x86/kernel/acpi/realmode/Makefile b/arch/x86/kernel/acpi/realmode/Makefile
11513 index 6a564ac..9b1340c 100644
11514 --- a/arch/x86/kernel/acpi/realmode/Makefile
11515 +++ b/arch/x86/kernel/acpi/realmode/Makefile
11516 @@ -41,6 +41,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D_WAKEUP -D__KERNEL__ \
11517 $(call cc-option, -fno-stack-protector) \
11518 $(call cc-option, -mpreferred-stack-boundary=2)
11519 KBUILD_CFLAGS += $(call cc-option, -m32)
11520 +ifdef CONSTIFY_PLUGIN
11521 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
11522 +endif
11523 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
11524 GCOV_PROFILE := n
11525
11526 diff --git a/arch/x86/kernel/acpi/realmode/wakeup.S b/arch/x86/kernel/acpi/realmode/wakeup.S
11527 index b4fd836..4358fe3 100644
11528 --- a/arch/x86/kernel/acpi/realmode/wakeup.S
11529 +++ b/arch/x86/kernel/acpi/realmode/wakeup.S
11530 @@ -108,6 +108,9 @@ wakeup_code:
11531 /* Do any other stuff... */
11532
11533 #ifndef CONFIG_64BIT
11534 + /* Recheck NX bit overrides (64bit path does this in trampoline */
11535 + call verify_cpu
11536 +
11537 /* This could also be done in C code... */
11538 movl pmode_cr3, %eax
11539 movl %eax, %cr3
11540 @@ -131,6 +134,7 @@ wakeup_code:
11541 movl pmode_cr0, %eax
11542 movl %eax, %cr0
11543 jmp pmode_return
11544 +# include "../../verify_cpu.S"
11545 #else
11546 pushw $0
11547 pushw trampoline_segment
11548 diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
11549 index 103b6ab..2004d0a 100644
11550 --- a/arch/x86/kernel/acpi/sleep.c
11551 +++ b/arch/x86/kernel/acpi/sleep.c
11552 @@ -94,8 +94,12 @@ int acpi_suspend_lowlevel(void)
11553 header->trampoline_segment = trampoline_address() >> 4;
11554 #ifdef CONFIG_SMP
11555 stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
11556 +
11557 + pax_open_kernel();
11558 early_gdt_descr.address =
11559 (unsigned long)get_cpu_gdt_table(smp_processor_id());
11560 + pax_close_kernel();
11561 +
11562 initial_gs = per_cpu_offset(smp_processor_id());
11563 #endif
11564 initial_code = (unsigned long)wakeup_long64;
11565 diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
11566 index 13ab720..95d5442 100644
11567 --- a/arch/x86/kernel/acpi/wakeup_32.S
11568 +++ b/arch/x86/kernel/acpi/wakeup_32.S
11569 @@ -30,13 +30,11 @@ wakeup_pmode_return:
11570 # and restore the stack ... but you need gdt for this to work
11571 movl saved_context_esp, %esp
11572
11573 - movl %cs:saved_magic, %eax
11574 - cmpl $0x12345678, %eax
11575 + cmpl $0x12345678, saved_magic
11576 jne bogus_magic
11577
11578 # jump to place where we left off
11579 - movl saved_eip, %eax
11580 - jmp *%eax
11581 + jmp *(saved_eip)
11582
11583 bogus_magic:
11584 jmp bogus_magic
11585 diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
11586 index 1f84794..e23f862 100644
11587 --- a/arch/x86/kernel/alternative.c
11588 +++ b/arch/x86/kernel/alternative.c
11589 @@ -276,6 +276,13 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
11590 */
11591 for (a = start; a < end; a++) {
11592 instr = (u8 *)&a->instr_offset + a->instr_offset;
11593 +
11594 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
11595 + instr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
11596 + if (instr < (u8 *)_text || (u8 *)_einittext <= instr)
11597 + instr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
11598 +#endif
11599 +
11600 replacement = (u8 *)&a->repl_offset + a->repl_offset;
11601 BUG_ON(a->replacementlen > a->instrlen);
11602 BUG_ON(a->instrlen > sizeof(insnbuf));
11603 @@ -307,10 +314,16 @@ static void alternatives_smp_lock(const s32 *start, const s32 *end,
11604 for (poff = start; poff < end; poff++) {
11605 u8 *ptr = (u8 *)poff + *poff;
11606
11607 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
11608 + ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
11609 + if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
11610 + ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
11611 +#endif
11612 +
11613 if (!*poff || ptr < text || ptr >= text_end)
11614 continue;
11615 /* turn DS segment override prefix into lock prefix */
11616 - if (*ptr == 0x3e)
11617 + if (*ktla_ktva(ptr) == 0x3e)
11618 text_poke(ptr, ((unsigned char []){0xf0}), 1);
11619 };
11620 mutex_unlock(&text_mutex);
11621 @@ -328,10 +341,16 @@ static void alternatives_smp_unlock(const s32 *start, const s32 *end,
11622 for (poff = start; poff < end; poff++) {
11623 u8 *ptr = (u8 *)poff + *poff;
11624
11625 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
11626 + ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
11627 + if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
11628 + ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
11629 +#endif
11630 +
11631 if (!*poff || ptr < text || ptr >= text_end)
11632 continue;
11633 /* turn lock prefix into DS segment override prefix */
11634 - if (*ptr == 0xf0)
11635 + if (*ktla_ktva(ptr) == 0xf0)
11636 text_poke(ptr, ((unsigned char []){0x3E}), 1);
11637 };
11638 mutex_unlock(&text_mutex);
11639 @@ -500,7 +519,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
11640
11641 BUG_ON(p->len > MAX_PATCH_LEN);
11642 /* prep the buffer with the original instructions */
11643 - memcpy(insnbuf, p->instr, p->len);
11644 + memcpy(insnbuf, ktla_ktva(p->instr), p->len);
11645 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
11646 (unsigned long)p->instr, p->len);
11647
11648 @@ -568,7 +587,7 @@ void __init alternative_instructions(void)
11649 if (smp_alt_once)
11650 free_init_pages("SMP alternatives",
11651 (unsigned long)__smp_locks,
11652 - (unsigned long)__smp_locks_end);
11653 + PAGE_ALIGN((unsigned long)__smp_locks_end));
11654
11655 restart_nmi();
11656 }
11657 @@ -585,13 +604,17 @@ void __init alternative_instructions(void)
11658 * instructions. And on the local CPU you need to be protected again NMI or MCE
11659 * handlers seeing an inconsistent instruction while you patch.
11660 */
11661 -void *__init_or_module text_poke_early(void *addr, const void *opcode,
11662 +void *__kprobes text_poke_early(void *addr, const void *opcode,
11663 size_t len)
11664 {
11665 unsigned long flags;
11666 local_irq_save(flags);
11667 - memcpy(addr, opcode, len);
11668 +
11669 + pax_open_kernel();
11670 + memcpy(ktla_ktva(addr), opcode, len);
11671 sync_core();
11672 + pax_close_kernel();
11673 +
11674 local_irq_restore(flags);
11675 /* Could also do a CLFLUSH here to speed up CPU recovery; but
11676 that causes hangs on some VIA CPUs. */
11677 @@ -613,36 +636,22 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode,
11678 */
11679 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
11680 {
11681 - unsigned long flags;
11682 - char *vaddr;
11683 + unsigned char *vaddr = ktla_ktva(addr);
11684 struct page *pages[2];
11685 - int i;
11686 + size_t i;
11687
11688 if (!core_kernel_text((unsigned long)addr)) {
11689 - pages[0] = vmalloc_to_page(addr);
11690 - pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
11691 + pages[0] = vmalloc_to_page(vaddr);
11692 + pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
11693 } else {
11694 - pages[0] = virt_to_page(addr);
11695 + pages[0] = virt_to_page(vaddr);
11696 WARN_ON(!PageReserved(pages[0]));
11697 - pages[1] = virt_to_page(addr + PAGE_SIZE);
11698 + pages[1] = virt_to_page(vaddr + PAGE_SIZE);
11699 }
11700 BUG_ON(!pages[0]);
11701 - local_irq_save(flags);
11702 - set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
11703 - if (pages[1])
11704 - set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
11705 - vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
11706 - memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
11707 - clear_fixmap(FIX_TEXT_POKE0);
11708 - if (pages[1])
11709 - clear_fixmap(FIX_TEXT_POKE1);
11710 - local_flush_tlb();
11711 - sync_core();
11712 - /* Could also do a CLFLUSH here to speed up CPU recovery; but
11713 - that causes hangs on some VIA CPUs. */
11714 + text_poke_early(addr, opcode, len);
11715 for (i = 0; i < len; i++)
11716 - BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
11717 - local_irq_restore(flags);
11718 + BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
11719 return addr;
11720 }
11721
11722 diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
11723 index f98d84c..e402a69 100644
11724 --- a/arch/x86/kernel/apic/apic.c
11725 +++ b/arch/x86/kernel/apic/apic.c
11726 @@ -174,7 +174,7 @@ int first_system_vector = 0xfe;
11727 /*
11728 * Debug level, exported for io_apic.c
11729 */
11730 -unsigned int apic_verbosity;
11731 +int apic_verbosity;
11732
11733 int pic_mode;
11734
11735 @@ -1853,7 +1853,7 @@ void smp_error_interrupt(struct pt_regs *regs)
11736 apic_write(APIC_ESR, 0);
11737 v1 = apic_read(APIC_ESR);
11738 ack_APIC_irq();
11739 - atomic_inc(&irq_err_count);
11740 + atomic_inc_unchecked(&irq_err_count);
11741
11742 apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x(%02x)",
11743 smp_processor_id(), v0 , v1);
11744 diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
11745 index 6d939d7..0697fcc 100644
11746 --- a/arch/x86/kernel/apic/io_apic.c
11747 +++ b/arch/x86/kernel/apic/io_apic.c
11748 @@ -1096,7 +1096,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin,
11749 }
11750 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
11751
11752 -void lock_vector_lock(void)
11753 +void lock_vector_lock(void) __acquires(vector_lock)
11754 {
11755 /* Used to the online set of cpus does not change
11756 * during assign_irq_vector.
11757 @@ -1104,7 +1104,7 @@ void lock_vector_lock(void)
11758 raw_spin_lock(&vector_lock);
11759 }
11760
11761 -void unlock_vector_lock(void)
11762 +void unlock_vector_lock(void) __releases(vector_lock)
11763 {
11764 raw_spin_unlock(&vector_lock);
11765 }
11766 @@ -2510,7 +2510,7 @@ static void ack_apic_edge(struct irq_data *data)
11767 ack_APIC_irq();
11768 }
11769
11770 -atomic_t irq_mis_count;
11771 +atomic_unchecked_t irq_mis_count;
11772
11773 static void ack_apic_level(struct irq_data *data)
11774 {
11775 @@ -2576,7 +2576,7 @@ static void ack_apic_level(struct irq_data *data)
11776 * at the cpu.
11777 */
11778 if (!(v & (1 << (i & 0x1f)))) {
11779 - atomic_inc(&irq_mis_count);
11780 + atomic_inc_unchecked(&irq_mis_count);
11781
11782 eoi_ioapic_irq(irq, cfg);
11783 }
11784 diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
11785 index a46bd38..6b906d7 100644
11786 --- a/arch/x86/kernel/apm_32.c
11787 +++ b/arch/x86/kernel/apm_32.c
11788 @@ -411,7 +411,7 @@ static DEFINE_MUTEX(apm_mutex);
11789 * This is for buggy BIOS's that refer to (real mode) segment 0x40
11790 * even though they are called in protected mode.
11791 */
11792 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
11793 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
11794 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
11795
11796 static const char driver_version[] = "1.16ac"; /* no spaces */
11797 @@ -589,7 +589,10 @@ static long __apm_bios_call(void *_call)
11798 BUG_ON(cpu != 0);
11799 gdt = get_cpu_gdt_table(cpu);
11800 save_desc_40 = gdt[0x40 / 8];
11801 +
11802 + pax_open_kernel();
11803 gdt[0x40 / 8] = bad_bios_desc;
11804 + pax_close_kernel();
11805
11806 apm_irq_save(flags);
11807 APM_DO_SAVE_SEGS;
11808 @@ -598,7 +601,11 @@ static long __apm_bios_call(void *_call)
11809 &call->esi);
11810 APM_DO_RESTORE_SEGS;
11811 apm_irq_restore(flags);
11812 +
11813 + pax_open_kernel();
11814 gdt[0x40 / 8] = save_desc_40;
11815 + pax_close_kernel();
11816 +
11817 put_cpu();
11818
11819 return call->eax & 0xff;
11820 @@ -665,7 +672,10 @@ static long __apm_bios_call_simple(void *_call)
11821 BUG_ON(cpu != 0);
11822 gdt = get_cpu_gdt_table(cpu);
11823 save_desc_40 = gdt[0x40 / 8];
11824 +
11825 + pax_open_kernel();
11826 gdt[0x40 / 8] = bad_bios_desc;
11827 + pax_close_kernel();
11828
11829 apm_irq_save(flags);
11830 APM_DO_SAVE_SEGS;
11831 @@ -673,7 +683,11 @@ static long __apm_bios_call_simple(void *_call)
11832 &call->eax);
11833 APM_DO_RESTORE_SEGS;
11834 apm_irq_restore(flags);
11835 +
11836 + pax_open_kernel();
11837 gdt[0x40 / 8] = save_desc_40;
11838 + pax_close_kernel();
11839 +
11840 put_cpu();
11841 return error;
11842 }
11843 @@ -2347,12 +2361,15 @@ static int __init apm_init(void)
11844 * code to that CPU.
11845 */
11846 gdt = get_cpu_gdt_table(0);
11847 +
11848 + pax_open_kernel();
11849 set_desc_base(&gdt[APM_CS >> 3],
11850 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
11851 set_desc_base(&gdt[APM_CS_16 >> 3],
11852 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
11853 set_desc_base(&gdt[APM_DS >> 3],
11854 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
11855 + pax_close_kernel();
11856
11857 proc_create("apm", 0, NULL, &apm_file_ops);
11858
11859 diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
11860 index 4f13faf..87db5d2 100644
11861 --- a/arch/x86/kernel/asm-offsets.c
11862 +++ b/arch/x86/kernel/asm-offsets.c
11863 @@ -33,6 +33,8 @@ void common(void) {
11864 OFFSET(TI_status, thread_info, status);
11865 OFFSET(TI_addr_limit, thread_info, addr_limit);
11866 OFFSET(TI_preempt_count, thread_info, preempt_count);
11867 + OFFSET(TI_lowest_stack, thread_info, lowest_stack);
11868 + DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
11869
11870 BLANK();
11871 OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
11872 @@ -53,8 +55,26 @@ void common(void) {
11873 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
11874 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
11875 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
11876 +
11877 +#ifdef CONFIG_PAX_KERNEXEC
11878 + OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
11879 #endif
11880
11881 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11882 + OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
11883 + OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
11884 +#ifdef CONFIG_X86_64
11885 + OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
11886 +#endif
11887 +#endif
11888 +
11889 +#endif
11890 +
11891 + BLANK();
11892 + DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
11893 + DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
11894 + DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
11895 +
11896 #ifdef CONFIG_XEN
11897 BLANK();
11898 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
11899 diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
11900 index e72a119..6e2955d 100644
11901 --- a/arch/x86/kernel/asm-offsets_64.c
11902 +++ b/arch/x86/kernel/asm-offsets_64.c
11903 @@ -69,6 +69,7 @@ int main(void)
11904 BLANK();
11905 #undef ENTRY
11906
11907 + DEFINE(TSS_size, sizeof(struct tss_struct));
11908 OFFSET(TSS_ist, tss_struct, x86_tss.ist);
11909 BLANK();
11910
11911 diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
11912 index 25f24dc..4094a7f 100644
11913 --- a/arch/x86/kernel/cpu/Makefile
11914 +++ b/arch/x86/kernel/cpu/Makefile
11915 @@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
11916 CFLAGS_REMOVE_perf_event.o = -pg
11917 endif
11918
11919 -# Make sure load_percpu_segment has no stackprotector
11920 -nostackp := $(call cc-option, -fno-stack-protector)
11921 -CFLAGS_common.o := $(nostackp)
11922 -
11923 obj-y := intel_cacheinfo.o scattered.o topology.o
11924 obj-y += proc.o capflags.o powerflags.o common.o
11925 obj-y += vmware.o hypervisor.o sched.o mshyperv.o
11926 diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
11927 index 0bab2b1..d0a1bf8 100644
11928 --- a/arch/x86/kernel/cpu/amd.c
11929 +++ b/arch/x86/kernel/cpu/amd.c
11930 @@ -664,7 +664,7 @@ static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c,
11931 unsigned int size)
11932 {
11933 /* AMD errata T13 (order #21922) */
11934 - if ((c->x86 == 6)) {
11935 + if (c->x86 == 6) {
11936 /* Duron Rev A0 */
11937 if (c->x86_model == 3 && c->x86_mask == 0)
11938 size = 64;
11939 diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
11940 index aa003b1..47ea638 100644
11941 --- a/arch/x86/kernel/cpu/common.c
11942 +++ b/arch/x86/kernel/cpu/common.c
11943 @@ -84,60 +84,6 @@ static const struct cpu_dev __cpuinitconst default_cpu = {
11944
11945 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
11946
11947 -DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
11948 -#ifdef CONFIG_X86_64
11949 - /*
11950 - * We need valid kernel segments for data and code in long mode too
11951 - * IRET will check the segment types kkeil 2000/10/28
11952 - * Also sysret mandates a special GDT layout
11953 - *
11954 - * TLS descriptors are currently at a different place compared to i386.
11955 - * Hopefully nobody expects them at a fixed place (Wine?)
11956 - */
11957 - [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
11958 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
11959 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
11960 - [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
11961 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
11962 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
11963 -#else
11964 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
11965 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
11966 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
11967 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
11968 - /*
11969 - * Segments used for calling PnP BIOS have byte granularity.
11970 - * They code segments and data segments have fixed 64k limits,
11971 - * the transfer segment sizes are set at run time.
11972 - */
11973 - /* 32-bit code */
11974 - [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
11975 - /* 16-bit code */
11976 - [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
11977 - /* 16-bit data */
11978 - [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
11979 - /* 16-bit data */
11980 - [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
11981 - /* 16-bit data */
11982 - [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
11983 - /*
11984 - * The APM segments have byte granularity and their bases
11985 - * are set at run time. All have 64k limits.
11986 - */
11987 - /* 32-bit code */
11988 - [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
11989 - /* 16-bit code */
11990 - [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
11991 - /* data */
11992 - [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
11993 -
11994 - [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
11995 - [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
11996 - GDT_STACK_CANARY_INIT
11997 -#endif
11998 -} };
11999 -EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
12000 -
12001 static int __init x86_xsave_setup(char *s)
12002 {
12003 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
12004 @@ -372,7 +318,7 @@ void switch_to_new_gdt(int cpu)
12005 {
12006 struct desc_ptr gdt_descr;
12007
12008 - gdt_descr.address = (long)get_cpu_gdt_table(cpu);
12009 + gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
12010 gdt_descr.size = GDT_SIZE - 1;
12011 load_gdt(&gdt_descr);
12012 /* Reload the per-cpu base */
12013 @@ -844,6 +790,10 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
12014 /* Filter out anything that depends on CPUID levels we don't have */
12015 filter_cpuid_features(c, true);
12016
12017 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
12018 + setup_clear_cpu_cap(X86_FEATURE_SEP);
12019 +#endif
12020 +
12021 /* If the model name is still unset, do table lookup. */
12022 if (!c->x86_model_id[0]) {
12023 const char *p;
12024 @@ -1024,6 +974,9 @@ static __init int setup_disablecpuid(char *arg)
12025 }
12026 __setup("clearcpuid=", setup_disablecpuid);
12027
12028 +DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
12029 +EXPORT_PER_CPU_SYMBOL(current_tinfo);
12030 +
12031 #ifdef CONFIG_X86_64
12032 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
12033
12034 @@ -1039,7 +992,7 @@ DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
12035 EXPORT_PER_CPU_SYMBOL(current_task);
12036
12037 DEFINE_PER_CPU(unsigned long, kernel_stack) =
12038 - (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
12039 + (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
12040 EXPORT_PER_CPU_SYMBOL(kernel_stack);
12041
12042 DEFINE_PER_CPU(char *, irq_stack_ptr) =
12043 @@ -1104,7 +1057,7 @@ struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
12044 {
12045 memset(regs, 0, sizeof(struct pt_regs));
12046 regs->fs = __KERNEL_PERCPU;
12047 - regs->gs = __KERNEL_STACK_CANARY;
12048 + savesegment(gs, regs->gs);
12049
12050 return regs;
12051 }
12052 @@ -1159,7 +1112,7 @@ void __cpuinit cpu_init(void)
12053 int i;
12054
12055 cpu = stack_smp_processor_id();
12056 - t = &per_cpu(init_tss, cpu);
12057 + t = init_tss + cpu;
12058 oist = &per_cpu(orig_ist, cpu);
12059
12060 #ifdef CONFIG_NUMA
12061 @@ -1185,7 +1138,7 @@ void __cpuinit cpu_init(void)
12062 switch_to_new_gdt(cpu);
12063 loadsegment(fs, 0);
12064
12065 - load_idt((const struct desc_ptr *)&idt_descr);
12066 + load_idt(&idt_descr);
12067
12068 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
12069 syscall_init();
12070 @@ -1194,7 +1147,6 @@ void __cpuinit cpu_init(void)
12071 wrmsrl(MSR_KERNEL_GS_BASE, 0);
12072 barrier();
12073
12074 - x86_configure_nx();
12075 if (cpu != 0)
12076 enable_x2apic();
12077
12078 @@ -1248,7 +1200,7 @@ void __cpuinit cpu_init(void)
12079 {
12080 int cpu = smp_processor_id();
12081 struct task_struct *curr = current;
12082 - struct tss_struct *t = &per_cpu(init_tss, cpu);
12083 + struct tss_struct *t = init_tss + cpu;
12084 struct thread_struct *thread = &curr->thread;
12085
12086 if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
12087 diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
12088 index 5231312..a78a987 100644
12089 --- a/arch/x86/kernel/cpu/intel.c
12090 +++ b/arch/x86/kernel/cpu/intel.c
12091 @@ -174,7 +174,7 @@ static void __cpuinit trap_init_f00f_bug(void)
12092 * Update the IDT descriptor and reload the IDT so that
12093 * it uses the read-only mapped virtual address.
12094 */
12095 - idt_descr.address = fix_to_virt(FIX_F00F_IDT);
12096 + idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
12097 load_idt(&idt_descr);
12098 }
12099 #endif
12100 diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
12101 index 2af127d..8ff7ac0 100644
12102 --- a/arch/x86/kernel/cpu/mcheck/mce.c
12103 +++ b/arch/x86/kernel/cpu/mcheck/mce.c
12104 @@ -42,6 +42,7 @@
12105 #include <asm/processor.h>
12106 #include <asm/mce.h>
12107 #include <asm/msr.h>
12108 +#include <asm/local.h>
12109
12110 #include "mce-internal.h"
12111
12112 @@ -202,7 +203,7 @@ static void print_mce(struct mce *m)
12113 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
12114 m->cs, m->ip);
12115
12116 - if (m->cs == __KERNEL_CS)
12117 + if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
12118 print_symbol("{%s}", m->ip);
12119 pr_cont("\n");
12120 }
12121 @@ -235,10 +236,10 @@ static void print_mce(struct mce *m)
12122
12123 #define PANIC_TIMEOUT 5 /* 5 seconds */
12124
12125 -static atomic_t mce_paniced;
12126 +static atomic_unchecked_t mce_paniced;
12127
12128 static int fake_panic;
12129 -static atomic_t mce_fake_paniced;
12130 +static atomic_unchecked_t mce_fake_paniced;
12131
12132 /* Panic in progress. Enable interrupts and wait for final IPI */
12133 static void wait_for_panic(void)
12134 @@ -262,7 +263,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
12135 /*
12136 * Make sure only one CPU runs in machine check panic
12137 */
12138 - if (atomic_inc_return(&mce_paniced) > 1)
12139 + if (atomic_inc_return_unchecked(&mce_paniced) > 1)
12140 wait_for_panic();
12141 barrier();
12142
12143 @@ -270,7 +271,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
12144 console_verbose();
12145 } else {
12146 /* Don't log too much for fake panic */
12147 - if (atomic_inc_return(&mce_fake_paniced) > 1)
12148 + if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
12149 return;
12150 }
12151 /* First print corrected ones that are still unlogged */
12152 @@ -610,7 +611,7 @@ static int mce_timed_out(u64 *t)
12153 * might have been modified by someone else.
12154 */
12155 rmb();
12156 - if (atomic_read(&mce_paniced))
12157 + if (atomic_read_unchecked(&mce_paniced))
12158 wait_for_panic();
12159 if (!monarch_timeout)
12160 goto out;
12161 @@ -1398,7 +1399,7 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
12162 }
12163
12164 /* Call the installed machine check handler for this CPU setup. */
12165 -void (*machine_check_vector)(struct pt_regs *, long error_code) =
12166 +void (*machine_check_vector)(struct pt_regs *, long error_code) __read_only =
12167 unexpected_machine_check;
12168
12169 /*
12170 @@ -1421,7 +1422,9 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
12171 return;
12172 }
12173
12174 + pax_open_kernel();
12175 machine_check_vector = do_machine_check;
12176 + pax_close_kernel();
12177
12178 __mcheck_cpu_init_generic();
12179 __mcheck_cpu_init_vendor(c);
12180 @@ -1435,7 +1438,7 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
12181 */
12182
12183 static DEFINE_SPINLOCK(mce_chrdev_state_lock);
12184 -static int mce_chrdev_open_count; /* #times opened */
12185 +static local_t mce_chrdev_open_count; /* #times opened */
12186 static int mce_chrdev_open_exclu; /* already open exclusive? */
12187
12188 static int mce_chrdev_open(struct inode *inode, struct file *file)
12189 @@ -1443,7 +1446,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
12190 spin_lock(&mce_chrdev_state_lock);
12191
12192 if (mce_chrdev_open_exclu ||
12193 - (mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
12194 + (local_read(&mce_chrdev_open_count) && (file->f_flags & O_EXCL))) {
12195 spin_unlock(&mce_chrdev_state_lock);
12196
12197 return -EBUSY;
12198 @@ -1451,7 +1454,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
12199
12200 if (file->f_flags & O_EXCL)
12201 mce_chrdev_open_exclu = 1;
12202 - mce_chrdev_open_count++;
12203 + local_inc(&mce_chrdev_open_count);
12204
12205 spin_unlock(&mce_chrdev_state_lock);
12206
12207 @@ -1462,7 +1465,7 @@ static int mce_chrdev_release(struct inode *inode, struct file *file)
12208 {
12209 spin_lock(&mce_chrdev_state_lock);
12210
12211 - mce_chrdev_open_count--;
12212 + local_dec(&mce_chrdev_open_count);
12213 mce_chrdev_open_exclu = 0;
12214
12215 spin_unlock(&mce_chrdev_state_lock);
12216 @@ -2171,7 +2174,7 @@ struct dentry *mce_get_debugfs_dir(void)
12217 static void mce_reset(void)
12218 {
12219 cpu_missing = 0;
12220 - atomic_set(&mce_fake_paniced, 0);
12221 + atomic_set_unchecked(&mce_fake_paniced, 0);
12222 atomic_set(&mce_executing, 0);
12223 atomic_set(&mce_callin, 0);
12224 atomic_set(&global_nwo, 0);
12225 diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
12226 index 5c0e653..51ddf2c 100644
12227 --- a/arch/x86/kernel/cpu/mcheck/p5.c
12228 +++ b/arch/x86/kernel/cpu/mcheck/p5.c
12229 @@ -11,7 +11,7 @@
12230 #include <asm/processor.h>
12231 #include <asm/system.h>
12232 #include <asm/mce.h>
12233 -#include <asm/msr.h>
12234 +#include <asm/pgtable.h>
12235
12236 /* By default disabled */
12237 int mce_p5_enabled __read_mostly;
12238 @@ -50,7 +50,9 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
12239 if (!cpu_has(c, X86_FEATURE_MCE))
12240 return;
12241
12242 + pax_open_kernel();
12243 machine_check_vector = pentium_machine_check;
12244 + pax_close_kernel();
12245 /* Make sure the vector pointer is visible before we enable MCEs: */
12246 wmb();
12247
12248 diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
12249 index 54060f5..c1a7577 100644
12250 --- a/arch/x86/kernel/cpu/mcheck/winchip.c
12251 +++ b/arch/x86/kernel/cpu/mcheck/winchip.c
12252 @@ -11,6 +11,7 @@
12253 #include <asm/system.h>
12254 #include <asm/mce.h>
12255 #include <asm/msr.h>
12256 +#include <asm/pgtable.h>
12257
12258 /* Machine check handler for WinChip C6: */
12259 static void winchip_machine_check(struct pt_regs *regs, long error_code)
12260 @@ -24,7 +25,9 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c)
12261 {
12262 u32 lo, hi;
12263
12264 + pax_open_kernel();
12265 machine_check_vector = winchip_machine_check;
12266 + pax_close_kernel();
12267 /* Make sure the vector pointer is visible before we enable MCEs: */
12268 wmb();
12269
12270 diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
12271 index 6b96110..0da73eb 100644
12272 --- a/arch/x86/kernel/cpu/mtrr/main.c
12273 +++ b/arch/x86/kernel/cpu/mtrr/main.c
12274 @@ -62,7 +62,7 @@ static DEFINE_MUTEX(mtrr_mutex);
12275 u64 size_or_mask, size_and_mask;
12276 static bool mtrr_aps_delayed_init;
12277
12278 -static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
12279 +static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
12280
12281 const struct mtrr_ops *mtrr_if;
12282
12283 diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
12284 index df5e41f..816c719 100644
12285 --- a/arch/x86/kernel/cpu/mtrr/mtrr.h
12286 +++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
12287 @@ -25,7 +25,7 @@ struct mtrr_ops {
12288 int (*validate_add_page)(unsigned long base, unsigned long size,
12289 unsigned int type);
12290 int (*have_wrcomb)(void);
12291 -};
12292 +} __do_const;
12293
12294 extern int generic_get_free_region(unsigned long base, unsigned long size,
12295 int replace_reg);
12296 diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
12297 index 2bda212..78cc605 100644
12298 --- a/arch/x86/kernel/cpu/perf_event.c
12299 +++ b/arch/x86/kernel/cpu/perf_event.c
12300 @@ -1529,7 +1529,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
12301 break;
12302
12303 perf_callchain_store(entry, frame.return_address);
12304 - fp = frame.next_frame;
12305 + fp = (const void __force_user *)frame.next_frame;
12306 }
12307 }
12308
12309 diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
12310 index 13ad899..f642b9a 100644
12311 --- a/arch/x86/kernel/crash.c
12312 +++ b/arch/x86/kernel/crash.c
12313 @@ -36,10 +36,8 @@ static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
12314 {
12315 #ifdef CONFIG_X86_32
12316 struct pt_regs fixed_regs;
12317 -#endif
12318
12319 -#ifdef CONFIG_X86_32
12320 - if (!user_mode_vm(regs)) {
12321 + if (!user_mode(regs)) {
12322 crash_fixup_ss_esp(&fixed_regs, regs);
12323 regs = &fixed_regs;
12324 }
12325 diff --git a/arch/x86/kernel/doublefault_32.c b/arch/x86/kernel/doublefault_32.c
12326 index 37250fe..bf2ec74 100644
12327 --- a/arch/x86/kernel/doublefault_32.c
12328 +++ b/arch/x86/kernel/doublefault_32.c
12329 @@ -11,7 +11,7 @@
12330
12331 #define DOUBLEFAULT_STACKSIZE (1024)
12332 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
12333 -#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
12334 +#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
12335
12336 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
12337
12338 @@ -21,7 +21,7 @@ static void doublefault_fn(void)
12339 unsigned long gdt, tss;
12340
12341 store_gdt(&gdt_desc);
12342 - gdt = gdt_desc.address;
12343 + gdt = (unsigned long)gdt_desc.address;
12344
12345 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
12346
12347 @@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cacheline_aligned = {
12348 /* 0x2 bit is always set */
12349 .flags = X86_EFLAGS_SF | 0x2,
12350 .sp = STACK_START,
12351 - .es = __USER_DS,
12352 + .es = __KERNEL_DS,
12353 .cs = __KERNEL_CS,
12354 .ss = __KERNEL_DS,
12355 - .ds = __USER_DS,
12356 + .ds = __KERNEL_DS,
12357 .fs = __KERNEL_PERCPU,
12358
12359 .__cr3 = __pa_nodebug(swapper_pg_dir),
12360 diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
12361 index 1aae78f..aab3a3d 100644
12362 --- a/arch/x86/kernel/dumpstack.c
12363 +++ b/arch/x86/kernel/dumpstack.c
12364 @@ -2,6 +2,9 @@
12365 * Copyright (C) 1991, 1992 Linus Torvalds
12366 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
12367 */
12368 +#ifdef CONFIG_GRKERNSEC_HIDESYM
12369 +#define __INCLUDED_BY_HIDESYM 1
12370 +#endif
12371 #include <linux/kallsyms.h>
12372 #include <linux/kprobes.h>
12373 #include <linux/uaccess.h>
12374 @@ -35,9 +38,8 @@ void printk_address(unsigned long address, int reliable)
12375 static void
12376 print_ftrace_graph_addr(unsigned long addr, void *data,
12377 const struct stacktrace_ops *ops,
12378 - struct thread_info *tinfo, int *graph)
12379 + struct task_struct *task, int *graph)
12380 {
12381 - struct task_struct *task = tinfo->task;
12382 unsigned long ret_addr;
12383 int index = task->curr_ret_stack;
12384
12385 @@ -58,7 +60,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
12386 static inline void
12387 print_ftrace_graph_addr(unsigned long addr, void *data,
12388 const struct stacktrace_ops *ops,
12389 - struct thread_info *tinfo, int *graph)
12390 + struct task_struct *task, int *graph)
12391 { }
12392 #endif
12393
12394 @@ -69,10 +71,8 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
12395 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
12396 */
12397
12398 -static inline int valid_stack_ptr(struct thread_info *tinfo,
12399 - void *p, unsigned int size, void *end)
12400 +static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
12401 {
12402 - void *t = tinfo;
12403 if (end) {
12404 if (p < end && p >= (end-THREAD_SIZE))
12405 return 1;
12406 @@ -83,14 +83,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo,
12407 }
12408
12409 unsigned long
12410 -print_context_stack(struct thread_info *tinfo,
12411 +print_context_stack(struct task_struct *task, void *stack_start,
12412 unsigned long *stack, unsigned long bp,
12413 const struct stacktrace_ops *ops, void *data,
12414 unsigned long *end, int *graph)
12415 {
12416 struct stack_frame *frame = (struct stack_frame *)bp;
12417
12418 - while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
12419 + while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
12420 unsigned long addr;
12421
12422 addr = *stack;
12423 @@ -102,7 +102,7 @@ print_context_stack(struct thread_info *tinfo,
12424 } else {
12425 ops->address(data, addr, 0);
12426 }
12427 - print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
12428 + print_ftrace_graph_addr(addr, data, ops, task, graph);
12429 }
12430 stack++;
12431 }
12432 @@ -111,7 +111,7 @@ print_context_stack(struct thread_info *tinfo,
12433 EXPORT_SYMBOL_GPL(print_context_stack);
12434
12435 unsigned long
12436 -print_context_stack_bp(struct thread_info *tinfo,
12437 +print_context_stack_bp(struct task_struct *task, void *stack_start,
12438 unsigned long *stack, unsigned long bp,
12439 const struct stacktrace_ops *ops, void *data,
12440 unsigned long *end, int *graph)
12441 @@ -119,7 +119,7 @@ print_context_stack_bp(struct thread_info *tinfo,
12442 struct stack_frame *frame = (struct stack_frame *)bp;
12443 unsigned long *ret_addr = &frame->return_address;
12444
12445 - while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
12446 + while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) {
12447 unsigned long addr = *ret_addr;
12448
12449 if (!__kernel_text_address(addr))
12450 @@ -128,7 +128,7 @@ print_context_stack_bp(struct thread_info *tinfo,
12451 ops->address(data, addr, 1);
12452 frame = frame->next_frame;
12453 ret_addr = &frame->return_address;
12454 - print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
12455 + print_ftrace_graph_addr(addr, data, ops, task, graph);
12456 }
12457
12458 return (unsigned long)frame;
12459 @@ -186,7 +186,7 @@ void dump_stack(void)
12460
12461 bp = stack_frame(current, NULL);
12462 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
12463 - current->pid, current->comm, print_tainted(),
12464 + task_pid_nr(current), current->comm, print_tainted(),
12465 init_utsname()->release,
12466 (int)strcspn(init_utsname()->version, " "),
12467 init_utsname()->version);
12468 @@ -222,6 +222,8 @@ unsigned __kprobes long oops_begin(void)
12469 }
12470 EXPORT_SYMBOL_GPL(oops_begin);
12471
12472 +extern void gr_handle_kernel_exploit(void);
12473 +
12474 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
12475 {
12476 if (regs && kexec_should_crash(current))
12477 @@ -243,7 +245,10 @@ void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
12478 panic("Fatal exception in interrupt");
12479 if (panic_on_oops)
12480 panic("Fatal exception");
12481 - do_exit(signr);
12482 +
12483 + gr_handle_kernel_exploit();
12484 +
12485 + do_group_exit(signr);
12486 }
12487
12488 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
12489 @@ -269,7 +274,7 @@ int __kprobes __die(const char *str, struct pt_regs *regs, long err)
12490
12491 show_registers(regs);
12492 #ifdef CONFIG_X86_32
12493 - if (user_mode_vm(regs)) {
12494 + if (user_mode(regs)) {
12495 sp = regs->sp;
12496 ss = regs->ss & 0xffff;
12497 } else {
12498 @@ -297,7 +302,7 @@ void die(const char *str, struct pt_regs *regs, long err)
12499 unsigned long flags = oops_begin();
12500 int sig = SIGSEGV;
12501
12502 - if (!user_mode_vm(regs))
12503 + if (!user_mode(regs))
12504 report_bug(regs->ip, regs);
12505
12506 if (__die(str, regs, err))
12507 diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
12508 index c99f9ed..2a15d80 100644
12509 --- a/arch/x86/kernel/dumpstack_32.c
12510 +++ b/arch/x86/kernel/dumpstack_32.c
12511 @@ -38,15 +38,13 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
12512 bp = stack_frame(task, regs);
12513
12514 for (;;) {
12515 - struct thread_info *context;
12516 + void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
12517
12518 - context = (struct thread_info *)
12519 - ((unsigned long)stack & (~(THREAD_SIZE - 1)));
12520 - bp = ops->walk_stack(context, stack, bp, ops, data, NULL, &graph);
12521 + bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
12522
12523 - stack = (unsigned long *)context->previous_esp;
12524 - if (!stack)
12525 + if (stack_start == task_stack_page(task))
12526 break;
12527 + stack = *(unsigned long **)stack_start;
12528 if (ops->stack(data, "IRQ") < 0)
12529 break;
12530 touch_nmi_watchdog();
12531 @@ -96,21 +94,22 @@ void show_registers(struct pt_regs *regs)
12532 * When in-kernel, we also print out the stack and code at the
12533 * time of the fault..
12534 */
12535 - if (!user_mode_vm(regs)) {
12536 + if (!user_mode(regs)) {
12537 unsigned int code_prologue = code_bytes * 43 / 64;
12538 unsigned int code_len = code_bytes;
12539 unsigned char c;
12540 u8 *ip;
12541 + unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(smp_processor_id())[(0xffff & regs->cs) >> 3]);
12542
12543 printk(KERN_EMERG "Stack:\n");
12544 show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
12545
12546 printk(KERN_EMERG "Code: ");
12547
12548 - ip = (u8 *)regs->ip - code_prologue;
12549 + ip = (u8 *)regs->ip - code_prologue + cs_base;
12550 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
12551 /* try starting at IP */
12552 - ip = (u8 *)regs->ip;
12553 + ip = (u8 *)regs->ip + cs_base;
12554 code_len = code_len - code_prologue + 1;
12555 }
12556 for (i = 0; i < code_len; i++, ip++) {
12557 @@ -119,7 +118,7 @@ void show_registers(struct pt_regs *regs)
12558 printk(KERN_CONT " Bad EIP value.");
12559 break;
12560 }
12561 - if (ip == (u8 *)regs->ip)
12562 + if (ip == (u8 *)regs->ip + cs_base)
12563 printk(KERN_CONT "<%02x> ", c);
12564 else
12565 printk(KERN_CONT "%02x ", c);
12566 @@ -132,6 +131,7 @@ int is_valid_bugaddr(unsigned long ip)
12567 {
12568 unsigned short ud2;
12569
12570 + ip = ktla_ktva(ip);
12571 if (ip < PAGE_OFFSET)
12572 return 0;
12573 if (probe_kernel_address((unsigned short *)ip, ud2))
12574 @@ -139,3 +139,15 @@ int is_valid_bugaddr(unsigned long ip)
12575
12576 return ud2 == 0x0b0f;
12577 }
12578 +
12579 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
12580 +void pax_check_alloca(unsigned long size)
12581 +{
12582 + unsigned long sp = (unsigned long)&sp, stack_left;
12583 +
12584 + /* all kernel stacks are of the same size */
12585 + stack_left = sp & (THREAD_SIZE - 1);
12586 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
12587 +}
12588 +EXPORT_SYMBOL(pax_check_alloca);
12589 +#endif
12590 diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
12591 index 6d728d9..279514e 100644
12592 --- a/arch/x86/kernel/dumpstack_64.c
12593 +++ b/arch/x86/kernel/dumpstack_64.c
12594 @@ -119,9 +119,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
12595 unsigned long *irq_stack_end =
12596 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
12597 unsigned used = 0;
12598 - struct thread_info *tinfo;
12599 int graph = 0;
12600 unsigned long dummy;
12601 + void *stack_start;
12602
12603 if (!task)
12604 task = current;
12605 @@ -142,10 +142,10 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
12606 * current stack address. If the stacks consist of nested
12607 * exceptions
12608 */
12609 - tinfo = task_thread_info(task);
12610 for (;;) {
12611 char *id;
12612 unsigned long *estack_end;
12613 +
12614 estack_end = in_exception_stack(cpu, (unsigned long)stack,
12615 &used, &id);
12616
12617 @@ -153,7 +153,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
12618 if (ops->stack(data, id) < 0)
12619 break;
12620
12621 - bp = ops->walk_stack(tinfo, stack, bp, ops,
12622 + bp = ops->walk_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
12623 data, estack_end, &graph);
12624 ops->stack(data, "<EOE>");
12625 /*
12626 @@ -172,7 +172,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
12627 if (in_irq_stack(stack, irq_stack, irq_stack_end)) {
12628 if (ops->stack(data, "IRQ") < 0)
12629 break;
12630 - bp = ops->walk_stack(tinfo, stack, bp,
12631 + bp = ops->walk_stack(task, irq_stack, stack, bp,
12632 ops, data, irq_stack_end, &graph);
12633 /*
12634 * We link to the next stack (which would be
12635 @@ -191,7 +191,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
12636 /*
12637 * This handles the process stack:
12638 */
12639 - bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
12640 + stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
12641 + bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
12642 put_cpu();
12643 }
12644 EXPORT_SYMBOL(dump_trace);
12645 @@ -305,3 +306,50 @@ int is_valid_bugaddr(unsigned long ip)
12646
12647 return ud2 == 0x0b0f;
12648 }
12649 +
12650 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
12651 +void pax_check_alloca(unsigned long size)
12652 +{
12653 + unsigned long sp = (unsigned long)&sp, stack_start, stack_end;
12654 + unsigned cpu, used;
12655 + char *id;
12656 +
12657 + /* check the process stack first */
12658 + stack_start = (unsigned long)task_stack_page(current);
12659 + stack_end = stack_start + THREAD_SIZE;
12660 + if (likely(stack_start <= sp && sp < stack_end)) {
12661 + unsigned long stack_left = sp & (THREAD_SIZE - 1);
12662 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
12663 + return;
12664 + }
12665 +
12666 + cpu = get_cpu();
12667 +
12668 + /* check the irq stacks */
12669 + stack_end = (unsigned long)per_cpu(irq_stack_ptr, cpu);
12670 + stack_start = stack_end - IRQ_STACK_SIZE;
12671 + if (stack_start <= sp && sp < stack_end) {
12672 + unsigned long stack_left = sp & (IRQ_STACK_SIZE - 1);
12673 + put_cpu();
12674 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
12675 + return;
12676 + }
12677 +
12678 + /* check the exception stacks */
12679 + used = 0;
12680 + stack_end = (unsigned long)in_exception_stack(cpu, sp, &used, &id);
12681 + stack_start = stack_end - EXCEPTION_STKSZ;
12682 + if (stack_end && stack_start <= sp && sp < stack_end) {
12683 + unsigned long stack_left = sp & (EXCEPTION_STKSZ - 1);
12684 + put_cpu();
12685 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
12686 + return;
12687 + }
12688 +
12689 + put_cpu();
12690 +
12691 + /* unknown stack */
12692 + BUG();
12693 +}
12694 +EXPORT_SYMBOL(pax_check_alloca);
12695 +#endif
12696 diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
12697 index cd28a35..c72ed9a 100644
12698 --- a/arch/x86/kernel/early_printk.c
12699 +++ b/arch/x86/kernel/early_printk.c
12700 @@ -7,6 +7,7 @@
12701 #include <linux/pci_regs.h>
12702 #include <linux/pci_ids.h>
12703 #include <linux/errno.h>
12704 +#include <linux/sched.h>
12705 #include <asm/io.h>
12706 #include <asm/processor.h>
12707 #include <asm/fcntl.h>
12708 diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
12709 index f3f6f53..0841b66 100644
12710 --- a/arch/x86/kernel/entry_32.S
12711 +++ b/arch/x86/kernel/entry_32.S
12712 @@ -186,13 +186,146 @@
12713 /*CFI_REL_OFFSET gs, PT_GS*/
12714 .endm
12715 .macro SET_KERNEL_GS reg
12716 +
12717 +#ifdef CONFIG_CC_STACKPROTECTOR
12718 movl $(__KERNEL_STACK_CANARY), \reg
12719 +#elif defined(CONFIG_PAX_MEMORY_UDEREF)
12720 + movl $(__USER_DS), \reg
12721 +#else
12722 + xorl \reg, \reg
12723 +#endif
12724 +
12725 movl \reg, %gs
12726 .endm
12727
12728 #endif /* CONFIG_X86_32_LAZY_GS */
12729
12730 -.macro SAVE_ALL
12731 +.macro pax_enter_kernel
12732 +#ifdef CONFIG_PAX_KERNEXEC
12733 + call pax_enter_kernel
12734 +#endif
12735 +.endm
12736 +
12737 +.macro pax_exit_kernel
12738 +#ifdef CONFIG_PAX_KERNEXEC
12739 + call pax_exit_kernel
12740 +#endif
12741 +.endm
12742 +
12743 +#ifdef CONFIG_PAX_KERNEXEC
12744 +ENTRY(pax_enter_kernel)
12745 +#ifdef CONFIG_PARAVIRT
12746 + pushl %eax
12747 + pushl %ecx
12748 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
12749 + mov %eax, %esi
12750 +#else
12751 + mov %cr0, %esi
12752 +#endif
12753 + bts $16, %esi
12754 + jnc 1f
12755 + mov %cs, %esi
12756 + cmp $__KERNEL_CS, %esi
12757 + jz 3f
12758 + ljmp $__KERNEL_CS, $3f
12759 +1: ljmp $__KERNEXEC_KERNEL_CS, $2f
12760 +2:
12761 +#ifdef CONFIG_PARAVIRT
12762 + mov %esi, %eax
12763 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
12764 +#else
12765 + mov %esi, %cr0
12766 +#endif
12767 +3:
12768 +#ifdef CONFIG_PARAVIRT
12769 + popl %ecx
12770 + popl %eax
12771 +#endif
12772 + ret
12773 +ENDPROC(pax_enter_kernel)
12774 +
12775 +ENTRY(pax_exit_kernel)
12776 +#ifdef CONFIG_PARAVIRT
12777 + pushl %eax
12778 + pushl %ecx
12779 +#endif
12780 + mov %cs, %esi
12781 + cmp $__KERNEXEC_KERNEL_CS, %esi
12782 + jnz 2f
12783 +#ifdef CONFIG_PARAVIRT
12784 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
12785 + mov %eax, %esi
12786 +#else
12787 + mov %cr0, %esi
12788 +#endif
12789 + btr $16, %esi
12790 + ljmp $__KERNEL_CS, $1f
12791 +1:
12792 +#ifdef CONFIG_PARAVIRT
12793 + mov %esi, %eax
12794 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
12795 +#else
12796 + mov %esi, %cr0
12797 +#endif
12798 +2:
12799 +#ifdef CONFIG_PARAVIRT
12800 + popl %ecx
12801 + popl %eax
12802 +#endif
12803 + ret
12804 +ENDPROC(pax_exit_kernel)
12805 +#endif
12806 +
12807 +.macro pax_erase_kstack
12808 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
12809 + call pax_erase_kstack
12810 +#endif
12811 +.endm
12812 +
12813 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
12814 +/*
12815 + * ebp: thread_info
12816 + * ecx, edx: can be clobbered
12817 + */
12818 +ENTRY(pax_erase_kstack)
12819 + pushl %edi
12820 + pushl %eax
12821 +
12822 + mov TI_lowest_stack(%ebp), %edi
12823 + mov $-0xBEEF, %eax
12824 + std
12825 +
12826 +1: mov %edi, %ecx
12827 + and $THREAD_SIZE_asm - 1, %ecx
12828 + shr $2, %ecx
12829 + repne scasl
12830 + jecxz 2f
12831 +
12832 + cmp $2*16, %ecx
12833 + jc 2f
12834 +
12835 + mov $2*16, %ecx
12836 + repe scasl
12837 + jecxz 2f
12838 + jne 1b
12839 +
12840 +2: cld
12841 + mov %esp, %ecx
12842 + sub %edi, %ecx
12843 + shr $2, %ecx
12844 + rep stosl
12845 +
12846 + mov TI_task_thread_sp0(%ebp), %edi
12847 + sub $128, %edi
12848 + mov %edi, TI_lowest_stack(%ebp)
12849 +
12850 + popl %eax
12851 + popl %edi
12852 + ret
12853 +ENDPROC(pax_erase_kstack)
12854 +#endif
12855 +
12856 +.macro __SAVE_ALL _DS
12857 cld
12858 PUSH_GS
12859 pushl_cfi %fs
12860 @@ -215,7 +348,7 @@
12861 CFI_REL_OFFSET ecx, 0
12862 pushl_cfi %ebx
12863 CFI_REL_OFFSET ebx, 0
12864 - movl $(__USER_DS), %edx
12865 + movl $\_DS, %edx
12866 movl %edx, %ds
12867 movl %edx, %es
12868 movl $(__KERNEL_PERCPU), %edx
12869 @@ -223,6 +356,15 @@
12870 SET_KERNEL_GS %edx
12871 .endm
12872
12873 +.macro SAVE_ALL
12874 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
12875 + __SAVE_ALL __KERNEL_DS
12876 + pax_enter_kernel
12877 +#else
12878 + __SAVE_ALL __USER_DS
12879 +#endif
12880 +.endm
12881 +
12882 .macro RESTORE_INT_REGS
12883 popl_cfi %ebx
12884 CFI_RESTORE ebx
12885 @@ -308,7 +450,7 @@ ENTRY(ret_from_fork)
12886 popfl_cfi
12887 jmp syscall_exit
12888 CFI_ENDPROC
12889 -END(ret_from_fork)
12890 +ENDPROC(ret_from_fork)
12891
12892 /*
12893 * Interrupt exit functions should be protected against kprobes
12894 @@ -333,7 +475,15 @@ check_userspace:
12895 movb PT_CS(%esp), %al
12896 andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
12897 cmpl $USER_RPL, %eax
12898 +
12899 +#ifdef CONFIG_PAX_KERNEXEC
12900 + jae resume_userspace
12901 +
12902 + PAX_EXIT_KERNEL
12903 + jmp resume_kernel
12904 +#else
12905 jb resume_kernel # not returning to v8086 or userspace
12906 +#endif
12907
12908 ENTRY(resume_userspace)
12909 LOCKDEP_SYS_EXIT
12910 @@ -345,8 +495,8 @@ ENTRY(resume_userspace)
12911 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
12912 # int/exception return?
12913 jne work_pending
12914 - jmp restore_all
12915 -END(ret_from_exception)
12916 + jmp restore_all_pax
12917 +ENDPROC(ret_from_exception)
12918
12919 #ifdef CONFIG_PREEMPT
12920 ENTRY(resume_kernel)
12921 @@ -361,7 +511,7 @@ need_resched:
12922 jz restore_all
12923 call preempt_schedule_irq
12924 jmp need_resched
12925 -END(resume_kernel)
12926 +ENDPROC(resume_kernel)
12927 #endif
12928 CFI_ENDPROC
12929 /*
12930 @@ -395,23 +545,34 @@ sysenter_past_esp:
12931 /*CFI_REL_OFFSET cs, 0*/
12932 /*
12933 * Push current_thread_info()->sysenter_return to the stack.
12934 - * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
12935 - * pushed above; +8 corresponds to copy_thread's esp0 setting.
12936 */
12937 - pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
12938 + pushl_cfi $0
12939 CFI_REL_OFFSET eip, 0
12940
12941 pushl_cfi %eax
12942 SAVE_ALL
12943 + GET_THREAD_INFO(%ebp)
12944 + movl TI_sysenter_return(%ebp),%ebp
12945 + movl %ebp,PT_EIP(%esp)
12946 ENABLE_INTERRUPTS(CLBR_NONE)
12947
12948 /*
12949 * Load the potential sixth argument from user stack.
12950 * Careful about security.
12951 */
12952 + movl PT_OLDESP(%esp),%ebp
12953 +
12954 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12955 + mov PT_OLDSS(%esp),%ds
12956 +1: movl %ds:(%ebp),%ebp
12957 + push %ss
12958 + pop %ds
12959 +#else
12960 cmpl $__PAGE_OFFSET-3,%ebp
12961 jae syscall_fault
12962 1: movl (%ebp),%ebp
12963 +#endif
12964 +
12965 movl %ebp,PT_EBP(%esp)
12966 .section __ex_table,"a"
12967 .align 4
12968 @@ -434,12 +595,24 @@ sysenter_do_call:
12969 testl $_TIF_ALLWORK_MASK, %ecx
12970 jne sysexit_audit
12971 sysenter_exit:
12972 +
12973 +#ifdef CONFIG_PAX_RANDKSTACK
12974 + pushl_cfi %eax
12975 + movl %esp, %eax
12976 + call pax_randomize_kstack
12977 + popl_cfi %eax
12978 +#endif
12979 +
12980 + pax_erase_kstack
12981 +
12982 /* if something modifies registers it must also disable sysexit */
12983 movl PT_EIP(%esp), %edx
12984 movl PT_OLDESP(%esp), %ecx
12985 xorl %ebp,%ebp
12986 TRACE_IRQS_ON
12987 1: mov PT_FS(%esp), %fs
12988 +2: mov PT_DS(%esp), %ds
12989 +3: mov PT_ES(%esp), %es
12990 PTGS_TO_GS
12991 ENABLE_INTERRUPTS_SYSEXIT
12992
12993 @@ -456,6 +629,9 @@ sysenter_audit:
12994 movl %eax,%edx /* 2nd arg: syscall number */
12995 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
12996 call audit_syscall_entry
12997 +
12998 + pax_erase_kstack
12999 +
13000 pushl_cfi %ebx
13001 movl PT_EAX(%esp),%eax /* reload syscall number */
13002 jmp sysenter_do_call
13003 @@ -482,11 +658,17 @@ sysexit_audit:
13004
13005 CFI_ENDPROC
13006 .pushsection .fixup,"ax"
13007 -2: movl $0,PT_FS(%esp)
13008 +4: movl $0,PT_FS(%esp)
13009 + jmp 1b
13010 +5: movl $0,PT_DS(%esp)
13011 + jmp 1b
13012 +6: movl $0,PT_ES(%esp)
13013 jmp 1b
13014 .section __ex_table,"a"
13015 .align 4
13016 - .long 1b,2b
13017 + .long 1b,4b
13018 + .long 2b,5b
13019 + .long 3b,6b
13020 .popsection
13021 PTGS_TO_GS_EX
13022 ENDPROC(ia32_sysenter_target)
13023 @@ -519,6 +701,15 @@ syscall_exit:
13024 testl $_TIF_ALLWORK_MASK, %ecx # current->work
13025 jne syscall_exit_work
13026
13027 +restore_all_pax:
13028 +
13029 +#ifdef CONFIG_PAX_RANDKSTACK
13030 + movl %esp, %eax
13031 + call pax_randomize_kstack
13032 +#endif
13033 +
13034 + pax_erase_kstack
13035 +
13036 restore_all:
13037 TRACE_IRQS_IRET
13038 restore_all_notrace:
13039 @@ -578,14 +769,34 @@ ldt_ss:
13040 * compensating for the offset by changing to the ESPFIX segment with
13041 * a base address that matches for the difference.
13042 */
13043 -#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
13044 +#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
13045 mov %esp, %edx /* load kernel esp */
13046 mov PT_OLDESP(%esp), %eax /* load userspace esp */
13047 mov %dx, %ax /* eax: new kernel esp */
13048 sub %eax, %edx /* offset (low word is 0) */
13049 +#ifdef CONFIG_SMP
13050 + movl PER_CPU_VAR(cpu_number), %ebx
13051 + shll $PAGE_SHIFT_asm, %ebx
13052 + addl $cpu_gdt_table, %ebx
13053 +#else
13054 + movl $cpu_gdt_table, %ebx
13055 +#endif
13056 shr $16, %edx
13057 - mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
13058 - mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
13059 +
13060 +#ifdef CONFIG_PAX_KERNEXEC
13061 + mov %cr0, %esi
13062 + btr $16, %esi
13063 + mov %esi, %cr0
13064 +#endif
13065 +
13066 + mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
13067 + mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
13068 +
13069 +#ifdef CONFIG_PAX_KERNEXEC
13070 + bts $16, %esi
13071 + mov %esi, %cr0
13072 +#endif
13073 +
13074 pushl_cfi $__ESPFIX_SS
13075 pushl_cfi %eax /* new kernel esp */
13076 /* Disable interrupts, but do not irqtrace this section: we
13077 @@ -614,34 +825,28 @@ work_resched:
13078 movl TI_flags(%ebp), %ecx
13079 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
13080 # than syscall tracing?
13081 - jz restore_all
13082 + jz restore_all_pax
13083 testb $_TIF_NEED_RESCHED, %cl
13084 jnz work_resched
13085
13086 work_notifysig: # deal with pending signals and
13087 # notify-resume requests
13088 + movl %esp, %eax
13089 #ifdef CONFIG_VM86
13090 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
13091 - movl %esp, %eax
13092 - jne work_notifysig_v86 # returning to kernel-space or
13093 + jz 1f # returning to kernel-space or
13094 # vm86-space
13095 - xorl %edx, %edx
13096 - call do_notify_resume
13097 - jmp resume_userspace_sig
13098
13099 - ALIGN
13100 -work_notifysig_v86:
13101 pushl_cfi %ecx # save ti_flags for do_notify_resume
13102 call save_v86_state # %eax contains pt_regs pointer
13103 popl_cfi %ecx
13104 movl %eax, %esp
13105 -#else
13106 - movl %esp, %eax
13107 +1:
13108 #endif
13109 xorl %edx, %edx
13110 call do_notify_resume
13111 jmp resume_userspace_sig
13112 -END(work_pending)
13113 +ENDPROC(work_pending)
13114
13115 # perform syscall exit tracing
13116 ALIGN
13117 @@ -649,11 +854,14 @@ syscall_trace_entry:
13118 movl $-ENOSYS,PT_EAX(%esp)
13119 movl %esp, %eax
13120 call syscall_trace_enter
13121 +
13122 + pax_erase_kstack
13123 +
13124 /* What it returned is what we'll actually use. */
13125 cmpl $(nr_syscalls), %eax
13126 jnae syscall_call
13127 jmp syscall_exit
13128 -END(syscall_trace_entry)
13129 +ENDPROC(syscall_trace_entry)
13130
13131 # perform syscall exit tracing
13132 ALIGN
13133 @@ -666,20 +874,24 @@ syscall_exit_work:
13134 movl %esp, %eax
13135 call syscall_trace_leave
13136 jmp resume_userspace
13137 -END(syscall_exit_work)
13138 +ENDPROC(syscall_exit_work)
13139 CFI_ENDPROC
13140
13141 RING0_INT_FRAME # can't unwind into user space anyway
13142 syscall_fault:
13143 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13144 + push %ss
13145 + pop %ds
13146 +#endif
13147 GET_THREAD_INFO(%ebp)
13148 movl $-EFAULT,PT_EAX(%esp)
13149 jmp resume_userspace
13150 -END(syscall_fault)
13151 +ENDPROC(syscall_fault)
13152
13153 syscall_badsys:
13154 movl $-ENOSYS,PT_EAX(%esp)
13155 jmp resume_userspace
13156 -END(syscall_badsys)
13157 +ENDPROC(syscall_badsys)
13158 CFI_ENDPROC
13159 /*
13160 * End of kprobes section
13161 @@ -753,6 +965,36 @@ ptregs_clone:
13162 CFI_ENDPROC
13163 ENDPROC(ptregs_clone)
13164
13165 + ALIGN;
13166 +ENTRY(kernel_execve)
13167 + CFI_STARTPROC
13168 + pushl_cfi %ebp
13169 + sub $PT_OLDSS+4,%esp
13170 + pushl_cfi %edi
13171 + pushl_cfi %ecx
13172 + pushl_cfi %eax
13173 + lea 3*4(%esp),%edi
13174 + mov $PT_OLDSS/4+1,%ecx
13175 + xorl %eax,%eax
13176 + rep stosl
13177 + popl_cfi %eax
13178 + popl_cfi %ecx
13179 + popl_cfi %edi
13180 + movl $X86_EFLAGS_IF,PT_EFLAGS(%esp)
13181 + pushl_cfi %esp
13182 + call sys_execve
13183 + add $4,%esp
13184 + CFI_ADJUST_CFA_OFFSET -4
13185 + GET_THREAD_INFO(%ebp)
13186 + test %eax,%eax
13187 + jz syscall_exit
13188 + add $PT_OLDSS+4,%esp
13189 + CFI_ADJUST_CFA_OFFSET -PT_OLDSS-4
13190 + popl_cfi %ebp
13191 + ret
13192 + CFI_ENDPROC
13193 +ENDPROC(kernel_execve)
13194 +
13195 .macro FIXUP_ESPFIX_STACK
13196 /*
13197 * Switch back for ESPFIX stack to the normal zerobased stack
13198 @@ -762,8 +1004,15 @@ ENDPROC(ptregs_clone)
13199 * normal stack and adjusts ESP with the matching offset.
13200 */
13201 /* fixup the stack */
13202 - mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
13203 - mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
13204 +#ifdef CONFIG_SMP
13205 + movl PER_CPU_VAR(cpu_number), %ebx
13206 + shll $PAGE_SHIFT_asm, %ebx
13207 + addl $cpu_gdt_table, %ebx
13208 +#else
13209 + movl $cpu_gdt_table, %ebx
13210 +#endif
13211 + mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
13212 + mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
13213 shl $16, %eax
13214 addl %esp, %eax /* the adjusted stack pointer */
13215 pushl_cfi $__KERNEL_DS
13216 @@ -816,7 +1065,7 @@ vector=vector+1
13217 .endr
13218 2: jmp common_interrupt
13219 .endr
13220 -END(irq_entries_start)
13221 +ENDPROC(irq_entries_start)
13222
13223 .previous
13224 END(interrupt)
13225 @@ -864,7 +1113,7 @@ ENTRY(coprocessor_error)
13226 pushl_cfi $do_coprocessor_error
13227 jmp error_code
13228 CFI_ENDPROC
13229 -END(coprocessor_error)
13230 +ENDPROC(coprocessor_error)
13231
13232 ENTRY(simd_coprocessor_error)
13233 RING0_INT_FRAME
13234 @@ -885,7 +1134,7 @@ ENTRY(simd_coprocessor_error)
13235 #endif
13236 jmp error_code
13237 CFI_ENDPROC
13238 -END(simd_coprocessor_error)
13239 +ENDPROC(simd_coprocessor_error)
13240
13241 ENTRY(device_not_available)
13242 RING0_INT_FRAME
13243 @@ -893,7 +1142,7 @@ ENTRY(device_not_available)
13244 pushl_cfi $do_device_not_available
13245 jmp error_code
13246 CFI_ENDPROC
13247 -END(device_not_available)
13248 +ENDPROC(device_not_available)
13249
13250 #ifdef CONFIG_PARAVIRT
13251 ENTRY(native_iret)
13252 @@ -902,12 +1151,12 @@ ENTRY(native_iret)
13253 .align 4
13254 .long native_iret, iret_exc
13255 .previous
13256 -END(native_iret)
13257 +ENDPROC(native_iret)
13258
13259 ENTRY(native_irq_enable_sysexit)
13260 sti
13261 sysexit
13262 -END(native_irq_enable_sysexit)
13263 +ENDPROC(native_irq_enable_sysexit)
13264 #endif
13265
13266 ENTRY(overflow)
13267 @@ -916,7 +1165,7 @@ ENTRY(overflow)
13268 pushl_cfi $do_overflow
13269 jmp error_code
13270 CFI_ENDPROC
13271 -END(overflow)
13272 +ENDPROC(overflow)
13273
13274 ENTRY(bounds)
13275 RING0_INT_FRAME
13276 @@ -924,7 +1173,7 @@ ENTRY(bounds)
13277 pushl_cfi $do_bounds
13278 jmp error_code
13279 CFI_ENDPROC
13280 -END(bounds)
13281 +ENDPROC(bounds)
13282
13283 ENTRY(invalid_op)
13284 RING0_INT_FRAME
13285 @@ -932,7 +1181,7 @@ ENTRY(invalid_op)
13286 pushl_cfi $do_invalid_op
13287 jmp error_code
13288 CFI_ENDPROC
13289 -END(invalid_op)
13290 +ENDPROC(invalid_op)
13291
13292 ENTRY(coprocessor_segment_overrun)
13293 RING0_INT_FRAME
13294 @@ -940,35 +1189,35 @@ ENTRY(coprocessor_segment_overrun)
13295 pushl_cfi $do_coprocessor_segment_overrun
13296 jmp error_code
13297 CFI_ENDPROC
13298 -END(coprocessor_segment_overrun)
13299 +ENDPROC(coprocessor_segment_overrun)
13300
13301 ENTRY(invalid_TSS)
13302 RING0_EC_FRAME
13303 pushl_cfi $do_invalid_TSS
13304 jmp error_code
13305 CFI_ENDPROC
13306 -END(invalid_TSS)
13307 +ENDPROC(invalid_TSS)
13308
13309 ENTRY(segment_not_present)
13310 RING0_EC_FRAME
13311 pushl_cfi $do_segment_not_present
13312 jmp error_code
13313 CFI_ENDPROC
13314 -END(segment_not_present)
13315 +ENDPROC(segment_not_present)
13316
13317 ENTRY(stack_segment)
13318 RING0_EC_FRAME
13319 pushl_cfi $do_stack_segment
13320 jmp error_code
13321 CFI_ENDPROC
13322 -END(stack_segment)
13323 +ENDPROC(stack_segment)
13324
13325 ENTRY(alignment_check)
13326 RING0_EC_FRAME
13327 pushl_cfi $do_alignment_check
13328 jmp error_code
13329 CFI_ENDPROC
13330 -END(alignment_check)
13331 +ENDPROC(alignment_check)
13332
13333 ENTRY(divide_error)
13334 RING0_INT_FRAME
13335 @@ -976,7 +1225,7 @@ ENTRY(divide_error)
13336 pushl_cfi $do_divide_error
13337 jmp error_code
13338 CFI_ENDPROC
13339 -END(divide_error)
13340 +ENDPROC(divide_error)
13341
13342 #ifdef CONFIG_X86_MCE
13343 ENTRY(machine_check)
13344 @@ -985,7 +1234,7 @@ ENTRY(machine_check)
13345 pushl_cfi machine_check_vector
13346 jmp error_code
13347 CFI_ENDPROC
13348 -END(machine_check)
13349 +ENDPROC(machine_check)
13350 #endif
13351
13352 ENTRY(spurious_interrupt_bug)
13353 @@ -994,7 +1243,7 @@ ENTRY(spurious_interrupt_bug)
13354 pushl_cfi $do_spurious_interrupt_bug
13355 jmp error_code
13356 CFI_ENDPROC
13357 -END(spurious_interrupt_bug)
13358 +ENDPROC(spurious_interrupt_bug)
13359 /*
13360 * End of kprobes section
13361 */
13362 @@ -1109,7 +1358,7 @@ BUILD_INTERRUPT3(xen_hvm_callback_vector, XEN_HVM_EVTCHN_CALLBACK,
13363
13364 ENTRY(mcount)
13365 ret
13366 -END(mcount)
13367 +ENDPROC(mcount)
13368
13369 ENTRY(ftrace_caller)
13370 cmpl $0, function_trace_stop
13371 @@ -1138,7 +1387,7 @@ ftrace_graph_call:
13372 .globl ftrace_stub
13373 ftrace_stub:
13374 ret
13375 -END(ftrace_caller)
13376 +ENDPROC(ftrace_caller)
13377
13378 #else /* ! CONFIG_DYNAMIC_FTRACE */
13379
13380 @@ -1174,7 +1423,7 @@ trace:
13381 popl %ecx
13382 popl %eax
13383 jmp ftrace_stub
13384 -END(mcount)
13385 +ENDPROC(mcount)
13386 #endif /* CONFIG_DYNAMIC_FTRACE */
13387 #endif /* CONFIG_FUNCTION_TRACER */
13388
13389 @@ -1195,7 +1444,7 @@ ENTRY(ftrace_graph_caller)
13390 popl %ecx
13391 popl %eax
13392 ret
13393 -END(ftrace_graph_caller)
13394 +ENDPROC(ftrace_graph_caller)
13395
13396 .globl return_to_handler
13397 return_to_handler:
13398 @@ -1209,7 +1458,6 @@ return_to_handler:
13399 jmp *%ecx
13400 #endif
13401
13402 -.section .rodata,"a"
13403 #include "syscall_table_32.S"
13404
13405 syscall_table_size=(.-sys_call_table)
13406 @@ -1255,15 +1503,18 @@ error_code:
13407 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
13408 REG_TO_PTGS %ecx
13409 SET_KERNEL_GS %ecx
13410 - movl $(__USER_DS), %ecx
13411 + movl $(__KERNEL_DS), %ecx
13412 movl %ecx, %ds
13413 movl %ecx, %es
13414 +
13415 + pax_enter_kernel
13416 +
13417 TRACE_IRQS_OFF
13418 movl %esp,%eax # pt_regs pointer
13419 call *%edi
13420 jmp ret_from_exception
13421 CFI_ENDPROC
13422 -END(page_fault)
13423 +ENDPROC(page_fault)
13424
13425 /*
13426 * Debug traps and NMI can happen at the one SYSENTER instruction
13427 @@ -1305,7 +1556,7 @@ debug_stack_correct:
13428 call do_debug
13429 jmp ret_from_exception
13430 CFI_ENDPROC
13431 -END(debug)
13432 +ENDPROC(debug)
13433
13434 /*
13435 * NMI is doubly nasty. It can happen _while_ we're handling
13436 @@ -1342,6 +1593,9 @@ nmi_stack_correct:
13437 xorl %edx,%edx # zero error code
13438 movl %esp,%eax # pt_regs pointer
13439 call do_nmi
13440 +
13441 + pax_exit_kernel
13442 +
13443 jmp restore_all_notrace
13444 CFI_ENDPROC
13445
13446 @@ -1378,12 +1632,15 @@ nmi_espfix_stack:
13447 FIXUP_ESPFIX_STACK # %eax == %esp
13448 xorl %edx,%edx # zero error code
13449 call do_nmi
13450 +
13451 + pax_exit_kernel
13452 +
13453 RESTORE_REGS
13454 lss 12+4(%esp), %esp # back to espfix stack
13455 CFI_ADJUST_CFA_OFFSET -24
13456 jmp irq_return
13457 CFI_ENDPROC
13458 -END(nmi)
13459 +ENDPROC(nmi)
13460
13461 ENTRY(int3)
13462 RING0_INT_FRAME
13463 @@ -1395,14 +1652,14 @@ ENTRY(int3)
13464 call do_int3
13465 jmp ret_from_exception
13466 CFI_ENDPROC
13467 -END(int3)
13468 +ENDPROC(int3)
13469
13470 ENTRY(general_protection)
13471 RING0_EC_FRAME
13472 pushl_cfi $do_general_protection
13473 jmp error_code
13474 CFI_ENDPROC
13475 -END(general_protection)
13476 +ENDPROC(general_protection)
13477
13478 #ifdef CONFIG_KVM_GUEST
13479 ENTRY(async_page_fault)
13480 @@ -1410,7 +1667,7 @@ ENTRY(async_page_fault)
13481 pushl_cfi $do_async_page_fault
13482 jmp error_code
13483 CFI_ENDPROC
13484 -END(async_page_fault)
13485 +ENDPROC(async_page_fault)
13486 #endif
13487
13488 /*
13489 diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
13490 index faf8d5e..f58c441 100644
13491 --- a/arch/x86/kernel/entry_64.S
13492 +++ b/arch/x86/kernel/entry_64.S
13493 @@ -55,6 +55,8 @@
13494 #include <asm/paravirt.h>
13495 #include <asm/ftrace.h>
13496 #include <asm/percpu.h>
13497 +#include <asm/pgtable.h>
13498 +#include <asm/alternative-asm.h>
13499
13500 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
13501 #include <linux/elf-em.h>
13502 @@ -68,8 +70,9 @@
13503 #ifdef CONFIG_FUNCTION_TRACER
13504 #ifdef CONFIG_DYNAMIC_FTRACE
13505 ENTRY(mcount)
13506 + pax_force_retaddr
13507 retq
13508 -END(mcount)
13509 +ENDPROC(mcount)
13510
13511 ENTRY(ftrace_caller)
13512 cmpl $0, function_trace_stop
13513 @@ -92,8 +95,9 @@ GLOBAL(ftrace_graph_call)
13514 #endif
13515
13516 GLOBAL(ftrace_stub)
13517 + pax_force_retaddr
13518 retq
13519 -END(ftrace_caller)
13520 +ENDPROC(ftrace_caller)
13521
13522 #else /* ! CONFIG_DYNAMIC_FTRACE */
13523 ENTRY(mcount)
13524 @@ -112,6 +116,7 @@ ENTRY(mcount)
13525 #endif
13526
13527 GLOBAL(ftrace_stub)
13528 + pax_force_retaddr
13529 retq
13530
13531 trace:
13532 @@ -121,12 +126,13 @@ trace:
13533 movq 8(%rbp), %rsi
13534 subq $MCOUNT_INSN_SIZE, %rdi
13535
13536 + pax_force_fptr ftrace_trace_function
13537 call *ftrace_trace_function
13538
13539 MCOUNT_RESTORE_FRAME
13540
13541 jmp ftrace_stub
13542 -END(mcount)
13543 +ENDPROC(mcount)
13544 #endif /* CONFIG_DYNAMIC_FTRACE */
13545 #endif /* CONFIG_FUNCTION_TRACER */
13546
13547 @@ -146,8 +152,9 @@ ENTRY(ftrace_graph_caller)
13548
13549 MCOUNT_RESTORE_FRAME
13550
13551 + pax_force_retaddr
13552 retq
13553 -END(ftrace_graph_caller)
13554 +ENDPROC(ftrace_graph_caller)
13555
13556 GLOBAL(return_to_handler)
13557 subq $24, %rsp
13558 @@ -163,6 +170,7 @@ GLOBAL(return_to_handler)
13559 movq 8(%rsp), %rdx
13560 movq (%rsp), %rax
13561 addq $24, %rsp
13562 + pax_force_fptr %rdi
13563 jmp *%rdi
13564 #endif
13565
13566 @@ -178,6 +186,282 @@ ENTRY(native_usergs_sysret64)
13567 ENDPROC(native_usergs_sysret64)
13568 #endif /* CONFIG_PARAVIRT */
13569
13570 + .macro ljmpq sel, off
13571 +#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
13572 + .byte 0x48; ljmp *1234f(%rip)
13573 + .pushsection .rodata
13574 + .align 16
13575 + 1234: .quad \off; .word \sel
13576 + .popsection
13577 +#else
13578 + pushq $\sel
13579 + pushq $\off
13580 + lretq
13581 +#endif
13582 + .endm
13583 +
13584 + .macro pax_enter_kernel
13585 + pax_set_fptr_mask
13586 +#ifdef CONFIG_PAX_KERNEXEC
13587 + call pax_enter_kernel
13588 +#endif
13589 + .endm
13590 +
13591 + .macro pax_exit_kernel
13592 +#ifdef CONFIG_PAX_KERNEXEC
13593 + call pax_exit_kernel
13594 +#endif
13595 + .endm
13596 +
13597 +#ifdef CONFIG_PAX_KERNEXEC
13598 +ENTRY(pax_enter_kernel)
13599 + pushq %rdi
13600 +
13601 +#ifdef CONFIG_PARAVIRT
13602 + PV_SAVE_REGS(CLBR_RDI)
13603 +#endif
13604 +
13605 + GET_CR0_INTO_RDI
13606 + bts $16,%rdi
13607 + jnc 3f
13608 + mov %cs,%edi
13609 + cmp $__KERNEL_CS,%edi
13610 + jnz 2f
13611 +1:
13612 +
13613 +#ifdef CONFIG_PARAVIRT
13614 + PV_RESTORE_REGS(CLBR_RDI)
13615 +#endif
13616 +
13617 + popq %rdi
13618 + pax_force_retaddr
13619 + retq
13620 +
13621 +2: ljmpq __KERNEL_CS,1f
13622 +3: ljmpq __KERNEXEC_KERNEL_CS,4f
13623 +4: SET_RDI_INTO_CR0
13624 + jmp 1b
13625 +ENDPROC(pax_enter_kernel)
13626 +
13627 +ENTRY(pax_exit_kernel)
13628 + pushq %rdi
13629 +
13630 +#ifdef CONFIG_PARAVIRT
13631 + PV_SAVE_REGS(CLBR_RDI)
13632 +#endif
13633 +
13634 + mov %cs,%rdi
13635 + cmp $__KERNEXEC_KERNEL_CS,%edi
13636 + jz 2f
13637 +1:
13638 +
13639 +#ifdef CONFIG_PARAVIRT
13640 + PV_RESTORE_REGS(CLBR_RDI);
13641 +#endif
13642 +
13643 + popq %rdi
13644 + pax_force_retaddr
13645 + retq
13646 +
13647 +2: GET_CR0_INTO_RDI
13648 + btr $16,%rdi
13649 + ljmpq __KERNEL_CS,3f
13650 +3: SET_RDI_INTO_CR0
13651 + jmp 1b
13652 +#ifdef CONFIG_PARAVIRT
13653 + PV_RESTORE_REGS(CLBR_RDI);
13654 +#endif
13655 +
13656 + popq %rdi
13657 + pax_force_retaddr
13658 + retq
13659 +ENDPROC(pax_exit_kernel)
13660 +#endif
13661 +
13662 + .macro pax_enter_kernel_user
13663 + pax_set_fptr_mask
13664 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13665 + call pax_enter_kernel_user
13666 +#endif
13667 + .endm
13668 +
13669 + .macro pax_exit_kernel_user
13670 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13671 + call pax_exit_kernel_user
13672 +#endif
13673 +#ifdef CONFIG_PAX_RANDKSTACK
13674 + pushq %rax
13675 + call pax_randomize_kstack
13676 + popq %rax
13677 +#endif
13678 + .endm
13679 +
13680 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13681 +ENTRY(pax_enter_kernel_user)
13682 + pushq %rdi
13683 + pushq %rbx
13684 +
13685 +#ifdef CONFIG_PARAVIRT
13686 + PV_SAVE_REGS(CLBR_RDI)
13687 +#endif
13688 +
13689 + GET_CR3_INTO_RDI
13690 + mov %rdi,%rbx
13691 + add $__START_KERNEL_map,%rbx
13692 + sub phys_base(%rip),%rbx
13693 +
13694 +#ifdef CONFIG_PARAVIRT
13695 + pushq %rdi
13696 + cmpl $0, pv_info+PARAVIRT_enabled
13697 + jz 1f
13698 + i = 0
13699 + .rept USER_PGD_PTRS
13700 + mov i*8(%rbx),%rsi
13701 + mov $0,%sil
13702 + lea i*8(%rbx),%rdi
13703 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
13704 + i = i + 1
13705 + .endr
13706 + jmp 2f
13707 +1:
13708 +#endif
13709 +
13710 + i = 0
13711 + .rept USER_PGD_PTRS
13712 + movb $0,i*8(%rbx)
13713 + i = i + 1
13714 + .endr
13715 +
13716 +#ifdef CONFIG_PARAVIRT
13717 +2: popq %rdi
13718 +#endif
13719 + SET_RDI_INTO_CR3
13720 +
13721 +#ifdef CONFIG_PAX_KERNEXEC
13722 + GET_CR0_INTO_RDI
13723 + bts $16,%rdi
13724 + SET_RDI_INTO_CR0
13725 +#endif
13726 +
13727 +#ifdef CONFIG_PARAVIRT
13728 + PV_RESTORE_REGS(CLBR_RDI)
13729 +#endif
13730 +
13731 + popq %rbx
13732 + popq %rdi
13733 + pax_force_retaddr
13734 + retq
13735 +ENDPROC(pax_enter_kernel_user)
13736 +
13737 +ENTRY(pax_exit_kernel_user)
13738 + push %rdi
13739 +
13740 +#ifdef CONFIG_PARAVIRT
13741 + pushq %rbx
13742 + PV_SAVE_REGS(CLBR_RDI)
13743 +#endif
13744 +
13745 +#ifdef CONFIG_PAX_KERNEXEC
13746 + GET_CR0_INTO_RDI
13747 + btr $16,%rdi
13748 + SET_RDI_INTO_CR0
13749 +#endif
13750 +
13751 + GET_CR3_INTO_RDI
13752 + add $__START_KERNEL_map,%rdi
13753 + sub phys_base(%rip),%rdi
13754 +
13755 +#ifdef CONFIG_PARAVIRT
13756 + cmpl $0, pv_info+PARAVIRT_enabled
13757 + jz 1f
13758 + mov %rdi,%rbx
13759 + i = 0
13760 + .rept USER_PGD_PTRS
13761 + mov i*8(%rbx),%rsi
13762 + mov $0x67,%sil
13763 + lea i*8(%rbx),%rdi
13764 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
13765 + i = i + 1
13766 + .endr
13767 + jmp 2f
13768 +1:
13769 +#endif
13770 +
13771 + i = 0
13772 + .rept USER_PGD_PTRS
13773 + movb $0x67,i*8(%rdi)
13774 + i = i + 1
13775 + .endr
13776 +
13777 +#ifdef CONFIG_PARAVIRT
13778 +2: PV_RESTORE_REGS(CLBR_RDI)
13779 + popq %rbx
13780 +#endif
13781 +
13782 + popq %rdi
13783 + pax_force_retaddr
13784 + retq
13785 +ENDPROC(pax_exit_kernel_user)
13786 +#endif
13787 +
13788 +.macro pax_erase_kstack
13789 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
13790 + call pax_erase_kstack
13791 +#endif
13792 +.endm
13793 +
13794 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
13795 +/*
13796 + * r11: thread_info
13797 + * rcx, rdx: can be clobbered
13798 + */
13799 +ENTRY(pax_erase_kstack)
13800 + pushq %rdi
13801 + pushq %rax
13802 + pushq %r11
13803 +
13804 + GET_THREAD_INFO(%r11)
13805 + mov TI_lowest_stack(%r11), %rdi
13806 + mov $-0xBEEF, %rax
13807 + std
13808 +
13809 +1: mov %edi, %ecx
13810 + and $THREAD_SIZE_asm - 1, %ecx
13811 + shr $3, %ecx
13812 + repne scasq
13813 + jecxz 2f
13814 +
13815 + cmp $2*8, %ecx
13816 + jc 2f
13817 +
13818 + mov $2*8, %ecx
13819 + repe scasq
13820 + jecxz 2f
13821 + jne 1b
13822 +
13823 +2: cld
13824 + mov %esp, %ecx
13825 + sub %edi, %ecx
13826 +
13827 + cmp $THREAD_SIZE_asm, %rcx
13828 + jb 3f
13829 + ud2
13830 +3:
13831 +
13832 + shr $3, %ecx
13833 + rep stosq
13834 +
13835 + mov TI_task_thread_sp0(%r11), %rdi
13836 + sub $256, %rdi
13837 + mov %rdi, TI_lowest_stack(%r11)
13838 +
13839 + popq %r11
13840 + popq %rax
13841 + popq %rdi
13842 + pax_force_retaddr
13843 + ret
13844 +ENDPROC(pax_erase_kstack)
13845 +#endif
13846
13847 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
13848 #ifdef CONFIG_TRACE_IRQFLAGS
13849 @@ -231,8 +515,8 @@ ENDPROC(native_usergs_sysret64)
13850 .endm
13851
13852 .macro UNFAKE_STACK_FRAME
13853 - addq $8*6, %rsp
13854 - CFI_ADJUST_CFA_OFFSET -(6*8)
13855 + addq $8*6 + ARG_SKIP, %rsp
13856 + CFI_ADJUST_CFA_OFFSET -(6*8 + ARG_SKIP)
13857 .endm
13858
13859 /*
13860 @@ -319,7 +603,7 @@ ENDPROC(native_usergs_sysret64)
13861 movq %rsp, %rsi
13862
13863 leaq -RBP(%rsp),%rdi /* arg1 for handler */
13864 - testl $3, CS(%rdi)
13865 + testb $3, CS(%rdi)
13866 je 1f
13867 SWAPGS
13868 /*
13869 @@ -355,9 +639,10 @@ ENTRY(save_rest)
13870 movq_cfi r15, R15+16
13871 movq %r11, 8(%rsp) /* return address */
13872 FIXUP_TOP_OF_STACK %r11, 16
13873 + pax_force_retaddr
13874 ret
13875 CFI_ENDPROC
13876 -END(save_rest)
13877 +ENDPROC(save_rest)
13878
13879 /* save complete stack frame */
13880 .pushsection .kprobes.text, "ax"
13881 @@ -386,9 +671,10 @@ ENTRY(save_paranoid)
13882 js 1f /* negative -> in kernel */
13883 SWAPGS
13884 xorl %ebx,%ebx
13885 -1: ret
13886 +1: pax_force_retaddr_bts
13887 + ret
13888 CFI_ENDPROC
13889 -END(save_paranoid)
13890 +ENDPROC(save_paranoid)
13891 .popsection
13892
13893 /*
13894 @@ -410,7 +696,7 @@ ENTRY(ret_from_fork)
13895
13896 RESTORE_REST
13897
13898 - testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
13899 + testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
13900 je int_ret_from_sys_call
13901
13902 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
13903 @@ -420,7 +706,7 @@ ENTRY(ret_from_fork)
13904 jmp ret_from_sys_call # go to the SYSRET fastpath
13905
13906 CFI_ENDPROC
13907 -END(ret_from_fork)
13908 +ENDPROC(ret_from_fork)
13909
13910 /*
13911 * System call entry. Up to 6 arguments in registers are supported.
13912 @@ -456,7 +742,7 @@ END(ret_from_fork)
13913 ENTRY(system_call)
13914 CFI_STARTPROC simple
13915 CFI_SIGNAL_FRAME
13916 - CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
13917 + CFI_DEF_CFA rsp,0
13918 CFI_REGISTER rip,rcx
13919 /*CFI_REGISTER rflags,r11*/
13920 SWAPGS_UNSAFE_STACK
13921 @@ -469,12 +755,13 @@ ENTRY(system_call_after_swapgs)
13922
13923 movq %rsp,PER_CPU_VAR(old_rsp)
13924 movq PER_CPU_VAR(kernel_stack),%rsp
13925 + SAVE_ARGS 8*6,0
13926 + pax_enter_kernel_user
13927 /*
13928 * No need to follow this irqs off/on section - it's straight
13929 * and short:
13930 */
13931 ENABLE_INTERRUPTS(CLBR_NONE)
13932 - SAVE_ARGS 8,0
13933 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
13934 movq %rcx,RIP-ARGOFFSET(%rsp)
13935 CFI_REL_OFFSET rip,RIP-ARGOFFSET
13936 @@ -484,7 +771,7 @@ ENTRY(system_call_after_swapgs)
13937 system_call_fastpath:
13938 cmpq $__NR_syscall_max,%rax
13939 ja badsys
13940 - movq %r10,%rcx
13941 + movq R10-ARGOFFSET(%rsp),%rcx
13942 call *sys_call_table(,%rax,8) # XXX: rip relative
13943 movq %rax,RAX-ARGOFFSET(%rsp)
13944 /*
13945 @@ -503,6 +790,8 @@ sysret_check:
13946 andl %edi,%edx
13947 jnz sysret_careful
13948 CFI_REMEMBER_STATE
13949 + pax_exit_kernel_user
13950 + pax_erase_kstack
13951 /*
13952 * sysretq will re-enable interrupts:
13953 */
13954 @@ -554,14 +843,18 @@ badsys:
13955 * jump back to the normal fast path.
13956 */
13957 auditsys:
13958 - movq %r10,%r9 /* 6th arg: 4th syscall arg */
13959 + movq R10-ARGOFFSET(%rsp),%r9 /* 6th arg: 4th syscall arg */
13960 movq %rdx,%r8 /* 5th arg: 3rd syscall arg */
13961 movq %rsi,%rcx /* 4th arg: 2nd syscall arg */
13962 movq %rdi,%rdx /* 3rd arg: 1st syscall arg */
13963 movq %rax,%rsi /* 2nd arg: syscall number */
13964 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
13965 call audit_syscall_entry
13966 +
13967 + pax_erase_kstack
13968 +
13969 LOAD_ARGS 0 /* reload call-clobbered registers */
13970 + pax_set_fptr_mask
13971 jmp system_call_fastpath
13972
13973 /*
13974 @@ -591,16 +884,20 @@ tracesys:
13975 FIXUP_TOP_OF_STACK %rdi
13976 movq %rsp,%rdi
13977 call syscall_trace_enter
13978 +
13979 + pax_erase_kstack
13980 +
13981 /*
13982 * Reload arg registers from stack in case ptrace changed them.
13983 * We don't reload %rax because syscall_trace_enter() returned
13984 * the value it wants us to use in the table lookup.
13985 */
13986 LOAD_ARGS ARGOFFSET, 1
13987 + pax_set_fptr_mask
13988 RESTORE_REST
13989 cmpq $__NR_syscall_max,%rax
13990 ja int_ret_from_sys_call /* RAX(%rsp) set to -ENOSYS above */
13991 - movq %r10,%rcx /* fixup for C */
13992 + movq R10-ARGOFFSET(%rsp),%rcx /* fixup for C */
13993 call *sys_call_table(,%rax,8)
13994 movq %rax,RAX-ARGOFFSET(%rsp)
13995 /* Use IRET because user could have changed frame */
13996 @@ -612,7 +909,7 @@ tracesys:
13997 GLOBAL(int_ret_from_sys_call)
13998 DISABLE_INTERRUPTS(CLBR_NONE)
13999 TRACE_IRQS_OFF
14000 - testl $3,CS-ARGOFFSET(%rsp)
14001 + testb $3,CS-ARGOFFSET(%rsp)
14002 je retint_restore_args
14003 movl $_TIF_ALLWORK_MASK,%edi
14004 /* edi: mask to check */
14005 @@ -669,7 +966,7 @@ int_restore_rest:
14006 TRACE_IRQS_OFF
14007 jmp int_with_check
14008 CFI_ENDPROC
14009 -END(system_call)
14010 +ENDPROC(system_call)
14011
14012 /*
14013 * Certain special system calls that need to save a complete full stack frame.
14014 @@ -685,7 +982,7 @@ ENTRY(\label)
14015 call \func
14016 jmp ptregscall_common
14017 CFI_ENDPROC
14018 -END(\label)
14019 +ENDPROC(\label)
14020 .endm
14021
14022 PTREGSCALL stub_clone, sys_clone, %r8
14023 @@ -703,9 +1000,10 @@ ENTRY(ptregscall_common)
14024 movq_cfi_restore R12+8, r12
14025 movq_cfi_restore RBP+8, rbp
14026 movq_cfi_restore RBX+8, rbx
14027 + pax_force_retaddr
14028 ret $REST_SKIP /* pop extended registers */
14029 CFI_ENDPROC
14030 -END(ptregscall_common)
14031 +ENDPROC(ptregscall_common)
14032
14033 ENTRY(stub_execve)
14034 CFI_STARTPROC
14035 @@ -720,7 +1018,7 @@ ENTRY(stub_execve)
14036 RESTORE_REST
14037 jmp int_ret_from_sys_call
14038 CFI_ENDPROC
14039 -END(stub_execve)
14040 +ENDPROC(stub_execve)
14041
14042 /*
14043 * sigreturn is special because it needs to restore all registers on return.
14044 @@ -738,7 +1036,7 @@ ENTRY(stub_rt_sigreturn)
14045 RESTORE_REST
14046 jmp int_ret_from_sys_call
14047 CFI_ENDPROC
14048 -END(stub_rt_sigreturn)
14049 +ENDPROC(stub_rt_sigreturn)
14050
14051 /*
14052 * Build the entry stubs and pointer table with some assembler magic.
14053 @@ -773,7 +1071,7 @@ vector=vector+1
14054 2: jmp common_interrupt
14055 .endr
14056 CFI_ENDPROC
14057 -END(irq_entries_start)
14058 +ENDPROC(irq_entries_start)
14059
14060 .previous
14061 END(interrupt)
14062 @@ -793,6 +1091,16 @@ END(interrupt)
14063 subq $ORIG_RAX-RBP, %rsp
14064 CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
14065 SAVE_ARGS_IRQ
14066 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14067 + testb $3, CS(%rdi)
14068 + jnz 1f
14069 + pax_enter_kernel
14070 + jmp 2f
14071 +1: pax_enter_kernel_user
14072 +2:
14073 +#else
14074 + pax_enter_kernel
14075 +#endif
14076 call \func
14077 .endm
14078
14079 @@ -824,7 +1132,7 @@ ret_from_intr:
14080
14081 exit_intr:
14082 GET_THREAD_INFO(%rcx)
14083 - testl $3,CS-ARGOFFSET(%rsp)
14084 + testb $3,CS-ARGOFFSET(%rsp)
14085 je retint_kernel
14086
14087 /* Interrupt came from user space */
14088 @@ -846,12 +1154,16 @@ retint_swapgs: /* return to user-space */
14089 * The iretq could re-enable interrupts:
14090 */
14091 DISABLE_INTERRUPTS(CLBR_ANY)
14092 + pax_exit_kernel_user
14093 + pax_erase_kstack
14094 TRACE_IRQS_IRETQ
14095 SWAPGS
14096 jmp restore_args
14097
14098 retint_restore_args: /* return to kernel space */
14099 DISABLE_INTERRUPTS(CLBR_ANY)
14100 + pax_exit_kernel
14101 + pax_force_retaddr RIP-ARGOFFSET
14102 /*
14103 * The iretq could re-enable interrupts:
14104 */
14105 @@ -940,7 +1252,7 @@ ENTRY(retint_kernel)
14106 #endif
14107
14108 CFI_ENDPROC
14109 -END(common_interrupt)
14110 +ENDPROC(common_interrupt)
14111 /*
14112 * End of kprobes section
14113 */
14114 @@ -956,7 +1268,7 @@ ENTRY(\sym)
14115 interrupt \do_sym
14116 jmp ret_from_intr
14117 CFI_ENDPROC
14118 -END(\sym)
14119 +ENDPROC(\sym)
14120 .endm
14121
14122 #ifdef CONFIG_SMP
14123 @@ -1021,12 +1333,22 @@ ENTRY(\sym)
14124 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
14125 call error_entry
14126 DEFAULT_FRAME 0
14127 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14128 + testb $3, CS(%rsp)
14129 + jnz 1f
14130 + pax_enter_kernel
14131 + jmp 2f
14132 +1: pax_enter_kernel_user
14133 +2:
14134 +#else
14135 + pax_enter_kernel
14136 +#endif
14137 movq %rsp,%rdi /* pt_regs pointer */
14138 xorl %esi,%esi /* no error code */
14139 call \do_sym
14140 jmp error_exit /* %ebx: no swapgs flag */
14141 CFI_ENDPROC
14142 -END(\sym)
14143 +ENDPROC(\sym)
14144 .endm
14145
14146 .macro paranoidzeroentry sym do_sym
14147 @@ -1038,15 +1360,25 @@ ENTRY(\sym)
14148 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
14149 call save_paranoid
14150 TRACE_IRQS_OFF
14151 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14152 + testb $3, CS(%rsp)
14153 + jnz 1f
14154 + pax_enter_kernel
14155 + jmp 2f
14156 +1: pax_enter_kernel_user
14157 +2:
14158 +#else
14159 + pax_enter_kernel
14160 +#endif
14161 movq %rsp,%rdi /* pt_regs pointer */
14162 xorl %esi,%esi /* no error code */
14163 call \do_sym
14164 jmp paranoid_exit /* %ebx: no swapgs flag */
14165 CFI_ENDPROC
14166 -END(\sym)
14167 +ENDPROC(\sym)
14168 .endm
14169
14170 -#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
14171 +#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r12)
14172 .macro paranoidzeroentry_ist sym do_sym ist
14173 ENTRY(\sym)
14174 INTR_FRAME
14175 @@ -1056,14 +1388,30 @@ ENTRY(\sym)
14176 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
14177 call save_paranoid
14178 TRACE_IRQS_OFF
14179 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14180 + testb $3, CS(%rsp)
14181 + jnz 1f
14182 + pax_enter_kernel
14183 + jmp 2f
14184 +1: pax_enter_kernel_user
14185 +2:
14186 +#else
14187 + pax_enter_kernel
14188 +#endif
14189 movq %rsp,%rdi /* pt_regs pointer */
14190 xorl %esi,%esi /* no error code */
14191 +#ifdef CONFIG_SMP
14192 + imul $TSS_size, PER_CPU_VAR(cpu_number), %r12d
14193 + lea init_tss(%r12), %r12
14194 +#else
14195 + lea init_tss(%rip), %r12
14196 +#endif
14197 subq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
14198 call \do_sym
14199 addq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
14200 jmp paranoid_exit /* %ebx: no swapgs flag */
14201 CFI_ENDPROC
14202 -END(\sym)
14203 +ENDPROC(\sym)
14204 .endm
14205
14206 .macro errorentry sym do_sym
14207 @@ -1074,13 +1422,23 @@ ENTRY(\sym)
14208 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
14209 call error_entry
14210 DEFAULT_FRAME 0
14211 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14212 + testb $3, CS(%rsp)
14213 + jnz 1f
14214 + pax_enter_kernel
14215 + jmp 2f
14216 +1: pax_enter_kernel_user
14217 +2:
14218 +#else
14219 + pax_enter_kernel
14220 +#endif
14221 movq %rsp,%rdi /* pt_regs pointer */
14222 movq ORIG_RAX(%rsp),%rsi /* get error code */
14223 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
14224 call \do_sym
14225 jmp error_exit /* %ebx: no swapgs flag */
14226 CFI_ENDPROC
14227 -END(\sym)
14228 +ENDPROC(\sym)
14229 .endm
14230
14231 /* error code is on the stack already */
14232 @@ -1093,13 +1451,23 @@ ENTRY(\sym)
14233 call save_paranoid
14234 DEFAULT_FRAME 0
14235 TRACE_IRQS_OFF
14236 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14237 + testb $3, CS(%rsp)
14238 + jnz 1f
14239 + pax_enter_kernel
14240 + jmp 2f
14241 +1: pax_enter_kernel_user
14242 +2:
14243 +#else
14244 + pax_enter_kernel
14245 +#endif
14246 movq %rsp,%rdi /* pt_regs pointer */
14247 movq ORIG_RAX(%rsp),%rsi /* get error code */
14248 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
14249 call \do_sym
14250 jmp paranoid_exit /* %ebx: no swapgs flag */
14251 CFI_ENDPROC
14252 -END(\sym)
14253 +ENDPROC(\sym)
14254 .endm
14255
14256 zeroentry divide_error do_divide_error
14257 @@ -1129,9 +1497,10 @@ gs_change:
14258 2: mfence /* workaround */
14259 SWAPGS
14260 popfq_cfi
14261 + pax_force_retaddr
14262 ret
14263 CFI_ENDPROC
14264 -END(native_load_gs_index)
14265 +ENDPROC(native_load_gs_index)
14266
14267 .section __ex_table,"a"
14268 .align 8
14269 @@ -1153,13 +1522,14 @@ ENTRY(kernel_thread_helper)
14270 * Here we are in the child and the registers are set as they were
14271 * at kernel_thread() invocation in the parent.
14272 */
14273 + pax_force_fptr %rsi
14274 call *%rsi
14275 # exit
14276 mov %eax, %edi
14277 call do_exit
14278 ud2 # padding for call trace
14279 CFI_ENDPROC
14280 -END(kernel_thread_helper)
14281 +ENDPROC(kernel_thread_helper)
14282
14283 /*
14284 * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
14285 @@ -1186,11 +1556,11 @@ ENTRY(kernel_execve)
14286 RESTORE_REST
14287 testq %rax,%rax
14288 je int_ret_from_sys_call
14289 - RESTORE_ARGS
14290 UNFAKE_STACK_FRAME
14291 + pax_force_retaddr
14292 ret
14293 CFI_ENDPROC
14294 -END(kernel_execve)
14295 +ENDPROC(kernel_execve)
14296
14297 /* Call softirq on interrupt stack. Interrupts are off. */
14298 ENTRY(call_softirq)
14299 @@ -1208,9 +1578,10 @@ ENTRY(call_softirq)
14300 CFI_DEF_CFA_REGISTER rsp
14301 CFI_ADJUST_CFA_OFFSET -8
14302 decl PER_CPU_VAR(irq_count)
14303 + pax_force_retaddr
14304 ret
14305 CFI_ENDPROC
14306 -END(call_softirq)
14307 +ENDPROC(call_softirq)
14308
14309 #ifdef CONFIG_XEN
14310 zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
14311 @@ -1248,7 +1619,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
14312 decl PER_CPU_VAR(irq_count)
14313 jmp error_exit
14314 CFI_ENDPROC
14315 -END(xen_do_hypervisor_callback)
14316 +ENDPROC(xen_do_hypervisor_callback)
14317
14318 /*
14319 * Hypervisor uses this for application faults while it executes.
14320 @@ -1307,7 +1678,7 @@ ENTRY(xen_failsafe_callback)
14321 SAVE_ALL
14322 jmp error_exit
14323 CFI_ENDPROC
14324 -END(xen_failsafe_callback)
14325 +ENDPROC(xen_failsafe_callback)
14326
14327 apicinterrupt XEN_HVM_EVTCHN_CALLBACK \
14328 xen_hvm_callback_vector xen_evtchn_do_upcall
14329 @@ -1356,16 +1727,31 @@ ENTRY(paranoid_exit)
14330 TRACE_IRQS_OFF
14331 testl %ebx,%ebx /* swapgs needed? */
14332 jnz paranoid_restore
14333 - testl $3,CS(%rsp)
14334 + testb $3,CS(%rsp)
14335 jnz paranoid_userspace
14336 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14337 + pax_exit_kernel
14338 + TRACE_IRQS_IRETQ 0
14339 + SWAPGS_UNSAFE_STACK
14340 + RESTORE_ALL 8
14341 + pax_force_retaddr_bts
14342 + jmp irq_return
14343 +#endif
14344 paranoid_swapgs:
14345 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14346 + pax_exit_kernel_user
14347 +#else
14348 + pax_exit_kernel
14349 +#endif
14350 TRACE_IRQS_IRETQ 0
14351 SWAPGS_UNSAFE_STACK
14352 RESTORE_ALL 8
14353 jmp irq_return
14354 paranoid_restore:
14355 + pax_exit_kernel
14356 TRACE_IRQS_IRETQ 0
14357 RESTORE_ALL 8
14358 + pax_force_retaddr_bts
14359 jmp irq_return
14360 paranoid_userspace:
14361 GET_THREAD_INFO(%rcx)
14362 @@ -1394,7 +1780,7 @@ paranoid_schedule:
14363 TRACE_IRQS_OFF
14364 jmp paranoid_userspace
14365 CFI_ENDPROC
14366 -END(paranoid_exit)
14367 +ENDPROC(paranoid_exit)
14368
14369 /*
14370 * Exception entry point. This expects an error code/orig_rax on the stack.
14371 @@ -1421,12 +1807,13 @@ ENTRY(error_entry)
14372 movq_cfi r14, R14+8
14373 movq_cfi r15, R15+8
14374 xorl %ebx,%ebx
14375 - testl $3,CS+8(%rsp)
14376 + testb $3,CS+8(%rsp)
14377 je error_kernelspace
14378 error_swapgs:
14379 SWAPGS
14380 error_sti:
14381 TRACE_IRQS_OFF
14382 + pax_force_retaddr_bts
14383 ret
14384
14385 /*
14386 @@ -1453,7 +1840,7 @@ bstep_iret:
14387 movq %rcx,RIP+8(%rsp)
14388 jmp error_swapgs
14389 CFI_ENDPROC
14390 -END(error_entry)
14391 +ENDPROC(error_entry)
14392
14393
14394 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
14395 @@ -1473,7 +1860,7 @@ ENTRY(error_exit)
14396 jnz retint_careful
14397 jmp retint_swapgs
14398 CFI_ENDPROC
14399 -END(error_exit)
14400 +ENDPROC(error_exit)
14401
14402
14403 /* runs on exception stack */
14404 @@ -1485,6 +1872,16 @@ ENTRY(nmi)
14405 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
14406 call save_paranoid
14407 DEFAULT_FRAME 0
14408 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14409 + testb $3, CS(%rsp)
14410 + jnz 1f
14411 + pax_enter_kernel
14412 + jmp 2f
14413 +1: pax_enter_kernel_user
14414 +2:
14415 +#else
14416 + pax_enter_kernel
14417 +#endif
14418 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
14419 movq %rsp,%rdi
14420 movq $-1,%rsi
14421 @@ -1495,12 +1892,28 @@ ENTRY(nmi)
14422 DISABLE_INTERRUPTS(CLBR_NONE)
14423 testl %ebx,%ebx /* swapgs needed? */
14424 jnz nmi_restore
14425 - testl $3,CS(%rsp)
14426 + testb $3,CS(%rsp)
14427 jnz nmi_userspace
14428 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14429 + pax_exit_kernel
14430 + SWAPGS_UNSAFE_STACK
14431 + RESTORE_ALL 8
14432 + pax_force_retaddr_bts
14433 + jmp irq_return
14434 +#endif
14435 nmi_swapgs:
14436 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14437 + pax_exit_kernel_user
14438 +#else
14439 + pax_exit_kernel
14440 +#endif
14441 SWAPGS_UNSAFE_STACK
14442 + RESTORE_ALL 8
14443 + jmp irq_return
14444 nmi_restore:
14445 + pax_exit_kernel
14446 RESTORE_ALL 8
14447 + pax_force_retaddr_bts
14448 jmp irq_return
14449 nmi_userspace:
14450 GET_THREAD_INFO(%rcx)
14451 @@ -1529,14 +1942,14 @@ nmi_schedule:
14452 jmp paranoid_exit
14453 CFI_ENDPROC
14454 #endif
14455 -END(nmi)
14456 +ENDPROC(nmi)
14457
14458 ENTRY(ignore_sysret)
14459 CFI_STARTPROC
14460 mov $-ENOSYS,%eax
14461 sysret
14462 CFI_ENDPROC
14463 -END(ignore_sysret)
14464 +ENDPROC(ignore_sysret)
14465
14466 /*
14467 * End of kprobes section
14468 diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
14469 index c9a281f..ce2f317 100644
14470 --- a/arch/x86/kernel/ftrace.c
14471 +++ b/arch/x86/kernel/ftrace.c
14472 @@ -126,7 +126,7 @@ static void *mod_code_ip; /* holds the IP to write to */
14473 static const void *mod_code_newcode; /* holds the text to write to the IP */
14474
14475 static unsigned nmi_wait_count;
14476 -static atomic_t nmi_update_count = ATOMIC_INIT(0);
14477 +static atomic_unchecked_t nmi_update_count = ATOMIC_INIT(0);
14478
14479 int ftrace_arch_read_dyn_info(char *buf, int size)
14480 {
14481 @@ -134,7 +134,7 @@ int ftrace_arch_read_dyn_info(char *buf, int size)
14482
14483 r = snprintf(buf, size, "%u %u",
14484 nmi_wait_count,
14485 - atomic_read(&nmi_update_count));
14486 + atomic_read_unchecked(&nmi_update_count));
14487 return r;
14488 }
14489
14490 @@ -177,8 +177,10 @@ void ftrace_nmi_enter(void)
14491
14492 if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
14493 smp_rmb();
14494 + pax_open_kernel();
14495 ftrace_mod_code();
14496 - atomic_inc(&nmi_update_count);
14497 + pax_close_kernel();
14498 + atomic_inc_unchecked(&nmi_update_count);
14499 }
14500 /* Must have previous changes seen before executions */
14501 smp_mb();
14502 @@ -271,6 +273,8 @@ ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
14503 {
14504 unsigned char replaced[MCOUNT_INSN_SIZE];
14505
14506 + ip = ktla_ktva(ip);
14507 +
14508 /*
14509 * Note: Due to modules and __init, code can
14510 * disappear and change, we need to protect against faulting
14511 @@ -327,7 +331,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
14512 unsigned char old[MCOUNT_INSN_SIZE], *new;
14513 int ret;
14514
14515 - memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
14516 + memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
14517 new = ftrace_call_replace(ip, (unsigned long)func);
14518 ret = ftrace_modify_code(ip, old, new);
14519
14520 @@ -353,6 +357,8 @@ static int ftrace_mod_jmp(unsigned long ip,
14521 {
14522 unsigned char code[MCOUNT_INSN_SIZE];
14523
14524 + ip = ktla_ktva(ip);
14525 +
14526 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
14527 return -EFAULT;
14528
14529 diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
14530 index 3bb0850..55a56f4 100644
14531 --- a/arch/x86/kernel/head32.c
14532 +++ b/arch/x86/kernel/head32.c
14533 @@ -19,6 +19,7 @@
14534 #include <asm/io_apic.h>
14535 #include <asm/bios_ebda.h>
14536 #include <asm/tlbflush.h>
14537 +#include <asm/boot.h>
14538
14539 static void __init i386_default_early_setup(void)
14540 {
14541 @@ -33,7 +34,7 @@ void __init i386_start_kernel(void)
14542 {
14543 memblock_init();
14544
14545 - memblock_x86_reserve_range(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
14546 + memblock_x86_reserve_range(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop), "TEXT DATA BSS");
14547
14548 #ifdef CONFIG_BLK_DEV_INITRD
14549 /* Reserve INITRD */
14550 diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
14551 index ce0be7c..c41476e 100644
14552 --- a/arch/x86/kernel/head_32.S
14553 +++ b/arch/x86/kernel/head_32.S
14554 @@ -25,6 +25,12 @@
14555 /* Physical address */
14556 #define pa(X) ((X) - __PAGE_OFFSET)
14557
14558 +#ifdef CONFIG_PAX_KERNEXEC
14559 +#define ta(X) (X)
14560 +#else
14561 +#define ta(X) ((X) - __PAGE_OFFSET)
14562 +#endif
14563 +
14564 /*
14565 * References to members of the new_cpu_data structure.
14566 */
14567 @@ -54,11 +60,7 @@
14568 * and small than max_low_pfn, otherwise will waste some page table entries
14569 */
14570
14571 -#if PTRS_PER_PMD > 1
14572 -#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
14573 -#else
14574 -#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
14575 -#endif
14576 +#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
14577
14578 /* Number of possible pages in the lowmem region */
14579 LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
14580 @@ -77,6 +79,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE
14581 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
14582
14583 /*
14584 + * Real beginning of normal "text" segment
14585 + */
14586 +ENTRY(stext)
14587 +ENTRY(_stext)
14588 +
14589 +/*
14590 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
14591 * %esi points to the real-mode code as a 32-bit pointer.
14592 * CS and DS must be 4 GB flat segments, but we don't depend on
14593 @@ -84,6 +92,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
14594 * can.
14595 */
14596 __HEAD
14597 +
14598 +#ifdef CONFIG_PAX_KERNEXEC
14599 + jmp startup_32
14600 +/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
14601 +.fill PAGE_SIZE-5,1,0xcc
14602 +#endif
14603 +
14604 ENTRY(startup_32)
14605 movl pa(stack_start),%ecx
14606
14607 @@ -105,6 +120,57 @@ ENTRY(startup_32)
14608 2:
14609 leal -__PAGE_OFFSET(%ecx),%esp
14610
14611 +#ifdef CONFIG_SMP
14612 + movl $pa(cpu_gdt_table),%edi
14613 + movl $__per_cpu_load,%eax
14614 + movw %ax,__KERNEL_PERCPU + 2(%edi)
14615 + rorl $16,%eax
14616 + movb %al,__KERNEL_PERCPU + 4(%edi)
14617 + movb %ah,__KERNEL_PERCPU + 7(%edi)
14618 + movl $__per_cpu_end - 1,%eax
14619 + subl $__per_cpu_start,%eax
14620 + movw %ax,__KERNEL_PERCPU + 0(%edi)
14621 +#endif
14622 +
14623 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14624 + movl $NR_CPUS,%ecx
14625 + movl $pa(cpu_gdt_table),%edi
14626 +1:
14627 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
14628 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
14629 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
14630 + addl $PAGE_SIZE_asm,%edi
14631 + loop 1b
14632 +#endif
14633 +
14634 +#ifdef CONFIG_PAX_KERNEXEC
14635 + movl $pa(boot_gdt),%edi
14636 + movl $__LOAD_PHYSICAL_ADDR,%eax
14637 + movw %ax,__BOOT_CS + 2(%edi)
14638 + rorl $16,%eax
14639 + movb %al,__BOOT_CS + 4(%edi)
14640 + movb %ah,__BOOT_CS + 7(%edi)
14641 + rorl $16,%eax
14642 +
14643 + ljmp $(__BOOT_CS),$1f
14644 +1:
14645 +
14646 + movl $NR_CPUS,%ecx
14647 + movl $pa(cpu_gdt_table),%edi
14648 + addl $__PAGE_OFFSET,%eax
14649 +1:
14650 + movw %ax,__KERNEL_CS + 2(%edi)
14651 + movw %ax,__KERNEXEC_KERNEL_CS + 2(%edi)
14652 + rorl $16,%eax
14653 + movb %al,__KERNEL_CS + 4(%edi)
14654 + movb %al,__KERNEXEC_KERNEL_CS + 4(%edi)
14655 + movb %ah,__KERNEL_CS + 7(%edi)
14656 + movb %ah,__KERNEXEC_KERNEL_CS + 7(%edi)
14657 + rorl $16,%eax
14658 + addl $PAGE_SIZE_asm,%edi
14659 + loop 1b
14660 +#endif
14661 +
14662 /*
14663 * Clear BSS first so that there are no surprises...
14664 */
14665 @@ -195,8 +261,11 @@ ENTRY(startup_32)
14666 movl %eax, pa(max_pfn_mapped)
14667
14668 /* Do early initialization of the fixmap area */
14669 - movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
14670 - movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
14671 +#ifdef CONFIG_COMPAT_VDSO
14672 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8)
14673 +#else
14674 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8)
14675 +#endif
14676 #else /* Not PAE */
14677
14678 page_pde_offset = (__PAGE_OFFSET >> 20);
14679 @@ -226,8 +295,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
14680 movl %eax, pa(max_pfn_mapped)
14681
14682 /* Do early initialization of the fixmap area */
14683 - movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
14684 - movl %eax,pa(initial_page_table+0xffc)
14685 +#ifdef CONFIG_COMPAT_VDSO
14686 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc)
14687 +#else
14688 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc)
14689 +#endif
14690 #endif
14691
14692 #ifdef CONFIG_PARAVIRT
14693 @@ -241,9 +313,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
14694 cmpl $num_subarch_entries, %eax
14695 jae bad_subarch
14696
14697 - movl pa(subarch_entries)(,%eax,4), %eax
14698 - subl $__PAGE_OFFSET, %eax
14699 - jmp *%eax
14700 + jmp *pa(subarch_entries)(,%eax,4)
14701
14702 bad_subarch:
14703 WEAK(lguest_entry)
14704 @@ -255,10 +325,10 @@ WEAK(xen_entry)
14705 __INITDATA
14706
14707 subarch_entries:
14708 - .long default_entry /* normal x86/PC */
14709 - .long lguest_entry /* lguest hypervisor */
14710 - .long xen_entry /* Xen hypervisor */
14711 - .long default_entry /* Moorestown MID */
14712 + .long ta(default_entry) /* normal x86/PC */
14713 + .long ta(lguest_entry) /* lguest hypervisor */
14714 + .long ta(xen_entry) /* Xen hypervisor */
14715 + .long ta(default_entry) /* Moorestown MID */
14716 num_subarch_entries = (. - subarch_entries) / 4
14717 .previous
14718 #else
14719 @@ -312,6 +382,7 @@ default_entry:
14720 orl %edx,%eax
14721 movl %eax,%cr4
14722
14723 +#ifdef CONFIG_X86_PAE
14724 testb $X86_CR4_PAE, %al # check if PAE is enabled
14725 jz 6f
14726
14727 @@ -340,6 +411,9 @@ default_entry:
14728 /* Make changes effective */
14729 wrmsr
14730
14731 + btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
14732 +#endif
14733 +
14734 6:
14735
14736 /*
14737 @@ -443,7 +517,7 @@ is386: movl $2,%ecx # set MP
14738 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
14739 movl %eax,%ss # after changing gdt.
14740
14741 - movl $(__USER_DS),%eax # DS/ES contains default USER segment
14742 +# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
14743 movl %eax,%ds
14744 movl %eax,%es
14745
14746 @@ -457,15 +531,22 @@ is386: movl $2,%ecx # set MP
14747 */
14748 cmpb $0,ready
14749 jne 1f
14750 - movl $gdt_page,%eax
14751 + movl $cpu_gdt_table,%eax
14752 movl $stack_canary,%ecx
14753 +#ifdef CONFIG_SMP
14754 + addl $__per_cpu_load,%ecx
14755 +#endif
14756 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
14757 shrl $16, %ecx
14758 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
14759 movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
14760 1:
14761 -#endif
14762 movl $(__KERNEL_STACK_CANARY),%eax
14763 +#elif defined(CONFIG_PAX_MEMORY_UDEREF)
14764 + movl $(__USER_DS),%eax
14765 +#else
14766 + xorl %eax,%eax
14767 +#endif
14768 movl %eax,%gs
14769
14770 xorl %eax,%eax # Clear LDT
14771 @@ -558,22 +639,22 @@ early_page_fault:
14772 jmp early_fault
14773
14774 early_fault:
14775 - cld
14776 #ifdef CONFIG_PRINTK
14777 + cmpl $1,%ss:early_recursion_flag
14778 + je hlt_loop
14779 + incl %ss:early_recursion_flag
14780 + cld
14781 pusha
14782 movl $(__KERNEL_DS),%eax
14783 movl %eax,%ds
14784 movl %eax,%es
14785 - cmpl $2,early_recursion_flag
14786 - je hlt_loop
14787 - incl early_recursion_flag
14788 movl %cr2,%eax
14789 pushl %eax
14790 pushl %edx /* trapno */
14791 pushl $fault_msg
14792 call printk
14793 +; call dump_stack
14794 #endif
14795 - call dump_stack
14796 hlt_loop:
14797 hlt
14798 jmp hlt_loop
14799 @@ -581,8 +662,11 @@ hlt_loop:
14800 /* This is the default interrupt "handler" :-) */
14801 ALIGN
14802 ignore_int:
14803 - cld
14804 #ifdef CONFIG_PRINTK
14805 + cmpl $2,%ss:early_recursion_flag
14806 + je hlt_loop
14807 + incl %ss:early_recursion_flag
14808 + cld
14809 pushl %eax
14810 pushl %ecx
14811 pushl %edx
14812 @@ -591,9 +675,6 @@ ignore_int:
14813 movl $(__KERNEL_DS),%eax
14814 movl %eax,%ds
14815 movl %eax,%es
14816 - cmpl $2,early_recursion_flag
14817 - je hlt_loop
14818 - incl early_recursion_flag
14819 pushl 16(%esp)
14820 pushl 24(%esp)
14821 pushl 32(%esp)
14822 @@ -622,29 +703,43 @@ ENTRY(initial_code)
14823 /*
14824 * BSS section
14825 */
14826 -__PAGE_ALIGNED_BSS
14827 - .align PAGE_SIZE
14828 #ifdef CONFIG_X86_PAE
14829 +.section .initial_pg_pmd,"a",@progbits
14830 initial_pg_pmd:
14831 .fill 1024*KPMDS,4,0
14832 #else
14833 +.section .initial_page_table,"a",@progbits
14834 ENTRY(initial_page_table)
14835 .fill 1024,4,0
14836 #endif
14837 +.section .initial_pg_fixmap,"a",@progbits
14838 initial_pg_fixmap:
14839 .fill 1024,4,0
14840 +.section .empty_zero_page,"a",@progbits
14841 ENTRY(empty_zero_page)
14842 .fill 4096,1,0
14843 +.section .swapper_pg_dir,"a",@progbits
14844 ENTRY(swapper_pg_dir)
14845 +#ifdef CONFIG_X86_PAE
14846 + .fill 4,8,0
14847 +#else
14848 .fill 1024,4,0
14849 +#endif
14850 +
14851 +/*
14852 + * The IDT has to be page-aligned to simplify the Pentium
14853 + * F0 0F bug workaround.. We have a special link segment
14854 + * for this.
14855 + */
14856 +.section .idt,"a",@progbits
14857 +ENTRY(idt_table)
14858 + .fill 256,8,0
14859
14860 /*
14861 * This starts the data section.
14862 */
14863 #ifdef CONFIG_X86_PAE
14864 -__PAGE_ALIGNED_DATA
14865 - /* Page-aligned for the benefit of paravirt? */
14866 - .align PAGE_SIZE
14867 +.section .initial_page_table,"a",@progbits
14868 ENTRY(initial_page_table)
14869 .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
14870 # if KPMDS == 3
14871 @@ -663,18 +758,27 @@ ENTRY(initial_page_table)
14872 # error "Kernel PMDs should be 1, 2 or 3"
14873 # endif
14874 .align PAGE_SIZE /* needs to be page-sized too */
14875 +
14876 +#ifdef CONFIG_PAX_PER_CPU_PGD
14877 +ENTRY(cpu_pgd)
14878 + .rept NR_CPUS
14879 + .fill 4,8,0
14880 + .endr
14881 +#endif
14882 +
14883 #endif
14884
14885 .data
14886 .balign 4
14887 ENTRY(stack_start)
14888 - .long init_thread_union+THREAD_SIZE
14889 + .long init_thread_union+THREAD_SIZE-8
14890
14891 +ready: .byte 0
14892 +
14893 +.section .rodata,"a",@progbits
14894 early_recursion_flag:
14895 .long 0
14896
14897 -ready: .byte 0
14898 -
14899 int_msg:
14900 .asciz "Unknown interrupt or fault at: %p %p %p\n"
14901
14902 @@ -707,7 +811,7 @@ fault_msg:
14903 .word 0 # 32 bit align gdt_desc.address
14904 boot_gdt_descr:
14905 .word __BOOT_DS+7
14906 - .long boot_gdt - __PAGE_OFFSET
14907 + .long pa(boot_gdt)
14908
14909 .word 0 # 32-bit align idt_desc.address
14910 idt_descr:
14911 @@ -718,7 +822,7 @@ idt_descr:
14912 .word 0 # 32 bit align gdt_desc.address
14913 ENTRY(early_gdt_descr)
14914 .word GDT_ENTRIES*8-1
14915 - .long gdt_page /* Overwritten for secondary CPUs */
14916 + .long cpu_gdt_table /* Overwritten for secondary CPUs */
14917
14918 /*
14919 * The boot_gdt must mirror the equivalent in setup.S and is
14920 @@ -727,5 +831,65 @@ ENTRY(early_gdt_descr)
14921 .align L1_CACHE_BYTES
14922 ENTRY(boot_gdt)
14923 .fill GDT_ENTRY_BOOT_CS,8,0
14924 - .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
14925 - .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
14926 + .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
14927 + .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
14928 +
14929 + .align PAGE_SIZE_asm
14930 +ENTRY(cpu_gdt_table)
14931 + .rept NR_CPUS
14932 + .quad 0x0000000000000000 /* NULL descriptor */
14933 + .quad 0x0000000000000000 /* 0x0b reserved */
14934 + .quad 0x0000000000000000 /* 0x13 reserved */
14935 + .quad 0x0000000000000000 /* 0x1b reserved */
14936 +
14937 +#ifdef CONFIG_PAX_KERNEXEC
14938 + .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
14939 +#else
14940 + .quad 0x0000000000000000 /* 0x20 unused */
14941 +#endif
14942 +
14943 + .quad 0x0000000000000000 /* 0x28 unused */
14944 + .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
14945 + .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
14946 + .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
14947 + .quad 0x0000000000000000 /* 0x4b reserved */
14948 + .quad 0x0000000000000000 /* 0x53 reserved */
14949 + .quad 0x0000000000000000 /* 0x5b reserved */
14950 +
14951 + .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
14952 + .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
14953 + .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
14954 + .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
14955 +
14956 + .quad 0x0000000000000000 /* 0x80 TSS descriptor */
14957 + .quad 0x0000000000000000 /* 0x88 LDT descriptor */
14958 +
14959 + /*
14960 + * Segments used for calling PnP BIOS have byte granularity.
14961 + * The code segments and data segments have fixed 64k limits,
14962 + * the transfer segment sizes are set at run time.
14963 + */
14964 + .quad 0x00409b000000ffff /* 0x90 32-bit code */
14965 + .quad 0x00009b000000ffff /* 0x98 16-bit code */
14966 + .quad 0x000093000000ffff /* 0xa0 16-bit data */
14967 + .quad 0x0000930000000000 /* 0xa8 16-bit data */
14968 + .quad 0x0000930000000000 /* 0xb0 16-bit data */
14969 +
14970 + /*
14971 + * The APM segments have byte granularity and their bases
14972 + * are set at run time. All have 64k limits.
14973 + */
14974 + .quad 0x00409b000000ffff /* 0xb8 APM CS code */
14975 + .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
14976 + .quad 0x004093000000ffff /* 0xc8 APM DS data */
14977 +
14978 + .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
14979 + .quad 0x0040930000000000 /* 0xd8 - PERCPU */
14980 + .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
14981 + .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
14982 + .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
14983 + .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
14984 +
14985 + /* Be sure this is zeroed to avoid false validations in Xen */
14986 + .fill PAGE_SIZE_asm - GDT_SIZE,1,0
14987 + .endr
14988 diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
14989 index e11e394..9aebc5d 100644
14990 --- a/arch/x86/kernel/head_64.S
14991 +++ b/arch/x86/kernel/head_64.S
14992 @@ -19,6 +19,8 @@
14993 #include <asm/cache.h>
14994 #include <asm/processor-flags.h>
14995 #include <asm/percpu.h>
14996 +#include <asm/cpufeature.h>
14997 +#include <asm/alternative-asm.h>
14998
14999 #ifdef CONFIG_PARAVIRT
15000 #include <asm/asm-offsets.h>
15001 @@ -38,6 +40,12 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
15002 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
15003 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
15004 L3_START_KERNEL = pud_index(__START_KERNEL_map)
15005 +L4_VMALLOC_START = pgd_index(VMALLOC_START)
15006 +L3_VMALLOC_START = pud_index(VMALLOC_START)
15007 +L4_VMALLOC_END = pgd_index(VMALLOC_END)
15008 +L3_VMALLOC_END = pud_index(VMALLOC_END)
15009 +L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
15010 +L3_VMEMMAP_START = pud_index(VMEMMAP_START)
15011
15012 .text
15013 __HEAD
15014 @@ -85,35 +93,23 @@ startup_64:
15015 */
15016 addq %rbp, init_level4_pgt + 0(%rip)
15017 addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
15018 + addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
15019 + addq %rbp, init_level4_pgt + (L4_VMALLOC_END*8)(%rip)
15020 + addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
15021 addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
15022
15023 addq %rbp, level3_ident_pgt + 0(%rip)
15024 +#ifndef CONFIG_XEN
15025 + addq %rbp, level3_ident_pgt + 8(%rip)
15026 +#endif
15027
15028 - addq %rbp, level3_kernel_pgt + (510*8)(%rip)
15029 - addq %rbp, level3_kernel_pgt + (511*8)(%rip)
15030 + addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
15031 +
15032 + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
15033 + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
15034
15035 addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
15036 -
15037 - /* Add an Identity mapping if I am above 1G */
15038 - leaq _text(%rip), %rdi
15039 - andq $PMD_PAGE_MASK, %rdi
15040 -
15041 - movq %rdi, %rax
15042 - shrq $PUD_SHIFT, %rax
15043 - andq $(PTRS_PER_PUD - 1), %rax
15044 - jz ident_complete
15045 -
15046 - leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
15047 - leaq level3_ident_pgt(%rip), %rbx
15048 - movq %rdx, 0(%rbx, %rax, 8)
15049 -
15050 - movq %rdi, %rax
15051 - shrq $PMD_SHIFT, %rax
15052 - andq $(PTRS_PER_PMD - 1), %rax
15053 - leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
15054 - leaq level2_spare_pgt(%rip), %rbx
15055 - movq %rdx, 0(%rbx, %rax, 8)
15056 -ident_complete:
15057 + addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
15058
15059 /*
15060 * Fixup the kernel text+data virtual addresses. Note that
15061 @@ -160,8 +156,8 @@ ENTRY(secondary_startup_64)
15062 * after the boot processor executes this code.
15063 */
15064
15065 - /* Enable PAE mode and PGE */
15066 - movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
15067 + /* Enable PAE mode and PSE/PGE */
15068 + movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
15069 movq %rax, %cr4
15070
15071 /* Setup early boot stage 4 level pagetables. */
15072 @@ -183,9 +179,17 @@ ENTRY(secondary_startup_64)
15073 movl $MSR_EFER, %ecx
15074 rdmsr
15075 btsl $_EFER_SCE, %eax /* Enable System Call */
15076 - btl $20,%edi /* No Execute supported? */
15077 + btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
15078 jnc 1f
15079 btsl $_EFER_NX, %eax
15080 + leaq init_level4_pgt(%rip), %rdi
15081 +#ifndef CONFIG_EFI
15082 + btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
15083 +#endif
15084 + btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
15085 + btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_END(%rdi)
15086 + btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
15087 + btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip)
15088 1: wrmsr /* Make changes effective */
15089
15090 /* Setup cr0 */
15091 @@ -247,6 +251,7 @@ ENTRY(secondary_startup_64)
15092 * jump. In addition we need to ensure %cs is set so we make this
15093 * a far return.
15094 */
15095 + pax_set_fptr_mask
15096 movq initial_code(%rip),%rax
15097 pushq $0 # fake return address to stop unwinder
15098 pushq $__KERNEL_CS # set correct cs
15099 @@ -269,7 +274,7 @@ ENTRY(secondary_startup_64)
15100 bad_address:
15101 jmp bad_address
15102
15103 - .section ".init.text","ax"
15104 + __INIT
15105 #ifdef CONFIG_EARLY_PRINTK
15106 .globl early_idt_handlers
15107 early_idt_handlers:
15108 @@ -314,18 +319,23 @@ ENTRY(early_idt_handler)
15109 #endif /* EARLY_PRINTK */
15110 1: hlt
15111 jmp 1b
15112 + .previous
15113
15114 #ifdef CONFIG_EARLY_PRINTK
15115 + __INITDATA
15116 early_recursion_flag:
15117 .long 0
15118 + .previous
15119
15120 + .section .rodata,"a",@progbits
15121 early_idt_msg:
15122 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
15123 early_idt_ripmsg:
15124 .asciz "RIP %s\n"
15125 + .previous
15126 #endif /* CONFIG_EARLY_PRINTK */
15127 - .previous
15128
15129 + .section .rodata,"a",@progbits
15130 #define NEXT_PAGE(name) \
15131 .balign PAGE_SIZE; \
15132 ENTRY(name)
15133 @@ -338,7 +348,6 @@ ENTRY(name)
15134 i = i + 1 ; \
15135 .endr
15136
15137 - .data
15138 /*
15139 * This default setting generates an ident mapping at address 0x100000
15140 * and a mapping for the kernel that precisely maps virtual address
15141 @@ -349,13 +358,41 @@ NEXT_PAGE(init_level4_pgt)
15142 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
15143 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
15144 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
15145 + .org init_level4_pgt + L4_VMALLOC_START*8, 0
15146 + .quad level3_vmalloc_start_pgt - __START_KERNEL_map + _KERNPG_TABLE
15147 + .org init_level4_pgt + L4_VMALLOC_END*8, 0
15148 + .quad level3_vmalloc_end_pgt - __START_KERNEL_map + _KERNPG_TABLE
15149 + .org init_level4_pgt + L4_VMEMMAP_START*8, 0
15150 + .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
15151 .org init_level4_pgt + L4_START_KERNEL*8, 0
15152 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
15153 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
15154
15155 +#ifdef CONFIG_PAX_PER_CPU_PGD
15156 +NEXT_PAGE(cpu_pgd)
15157 + .rept NR_CPUS
15158 + .fill 512,8,0
15159 + .endr
15160 +#endif
15161 +
15162 NEXT_PAGE(level3_ident_pgt)
15163 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
15164 +#ifdef CONFIG_XEN
15165 .fill 511,8,0
15166 +#else
15167 + .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
15168 + .fill 510,8,0
15169 +#endif
15170 +
15171 +NEXT_PAGE(level3_vmalloc_start_pgt)
15172 + .fill 512,8,0
15173 +
15174 +NEXT_PAGE(level3_vmalloc_end_pgt)
15175 + .fill 512,8,0
15176 +
15177 +NEXT_PAGE(level3_vmemmap_pgt)
15178 + .fill L3_VMEMMAP_START,8,0
15179 + .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
15180
15181 NEXT_PAGE(level3_kernel_pgt)
15182 .fill L3_START_KERNEL,8,0
15183 @@ -363,20 +400,23 @@ NEXT_PAGE(level3_kernel_pgt)
15184 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
15185 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
15186
15187 +NEXT_PAGE(level2_vmemmap_pgt)
15188 + .fill 512,8,0
15189 +
15190 NEXT_PAGE(level2_fixmap_pgt)
15191 - .fill 506,8,0
15192 - .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
15193 - /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
15194 - .fill 5,8,0
15195 + .fill 507,8,0
15196 + .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
15197 + /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
15198 + .fill 4,8,0
15199
15200 -NEXT_PAGE(level1_fixmap_pgt)
15201 +NEXT_PAGE(level1_vsyscall_pgt)
15202 .fill 512,8,0
15203
15204 -NEXT_PAGE(level2_ident_pgt)
15205 - /* Since I easily can, map the first 1G.
15206 + /* Since I easily can, map the first 2G.
15207 * Don't set NX because code runs from these pages.
15208 */
15209 - PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
15210 +NEXT_PAGE(level2_ident_pgt)
15211 + PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
15212
15213 NEXT_PAGE(level2_kernel_pgt)
15214 /*
15215 @@ -389,33 +429,55 @@ NEXT_PAGE(level2_kernel_pgt)
15216 * If you want to increase this then increase MODULES_VADDR
15217 * too.)
15218 */
15219 - PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
15220 - KERNEL_IMAGE_SIZE/PMD_SIZE)
15221 -
15222 -NEXT_PAGE(level2_spare_pgt)
15223 - .fill 512, 8, 0
15224 + PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
15225
15226 #undef PMDS
15227 #undef NEXT_PAGE
15228
15229 - .data
15230 + .align PAGE_SIZE
15231 +ENTRY(cpu_gdt_table)
15232 + .rept NR_CPUS
15233 + .quad 0x0000000000000000 /* NULL descriptor */
15234 + .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
15235 + .quad 0x00af9b000000ffff /* __KERNEL_CS */
15236 + .quad 0x00cf93000000ffff /* __KERNEL_DS */
15237 + .quad 0x00cffb000000ffff /* __USER32_CS */
15238 + .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
15239 + .quad 0x00affb000000ffff /* __USER_CS */
15240 +
15241 +#ifdef CONFIG_PAX_KERNEXEC
15242 + .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
15243 +#else
15244 + .quad 0x0 /* unused */
15245 +#endif
15246 +
15247 + .quad 0,0 /* TSS */
15248 + .quad 0,0 /* LDT */
15249 + .quad 0,0,0 /* three TLS descriptors */
15250 + .quad 0x0000f40000000000 /* node/CPU stored in limit */
15251 + /* asm/segment.h:GDT_ENTRIES must match this */
15252 +
15253 + /* zero the remaining page */
15254 + .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
15255 + .endr
15256 +
15257 .align 16
15258 .globl early_gdt_descr
15259 early_gdt_descr:
15260 .word GDT_ENTRIES*8-1
15261 early_gdt_descr_base:
15262 - .quad INIT_PER_CPU_VAR(gdt_page)
15263 + .quad cpu_gdt_table
15264
15265 ENTRY(phys_base)
15266 /* This must match the first entry in level2_kernel_pgt */
15267 .quad 0x0000000000000000
15268
15269 #include "../../x86/xen/xen-head.S"
15270 -
15271 - .section .bss, "aw", @nobits
15272 +
15273 + .section .rodata,"a",@progbits
15274 .align L1_CACHE_BYTES
15275 ENTRY(idt_table)
15276 - .skip IDT_ENTRIES * 16
15277 + .fill 512,8,0
15278
15279 __PAGE_ALIGNED_BSS
15280 .align PAGE_SIZE
15281 diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
15282 index 9c3bd4a..e1d9b35 100644
15283 --- a/arch/x86/kernel/i386_ksyms_32.c
15284 +++ b/arch/x86/kernel/i386_ksyms_32.c
15285 @@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
15286 EXPORT_SYMBOL(cmpxchg8b_emu);
15287 #endif
15288
15289 +EXPORT_SYMBOL_GPL(cpu_gdt_table);
15290 +
15291 /* Networking helper routines. */
15292 EXPORT_SYMBOL(csum_partial_copy_generic);
15293 +EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
15294 +EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
15295
15296 EXPORT_SYMBOL(__get_user_1);
15297 EXPORT_SYMBOL(__get_user_2);
15298 @@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
15299
15300 EXPORT_SYMBOL(csum_partial);
15301 EXPORT_SYMBOL(empty_zero_page);
15302 +
15303 +#ifdef CONFIG_PAX_KERNEXEC
15304 +EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
15305 +#endif
15306 diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
15307 index 6104852..6114160 100644
15308 --- a/arch/x86/kernel/i8259.c
15309 +++ b/arch/x86/kernel/i8259.c
15310 @@ -210,7 +210,7 @@ spurious_8259A_irq:
15311 "spurious 8259A interrupt: IRQ%d.\n", irq);
15312 spurious_irq_mask |= irqmask;
15313 }
15314 - atomic_inc(&irq_err_count);
15315 + atomic_inc_unchecked(&irq_err_count);
15316 /*
15317 * Theoretically we do not have to handle this IRQ,
15318 * but in Linux this does not cause problems and is
15319 diff --git a/arch/x86/kernel/init_task.c b/arch/x86/kernel/init_task.c
15320 index 43e9ccf..44ccf6f 100644
15321 --- a/arch/x86/kernel/init_task.c
15322 +++ b/arch/x86/kernel/init_task.c
15323 @@ -20,8 +20,7 @@ static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
15324 * way process stacks are handled. This is done by having a special
15325 * "init_task" linker map entry..
15326 */
15327 -union thread_union init_thread_union __init_task_data =
15328 - { INIT_THREAD_INFO(init_task) };
15329 +union thread_union init_thread_union __init_task_data;
15330
15331 /*
15332 * Initial task structure.
15333 @@ -38,5 +37,5 @@ EXPORT_SYMBOL(init_task);
15334 * section. Since TSS's are completely CPU-local, we want them
15335 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
15336 */
15337 -DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
15338 -
15339 +struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
15340 +EXPORT_SYMBOL(init_tss);
15341 diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
15342 index 8c96897..be66bfa 100644
15343 --- a/arch/x86/kernel/ioport.c
15344 +++ b/arch/x86/kernel/ioport.c
15345 @@ -6,6 +6,7 @@
15346 #include <linux/sched.h>
15347 #include <linux/kernel.h>
15348 #include <linux/capability.h>
15349 +#include <linux/security.h>
15350 #include <linux/errno.h>
15351 #include <linux/types.h>
15352 #include <linux/ioport.h>
15353 @@ -28,6 +29,12 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
15354
15355 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
15356 return -EINVAL;
15357 +#ifdef CONFIG_GRKERNSEC_IO
15358 + if (turn_on && grsec_disable_privio) {
15359 + gr_handle_ioperm();
15360 + return -EPERM;
15361 + }
15362 +#endif
15363 if (turn_on && !capable(CAP_SYS_RAWIO))
15364 return -EPERM;
15365
15366 @@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
15367 * because the ->io_bitmap_max value must match the bitmap
15368 * contents:
15369 */
15370 - tss = &per_cpu(init_tss, get_cpu());
15371 + tss = init_tss + get_cpu();
15372
15373 if (turn_on)
15374 bitmap_clear(t->io_bitmap_ptr, from, num);
15375 @@ -102,6 +109,12 @@ long sys_iopl(unsigned int level, struct pt_regs *regs)
15376 return -EINVAL;
15377 /* Trying to gain more privileges? */
15378 if (level > old) {
15379 +#ifdef CONFIG_GRKERNSEC_IO
15380 + if (grsec_disable_privio) {
15381 + gr_handle_iopl();
15382 + return -EPERM;
15383 + }
15384 +#endif
15385 if (!capable(CAP_SYS_RAWIO))
15386 return -EPERM;
15387 }
15388 diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
15389 index 429e0c9..17b3ece 100644
15390 --- a/arch/x86/kernel/irq.c
15391 +++ b/arch/x86/kernel/irq.c
15392 @@ -18,7 +18,7 @@
15393 #include <asm/mce.h>
15394 #include <asm/hw_irq.h>
15395
15396 -atomic_t irq_err_count;
15397 +atomic_unchecked_t irq_err_count;
15398
15399 /* Function pointer for generic interrupt vector handling */
15400 void (*x86_platform_ipi_callback)(void) = NULL;
15401 @@ -117,9 +117,9 @@ int arch_show_interrupts(struct seq_file *p, int prec)
15402 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
15403 seq_printf(p, " Machine check polls\n");
15404 #endif
15405 - seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
15406 + seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
15407 #if defined(CONFIG_X86_IO_APIC)
15408 - seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
15409 + seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
15410 #endif
15411 return 0;
15412 }
15413 @@ -159,10 +159,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
15414
15415 u64 arch_irq_stat(void)
15416 {
15417 - u64 sum = atomic_read(&irq_err_count);
15418 + u64 sum = atomic_read_unchecked(&irq_err_count);
15419
15420 #ifdef CONFIG_X86_IO_APIC
15421 - sum += atomic_read(&irq_mis_count);
15422 + sum += atomic_read_unchecked(&irq_mis_count);
15423 #endif
15424 return sum;
15425 }
15426 diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
15427 index 7209070..cbcd71a 100644
15428 --- a/arch/x86/kernel/irq_32.c
15429 +++ b/arch/x86/kernel/irq_32.c
15430 @@ -36,7 +36,7 @@ static int check_stack_overflow(void)
15431 __asm__ __volatile__("andl %%esp,%0" :
15432 "=r" (sp) : "0" (THREAD_SIZE - 1));
15433
15434 - return sp < (sizeof(struct thread_info) + STACK_WARN);
15435 + return sp < STACK_WARN;
15436 }
15437
15438 static void print_stack_overflow(void)
15439 @@ -54,8 +54,8 @@ static inline void print_stack_overflow(void) { }
15440 * per-CPU IRQ handling contexts (thread information and stack)
15441 */
15442 union irq_ctx {
15443 - struct thread_info tinfo;
15444 - u32 stack[THREAD_SIZE/sizeof(u32)];
15445 + unsigned long previous_esp;
15446 + u32 stack[THREAD_SIZE/sizeof(u32)];
15447 } __attribute__((aligned(THREAD_SIZE)));
15448
15449 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
15450 @@ -75,10 +75,9 @@ static void call_on_stack(void *func, void *stack)
15451 static inline int
15452 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
15453 {
15454 - union irq_ctx *curctx, *irqctx;
15455 + union irq_ctx *irqctx;
15456 u32 *isp, arg1, arg2;
15457
15458 - curctx = (union irq_ctx *) current_thread_info();
15459 irqctx = __this_cpu_read(hardirq_ctx);
15460
15461 /*
15462 @@ -87,21 +86,16 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
15463 * handler) we can't do that and just have to keep using the
15464 * current stack (which is the irq stack already after all)
15465 */
15466 - if (unlikely(curctx == irqctx))
15467 + if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
15468 return 0;
15469
15470 /* build the stack frame on the IRQ stack */
15471 - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
15472 - irqctx->tinfo.task = curctx->tinfo.task;
15473 - irqctx->tinfo.previous_esp = current_stack_pointer;
15474 + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
15475 + irqctx->previous_esp = current_stack_pointer;
15476
15477 - /*
15478 - * Copy the softirq bits in preempt_count so that the
15479 - * softirq checks work in the hardirq context.
15480 - */
15481 - irqctx->tinfo.preempt_count =
15482 - (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
15483 - (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
15484 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15485 + __set_fs(MAKE_MM_SEG(0));
15486 +#endif
15487
15488 if (unlikely(overflow))
15489 call_on_stack(print_stack_overflow, isp);
15490 @@ -113,6 +107,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
15491 : "0" (irq), "1" (desc), "2" (isp),
15492 "D" (desc->handle_irq)
15493 : "memory", "cc", "ecx");
15494 +
15495 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15496 + __set_fs(current_thread_info()->addr_limit);
15497 +#endif
15498 +
15499 return 1;
15500 }
15501
15502 @@ -121,29 +120,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
15503 */
15504 void __cpuinit irq_ctx_init(int cpu)
15505 {
15506 - union irq_ctx *irqctx;
15507 -
15508 if (per_cpu(hardirq_ctx, cpu))
15509 return;
15510
15511 - irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
15512 - THREAD_FLAGS,
15513 - THREAD_ORDER));
15514 - memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
15515 - irqctx->tinfo.cpu = cpu;
15516 - irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
15517 - irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
15518 -
15519 - per_cpu(hardirq_ctx, cpu) = irqctx;
15520 -
15521 - irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
15522 - THREAD_FLAGS,
15523 - THREAD_ORDER));
15524 - memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
15525 - irqctx->tinfo.cpu = cpu;
15526 - irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
15527 -
15528 - per_cpu(softirq_ctx, cpu) = irqctx;
15529 + per_cpu(hardirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
15530 + per_cpu(softirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
15531
15532 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
15533 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
15534 @@ -152,7 +133,6 @@ void __cpuinit irq_ctx_init(int cpu)
15535 asmlinkage void do_softirq(void)
15536 {
15537 unsigned long flags;
15538 - struct thread_info *curctx;
15539 union irq_ctx *irqctx;
15540 u32 *isp;
15541
15542 @@ -162,15 +142,22 @@ asmlinkage void do_softirq(void)
15543 local_irq_save(flags);
15544
15545 if (local_softirq_pending()) {
15546 - curctx = current_thread_info();
15547 irqctx = __this_cpu_read(softirq_ctx);
15548 - irqctx->tinfo.task = curctx->task;
15549 - irqctx->tinfo.previous_esp = current_stack_pointer;
15550 + irqctx->previous_esp = current_stack_pointer;
15551
15552 /* build the stack frame on the softirq stack */
15553 - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
15554 + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
15555 +
15556 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15557 + __set_fs(MAKE_MM_SEG(0));
15558 +#endif
15559
15560 call_on_stack(__do_softirq, isp);
15561 +
15562 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15563 + __set_fs(current_thread_info()->addr_limit);
15564 +#endif
15565 +
15566 /*
15567 * Shouldn't happen, we returned above if in_interrupt():
15568 */
15569 diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
15570 index 69bca46..0bac999 100644
15571 --- a/arch/x86/kernel/irq_64.c
15572 +++ b/arch/x86/kernel/irq_64.c
15573 @@ -38,7 +38,7 @@ static inline void stack_overflow_check(struct pt_regs *regs)
15574 #ifdef CONFIG_DEBUG_STACKOVERFLOW
15575 u64 curbase = (u64)task_stack_page(current);
15576
15577 - if (user_mode_vm(regs))
15578 + if (user_mode(regs))
15579 return;
15580
15581 WARN_ONCE(regs->sp >= curbase &&
15582 diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
15583 index faba577..93b9e71 100644
15584 --- a/arch/x86/kernel/kgdb.c
15585 +++ b/arch/x86/kernel/kgdb.c
15586 @@ -124,11 +124,11 @@ char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
15587 #ifdef CONFIG_X86_32
15588 switch (regno) {
15589 case GDB_SS:
15590 - if (!user_mode_vm(regs))
15591 + if (!user_mode(regs))
15592 *(unsigned long *)mem = __KERNEL_DS;
15593 break;
15594 case GDB_SP:
15595 - if (!user_mode_vm(regs))
15596 + if (!user_mode(regs))
15597 *(unsigned long *)mem = kernel_stack_pointer(regs);
15598 break;
15599 case GDB_GS:
15600 @@ -473,12 +473,12 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
15601 case 'k':
15602 /* clear the trace bit */
15603 linux_regs->flags &= ~X86_EFLAGS_TF;
15604 - atomic_set(&kgdb_cpu_doing_single_step, -1);
15605 + atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
15606
15607 /* set the trace bit if we're stepping */
15608 if (remcomInBuffer[0] == 's') {
15609 linux_regs->flags |= X86_EFLAGS_TF;
15610 - atomic_set(&kgdb_cpu_doing_single_step,
15611 + atomic_set_unchecked(&kgdb_cpu_doing_single_step,
15612 raw_smp_processor_id());
15613 }
15614
15615 @@ -543,7 +543,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
15616
15617 switch (cmd) {
15618 case DIE_DEBUG:
15619 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
15620 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
15621 if (user_mode(regs))
15622 return single_step_cont(regs, args);
15623 break;
15624 diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
15625 index 7da647d..5d3c4c1 100644
15626 --- a/arch/x86/kernel/kprobes.c
15627 +++ b/arch/x86/kernel/kprobes.c
15628 @@ -118,8 +118,11 @@ static void __kprobes __synthesize_relative_insn(void *from, void *to, u8 op)
15629 } __attribute__((packed)) *insn;
15630
15631 insn = (struct __arch_relative_insn *)from;
15632 +
15633 + pax_open_kernel();
15634 insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
15635 insn->op = op;
15636 + pax_close_kernel();
15637 }
15638
15639 /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
15640 @@ -156,7 +159,7 @@ static int __kprobes can_boost(kprobe_opcode_t *opcodes)
15641 kprobe_opcode_t opcode;
15642 kprobe_opcode_t *orig_opcodes = opcodes;
15643
15644 - if (search_exception_tables((unsigned long)opcodes))
15645 + if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
15646 return 0; /* Page fault may occur on this address. */
15647
15648 retry:
15649 @@ -317,7 +320,9 @@ static int __kprobes __copy_instruction(u8 *dest, u8 *src, int recover)
15650 }
15651 }
15652 insn_get_length(&insn);
15653 + pax_open_kernel();
15654 memcpy(dest, insn.kaddr, insn.length);
15655 + pax_close_kernel();
15656
15657 #ifdef CONFIG_X86_64
15658 if (insn_rip_relative(&insn)) {
15659 @@ -341,7 +346,9 @@ static int __kprobes __copy_instruction(u8 *dest, u8 *src, int recover)
15660 (u8 *) dest;
15661 BUG_ON((s64) (s32) newdisp != newdisp); /* Sanity check. */
15662 disp = (u8 *) dest + insn_offset_displacement(&insn);
15663 + pax_open_kernel();
15664 *(s32 *) disp = (s32) newdisp;
15665 + pax_close_kernel();
15666 }
15667 #endif
15668 return insn.length;
15669 @@ -355,12 +362,12 @@ static void __kprobes arch_copy_kprobe(struct kprobe *p)
15670 */
15671 __copy_instruction(p->ainsn.insn, p->addr, 0);
15672
15673 - if (can_boost(p->addr))
15674 + if (can_boost(ktla_ktva(p->addr)))
15675 p->ainsn.boostable = 0;
15676 else
15677 p->ainsn.boostable = -1;
15678
15679 - p->opcode = *p->addr;
15680 + p->opcode = *(ktla_ktva(p->addr));
15681 }
15682
15683 int __kprobes arch_prepare_kprobe(struct kprobe *p)
15684 @@ -477,7 +484,7 @@ static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs,
15685 * nor set current_kprobe, because it doesn't use single
15686 * stepping.
15687 */
15688 - regs->ip = (unsigned long)p->ainsn.insn;
15689 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
15690 preempt_enable_no_resched();
15691 return;
15692 }
15693 @@ -496,7 +503,7 @@ static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs,
15694 if (p->opcode == BREAKPOINT_INSTRUCTION)
15695 regs->ip = (unsigned long)p->addr;
15696 else
15697 - regs->ip = (unsigned long)p->ainsn.insn;
15698 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
15699 }
15700
15701 /*
15702 @@ -575,7 +582,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
15703 setup_singlestep(p, regs, kcb, 0);
15704 return 1;
15705 }
15706 - } else if (*addr != BREAKPOINT_INSTRUCTION) {
15707 + } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
15708 /*
15709 * The breakpoint instruction was removed right
15710 * after we hit it. Another cpu has removed
15711 @@ -683,6 +690,9 @@ static void __used __kprobes kretprobe_trampoline_holder(void)
15712 " movq %rax, 152(%rsp)\n"
15713 RESTORE_REGS_STRING
15714 " popfq\n"
15715 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
15716 + " btsq $63,(%rsp)\n"
15717 +#endif
15718 #else
15719 " pushf\n"
15720 SAVE_REGS_STRING
15721 @@ -820,7 +830,7 @@ static void __kprobes resume_execution(struct kprobe *p,
15722 struct pt_regs *regs, struct kprobe_ctlblk *kcb)
15723 {
15724 unsigned long *tos = stack_addr(regs);
15725 - unsigned long copy_ip = (unsigned long)p->ainsn.insn;
15726 + unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
15727 unsigned long orig_ip = (unsigned long)p->addr;
15728 kprobe_opcode_t *insn = p->ainsn.insn;
15729
15730 @@ -1002,7 +1012,7 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
15731 struct die_args *args = data;
15732 int ret = NOTIFY_DONE;
15733
15734 - if (args->regs && user_mode_vm(args->regs))
15735 + if (args->regs && user_mode(args->regs))
15736 return ret;
15737
15738 switch (val) {
15739 @@ -1384,7 +1394,7 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
15740 * Verify if the address gap is in 2GB range, because this uses
15741 * a relative jump.
15742 */
15743 - rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
15744 + rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE;
15745 if (abs(rel) > 0x7fffffff)
15746 return -ERANGE;
15747
15748 @@ -1405,11 +1415,11 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
15749 synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
15750
15751 /* Set probe function call */
15752 - synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
15753 + synthesize_relcall(buf + TMPL_CALL_IDX, ktla_ktva(optimized_callback));
15754
15755 /* Set returning jmp instruction at the tail of out-of-line buffer */
15756 synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
15757 - (u8 *)op->kp.addr + op->optinsn.size);
15758 + (u8 *)ktla_ktva(op->kp.addr) + op->optinsn.size);
15759
15760 flush_icache_range((unsigned long) buf,
15761 (unsigned long) buf + TMPL_END_IDX +
15762 @@ -1431,7 +1441,7 @@ static void __kprobes setup_optimize_kprobe(struct text_poke_param *tprm,
15763 ((long)op->kp.addr + RELATIVEJUMP_SIZE));
15764
15765 /* Backup instructions which will be replaced by jump address */
15766 - memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
15767 + memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE,
15768 RELATIVE_ADDR_SIZE);
15769
15770 insn_buf[0] = RELATIVEJUMP_OPCODE;
15771 diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
15772 index a9c2116..a52d4fc 100644
15773 --- a/arch/x86/kernel/kvm.c
15774 +++ b/arch/x86/kernel/kvm.c
15775 @@ -437,6 +437,7 @@ static void __init paravirt_ops_setup(void)
15776 pv_mmu_ops.set_pud = kvm_set_pud;
15777 #if PAGETABLE_LEVELS == 4
15778 pv_mmu_ops.set_pgd = kvm_set_pgd;
15779 + pv_mmu_ops.set_pgd_batched = kvm_set_pgd;
15780 #endif
15781 #endif
15782 pv_mmu_ops.flush_tlb_user = kvm_flush_tlb;
15783 diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
15784 index ea69726..604d066 100644
15785 --- a/arch/x86/kernel/ldt.c
15786 +++ b/arch/x86/kernel/ldt.c
15787 @@ -67,13 +67,13 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
15788 if (reload) {
15789 #ifdef CONFIG_SMP
15790 preempt_disable();
15791 - load_LDT(pc);
15792 + load_LDT_nolock(pc);
15793 if (!cpumask_equal(mm_cpumask(current->mm),
15794 cpumask_of(smp_processor_id())))
15795 smp_call_function(flush_ldt, current->mm, 1);
15796 preempt_enable();
15797 #else
15798 - load_LDT(pc);
15799 + load_LDT_nolock(pc);
15800 #endif
15801 }
15802 if (oldsize) {
15803 @@ -95,7 +95,7 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
15804 return err;
15805
15806 for (i = 0; i < old->size; i++)
15807 - write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
15808 + write_ldt_entry(new->ldt, i, old->ldt + i);
15809 return 0;
15810 }
15811
15812 @@ -116,6 +116,24 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
15813 retval = copy_ldt(&mm->context, &old_mm->context);
15814 mutex_unlock(&old_mm->context.lock);
15815 }
15816 +
15817 + if (tsk == current) {
15818 + mm->context.vdso = 0;
15819 +
15820 +#ifdef CONFIG_X86_32
15821 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
15822 + mm->context.user_cs_base = 0UL;
15823 + mm->context.user_cs_limit = ~0UL;
15824 +
15825 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
15826 + cpus_clear(mm->context.cpu_user_cs_mask);
15827 +#endif
15828 +
15829 +#endif
15830 +#endif
15831 +
15832 + }
15833 +
15834 return retval;
15835 }
15836
15837 @@ -230,6 +248,13 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
15838 }
15839 }
15840
15841 +#ifdef CONFIG_PAX_SEGMEXEC
15842 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
15843 + error = -EINVAL;
15844 + goto out_unlock;
15845 + }
15846 +#endif
15847 +
15848 fill_ldt(&ldt, &ldt_info);
15849 if (oldmode)
15850 ldt.avl = 0;
15851 diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
15852 index a3fa43b..8966f4c 100644
15853 --- a/arch/x86/kernel/machine_kexec_32.c
15854 +++ b/arch/x86/kernel/machine_kexec_32.c
15855 @@ -27,7 +27,7 @@
15856 #include <asm/cacheflush.h>
15857 #include <asm/debugreg.h>
15858
15859 -static void set_idt(void *newidt, __u16 limit)
15860 +static void set_idt(struct desc_struct *newidt, __u16 limit)
15861 {
15862 struct desc_ptr curidt;
15863
15864 @@ -39,7 +39,7 @@ static void set_idt(void *newidt, __u16 limit)
15865 }
15866
15867
15868 -static void set_gdt(void *newgdt, __u16 limit)
15869 +static void set_gdt(struct desc_struct *newgdt, __u16 limit)
15870 {
15871 struct desc_ptr curgdt;
15872
15873 @@ -217,7 +217,7 @@ void machine_kexec(struct kimage *image)
15874 }
15875
15876 control_page = page_address(image->control_code_page);
15877 - memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
15878 + memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
15879
15880 relocate_kernel_ptr = control_page;
15881 page_list[PA_CONTROL_PAGE] = __pa(control_page);
15882 diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c
15883 index 3ca42d0..7cff8cc 100644
15884 --- a/arch/x86/kernel/microcode_intel.c
15885 +++ b/arch/x86/kernel/microcode_intel.c
15886 @@ -436,13 +436,13 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device)
15887
15888 static int get_ucode_user(void *to, const void *from, size_t n)
15889 {
15890 - return copy_from_user(to, from, n);
15891 + return copy_from_user(to, (const void __force_user *)from, n);
15892 }
15893
15894 static enum ucode_state
15895 request_microcode_user(int cpu, const void __user *buf, size_t size)
15896 {
15897 - return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
15898 + return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
15899 }
15900
15901 static void microcode_fini_cpu(int cpu)
15902 diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
15903 index 925179f..267ac7a 100644
15904 --- a/arch/x86/kernel/module.c
15905 +++ b/arch/x86/kernel/module.c
15906 @@ -36,15 +36,60 @@
15907 #define DEBUGP(fmt...)
15908 #endif
15909
15910 -void *module_alloc(unsigned long size)
15911 +static inline void *__module_alloc(unsigned long size, pgprot_t prot)
15912 {
15913 - if (PAGE_ALIGN(size) > MODULES_LEN)
15914 + if (size == 0 || PAGE_ALIGN(size) > MODULES_LEN)
15915 return NULL;
15916 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
15917 - GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
15918 + GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot,
15919 -1, __builtin_return_address(0));
15920 }
15921
15922 +void *module_alloc(unsigned long size)
15923 +{
15924 +
15925 +#ifdef CONFIG_PAX_KERNEXEC
15926 + return __module_alloc(size, PAGE_KERNEL);
15927 +#else
15928 + return __module_alloc(size, PAGE_KERNEL_EXEC);
15929 +#endif
15930 +
15931 +}
15932 +
15933 +#ifdef CONFIG_PAX_KERNEXEC
15934 +#ifdef CONFIG_X86_32
15935 +void *module_alloc_exec(unsigned long size)
15936 +{
15937 + struct vm_struct *area;
15938 +
15939 + if (size == 0)
15940 + return NULL;
15941 +
15942 + area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
15943 + return area ? area->addr : NULL;
15944 +}
15945 +EXPORT_SYMBOL(module_alloc_exec);
15946 +
15947 +void module_free_exec(struct module *mod, void *module_region)
15948 +{
15949 + vunmap(module_region);
15950 +}
15951 +EXPORT_SYMBOL(module_free_exec);
15952 +#else
15953 +void module_free_exec(struct module *mod, void *module_region)
15954 +{
15955 + module_free(mod, module_region);
15956 +}
15957 +EXPORT_SYMBOL(module_free_exec);
15958 +
15959 +void *module_alloc_exec(unsigned long size)
15960 +{
15961 + return __module_alloc(size, PAGE_KERNEL_RX);
15962 +}
15963 +EXPORT_SYMBOL(module_alloc_exec);
15964 +#endif
15965 +#endif
15966 +
15967 #ifdef CONFIG_X86_32
15968 int apply_relocate(Elf32_Shdr *sechdrs,
15969 const char *strtab,
15970 @@ -55,14 +100,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
15971 unsigned int i;
15972 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
15973 Elf32_Sym *sym;
15974 - uint32_t *location;
15975 + uint32_t *plocation, location;
15976
15977 DEBUGP("Applying relocate section %u to %u\n", relsec,
15978 sechdrs[relsec].sh_info);
15979 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
15980 /* This is where to make the change */
15981 - location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
15982 - + rel[i].r_offset;
15983 + plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
15984 + location = (uint32_t)plocation;
15985 + if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
15986 + plocation = ktla_ktva((void *)plocation);
15987 /* This is the symbol it is referring to. Note that all
15988 undefined symbols have been resolved. */
15989 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
15990 @@ -71,11 +118,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
15991 switch (ELF32_R_TYPE(rel[i].r_info)) {
15992 case R_386_32:
15993 /* We add the value into the location given */
15994 - *location += sym->st_value;
15995 + pax_open_kernel();
15996 + *plocation += sym->st_value;
15997 + pax_close_kernel();
15998 break;
15999 case R_386_PC32:
16000 /* Add the value, subtract its postition */
16001 - *location += sym->st_value - (uint32_t)location;
16002 + pax_open_kernel();
16003 + *plocation += sym->st_value - location;
16004 + pax_close_kernel();
16005 break;
16006 default:
16007 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
16008 @@ -120,21 +171,30 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
16009 case R_X86_64_NONE:
16010 break;
16011 case R_X86_64_64:
16012 + pax_open_kernel();
16013 *(u64 *)loc = val;
16014 + pax_close_kernel();
16015 break;
16016 case R_X86_64_32:
16017 + pax_open_kernel();
16018 *(u32 *)loc = val;
16019 + pax_close_kernel();
16020 if (val != *(u32 *)loc)
16021 goto overflow;
16022 break;
16023 case R_X86_64_32S:
16024 + pax_open_kernel();
16025 *(s32 *)loc = val;
16026 + pax_close_kernel();
16027 if ((s64)val != *(s32 *)loc)
16028 goto overflow;
16029 break;
16030 case R_X86_64_PC32:
16031 val -= (u64)loc;
16032 + pax_open_kernel();
16033 *(u32 *)loc = val;
16034 + pax_close_kernel();
16035 +
16036 #if 0
16037 if ((s64)val != *(s32 *)loc)
16038 goto overflow;
16039 diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
16040 index e88f37b..1353db6 100644
16041 --- a/arch/x86/kernel/nmi.c
16042 +++ b/arch/x86/kernel/nmi.c
16043 @@ -408,6 +408,17 @@ static notrace __kprobes void default_do_nmi(struct pt_regs *regs)
16044 dotraplinkage notrace __kprobes void
16045 do_nmi(struct pt_regs *regs, long error_code)
16046 {
16047 +
16048 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16049 + if (!user_mode(regs)) {
16050 + unsigned long cs = regs->cs & 0xFFFF;
16051 + unsigned long ip = ktva_ktla(regs->ip);
16052 +
16053 + if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
16054 + regs->ip = ip;
16055 + }
16056 +#endif
16057 +
16058 nmi_enter();
16059
16060 inc_irq_stat(__nmi_count);
16061 diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
16062 index 676b8c7..870ba04 100644
16063 --- a/arch/x86/kernel/paravirt-spinlocks.c
16064 +++ b/arch/x86/kernel/paravirt-spinlocks.c
16065 @@ -13,7 +13,7 @@ default_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
16066 arch_spin_lock(lock);
16067 }
16068
16069 -struct pv_lock_ops pv_lock_ops = {
16070 +struct pv_lock_ops pv_lock_ops __read_only = {
16071 #ifdef CONFIG_SMP
16072 .spin_is_locked = __ticket_spin_is_locked,
16073 .spin_is_contended = __ticket_spin_is_contended,
16074 diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
16075 index d90272e..6bb013b 100644
16076 --- a/arch/x86/kernel/paravirt.c
16077 +++ b/arch/x86/kernel/paravirt.c
16078 @@ -53,6 +53,9 @@ u64 _paravirt_ident_64(u64 x)
16079 {
16080 return x;
16081 }
16082 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
16083 +PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
16084 +#endif
16085
16086 void __init default_banner(void)
16087 {
16088 @@ -145,15 +148,19 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
16089 if (opfunc == NULL)
16090 /* If there's no function, patch it with a ud2a (BUG) */
16091 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
16092 - else if (opfunc == _paravirt_nop)
16093 + else if (opfunc == (void *)_paravirt_nop)
16094 /* If the operation is a nop, then nop the callsite */
16095 ret = paravirt_patch_nop();
16096
16097 /* identity functions just return their single argument */
16098 - else if (opfunc == _paravirt_ident_32)
16099 + else if (opfunc == (void *)_paravirt_ident_32)
16100 ret = paravirt_patch_ident_32(insnbuf, len);
16101 - else if (opfunc == _paravirt_ident_64)
16102 + else if (opfunc == (void *)_paravirt_ident_64)
16103 ret = paravirt_patch_ident_64(insnbuf, len);
16104 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
16105 + else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
16106 + ret = paravirt_patch_ident_64(insnbuf, len);
16107 +#endif
16108
16109 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
16110 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
16111 @@ -178,7 +185,7 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
16112 if (insn_len > len || start == NULL)
16113 insn_len = len;
16114 else
16115 - memcpy(insnbuf, start, insn_len);
16116 + memcpy(insnbuf, ktla_ktva(start), insn_len);
16117
16118 return insn_len;
16119 }
16120 @@ -302,7 +309,7 @@ void arch_flush_lazy_mmu_mode(void)
16121 preempt_enable();
16122 }
16123
16124 -struct pv_info pv_info = {
16125 +struct pv_info pv_info __read_only = {
16126 .name = "bare hardware",
16127 .paravirt_enabled = 0,
16128 .kernel_rpl = 0,
16129 @@ -313,16 +320,16 @@ struct pv_info pv_info = {
16130 #endif
16131 };
16132
16133 -struct pv_init_ops pv_init_ops = {
16134 +struct pv_init_ops pv_init_ops __read_only = {
16135 .patch = native_patch,
16136 };
16137
16138 -struct pv_time_ops pv_time_ops = {
16139 +struct pv_time_ops pv_time_ops __read_only = {
16140 .sched_clock = native_sched_clock,
16141 .steal_clock = native_steal_clock,
16142 };
16143
16144 -struct pv_irq_ops pv_irq_ops = {
16145 +struct pv_irq_ops pv_irq_ops __read_only = {
16146 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
16147 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
16148 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
16149 @@ -334,7 +341,7 @@ struct pv_irq_ops pv_irq_ops = {
16150 #endif
16151 };
16152
16153 -struct pv_cpu_ops pv_cpu_ops = {
16154 +struct pv_cpu_ops pv_cpu_ops __read_only = {
16155 .cpuid = native_cpuid,
16156 .get_debugreg = native_get_debugreg,
16157 .set_debugreg = native_set_debugreg,
16158 @@ -395,21 +402,26 @@ struct pv_cpu_ops pv_cpu_ops = {
16159 .end_context_switch = paravirt_nop,
16160 };
16161
16162 -struct pv_apic_ops pv_apic_ops = {
16163 +struct pv_apic_ops pv_apic_ops __read_only = {
16164 #ifdef CONFIG_X86_LOCAL_APIC
16165 .startup_ipi_hook = paravirt_nop,
16166 #endif
16167 };
16168
16169 -#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
16170 +#ifdef CONFIG_X86_32
16171 +#ifdef CONFIG_X86_PAE
16172 +/* 64-bit pagetable entries */
16173 +#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
16174 +#else
16175 /* 32-bit pagetable entries */
16176 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
16177 +#endif
16178 #else
16179 /* 64-bit pagetable entries */
16180 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
16181 #endif
16182
16183 -struct pv_mmu_ops pv_mmu_ops = {
16184 +struct pv_mmu_ops pv_mmu_ops __read_only = {
16185
16186 .read_cr2 = native_read_cr2,
16187 .write_cr2 = native_write_cr2,
16188 @@ -459,6 +471,7 @@ struct pv_mmu_ops pv_mmu_ops = {
16189 .make_pud = PTE_IDENT,
16190
16191 .set_pgd = native_set_pgd,
16192 + .set_pgd_batched = native_set_pgd_batched,
16193 #endif
16194 #endif /* PAGETABLE_LEVELS >= 3 */
16195
16196 @@ -478,6 +491,12 @@ struct pv_mmu_ops pv_mmu_ops = {
16197 },
16198
16199 .set_fixmap = native_set_fixmap,
16200 +
16201 +#ifdef CONFIG_PAX_KERNEXEC
16202 + .pax_open_kernel = native_pax_open_kernel,
16203 + .pax_close_kernel = native_pax_close_kernel,
16204 +#endif
16205 +
16206 };
16207
16208 EXPORT_SYMBOL_GPL(pv_time_ops);
16209 diff --git a/arch/x86/kernel/pci-iommu_table.c b/arch/x86/kernel/pci-iommu_table.c
16210 index 35ccf75..7a15747 100644
16211 --- a/arch/x86/kernel/pci-iommu_table.c
16212 +++ b/arch/x86/kernel/pci-iommu_table.c
16213 @@ -2,7 +2,7 @@
16214 #include <asm/iommu_table.h>
16215 #include <linux/string.h>
16216 #include <linux/kallsyms.h>
16217 -
16218 +#include <linux/sched.h>
16219
16220 #define DEBUG 1
16221
16222 diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
16223 index ee5d4fb..426649b 100644
16224 --- a/arch/x86/kernel/process.c
16225 +++ b/arch/x86/kernel/process.c
16226 @@ -48,16 +48,33 @@ void free_thread_xstate(struct task_struct *tsk)
16227
16228 void free_thread_info(struct thread_info *ti)
16229 {
16230 - free_thread_xstate(ti->task);
16231 free_pages((unsigned long)ti, THREAD_ORDER);
16232 }
16233
16234 +static struct kmem_cache *task_struct_cachep;
16235 +
16236 void arch_task_cache_init(void)
16237 {
16238 - task_xstate_cachep =
16239 - kmem_cache_create("task_xstate", xstate_size,
16240 + /* create a slab on which task_structs can be allocated */
16241 + task_struct_cachep =
16242 + kmem_cache_create("task_struct", sizeof(struct task_struct),
16243 + ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
16244 +
16245 + task_xstate_cachep =
16246 + kmem_cache_create("task_xstate", xstate_size,
16247 __alignof__(union thread_xstate),
16248 - SLAB_PANIC | SLAB_NOTRACK, NULL);
16249 + SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
16250 +}
16251 +
16252 +struct task_struct *alloc_task_struct_node(int node)
16253 +{
16254 + return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node);
16255 +}
16256 +
16257 +void free_task_struct(struct task_struct *task)
16258 +{
16259 + free_thread_xstate(task);
16260 + kmem_cache_free(task_struct_cachep, task);
16261 }
16262
16263 /*
16264 @@ -70,7 +87,7 @@ void exit_thread(void)
16265 unsigned long *bp = t->io_bitmap_ptr;
16266
16267 if (bp) {
16268 - struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
16269 + struct tss_struct *tss = init_tss + get_cpu();
16270
16271 t->io_bitmap_ptr = NULL;
16272 clear_thread_flag(TIF_IO_BITMAP);
16273 @@ -106,7 +123,7 @@ void show_regs_common(void)
16274
16275 printk(KERN_CONT "\n");
16276 printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s",
16277 - current->pid, current->comm, print_tainted(),
16278 + task_pid_nr(current), current->comm, print_tainted(),
16279 init_utsname()->release,
16280 (int)strcspn(init_utsname()->version, " "),
16281 init_utsname()->version);
16282 @@ -120,6 +137,9 @@ void flush_thread(void)
16283 {
16284 struct task_struct *tsk = current;
16285
16286 +#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
16287 + loadsegment(gs, 0);
16288 +#endif
16289 flush_ptrace_hw_breakpoint(tsk);
16290 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
16291 /*
16292 @@ -282,10 +302,10 @@ int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
16293 regs.di = (unsigned long) arg;
16294
16295 #ifdef CONFIG_X86_32
16296 - regs.ds = __USER_DS;
16297 - regs.es = __USER_DS;
16298 + regs.ds = __KERNEL_DS;
16299 + regs.es = __KERNEL_DS;
16300 regs.fs = __KERNEL_PERCPU;
16301 - regs.gs = __KERNEL_STACK_CANARY;
16302 + savesegment(gs, regs.gs);
16303 #else
16304 regs.ss = __KERNEL_DS;
16305 #endif
16306 @@ -411,7 +431,7 @@ bool set_pm_idle_to_default(void)
16307
16308 return ret;
16309 }
16310 -void stop_this_cpu(void *dummy)
16311 +__noreturn void stop_this_cpu(void *dummy)
16312 {
16313 local_irq_disable();
16314 /*
16315 @@ -653,16 +673,37 @@ static int __init idle_setup(char *str)
16316 }
16317 early_param("idle", idle_setup);
16318
16319 -unsigned long arch_align_stack(unsigned long sp)
16320 +#ifdef CONFIG_PAX_RANDKSTACK
16321 +void pax_randomize_kstack(struct pt_regs *regs)
16322 {
16323 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
16324 - sp -= get_random_int() % 8192;
16325 - return sp & ~0xf;
16326 -}
16327 + struct thread_struct *thread = &current->thread;
16328 + unsigned long time;
16329
16330 -unsigned long arch_randomize_brk(struct mm_struct *mm)
16331 -{
16332 - unsigned long range_end = mm->brk + 0x02000000;
16333 - return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
16334 -}
16335 + if (!randomize_va_space)
16336 + return;
16337 +
16338 + if (v8086_mode(regs))
16339 + return;
16340
16341 + rdtscl(time);
16342 +
16343 + /* P4 seems to return a 0 LSB, ignore it */
16344 +#ifdef CONFIG_MPENTIUM4
16345 + time &= 0x3EUL;
16346 + time <<= 2;
16347 +#elif defined(CONFIG_X86_64)
16348 + time &= 0xFUL;
16349 + time <<= 4;
16350 +#else
16351 + time &= 0x1FUL;
16352 + time <<= 3;
16353 +#endif
16354 +
16355 + thread->sp0 ^= time;
16356 + load_sp0(init_tss + smp_processor_id(), thread);
16357 +
16358 +#ifdef CONFIG_X86_64
16359 + percpu_write(kernel_stack, thread->sp0);
16360 +#endif
16361 +}
16362 +#endif
16363 diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
16364 index 795b79f..063767a 100644
16365 --- a/arch/x86/kernel/process_32.c
16366 +++ b/arch/x86/kernel/process_32.c
16367 @@ -67,6 +67,7 @@ asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
16368 unsigned long thread_saved_pc(struct task_struct *tsk)
16369 {
16370 return ((unsigned long *)tsk->thread.sp)[3];
16371 +//XXX return tsk->thread.eip;
16372 }
16373
16374 #ifndef CONFIG_SMP
16375 @@ -130,15 +131,14 @@ void __show_regs(struct pt_regs *regs, int all)
16376 unsigned long sp;
16377 unsigned short ss, gs;
16378
16379 - if (user_mode_vm(regs)) {
16380 + if (user_mode(regs)) {
16381 sp = regs->sp;
16382 ss = regs->ss & 0xffff;
16383 - gs = get_user_gs(regs);
16384 } else {
16385 sp = kernel_stack_pointer(regs);
16386 savesegment(ss, ss);
16387 - savesegment(gs, gs);
16388 }
16389 + gs = get_user_gs(regs);
16390
16391 show_regs_common();
16392
16393 @@ -200,13 +200,14 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
16394 struct task_struct *tsk;
16395 int err;
16396
16397 - childregs = task_pt_regs(p);
16398 + childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
16399 *childregs = *regs;
16400 childregs->ax = 0;
16401 childregs->sp = sp;
16402
16403 p->thread.sp = (unsigned long) childregs;
16404 p->thread.sp0 = (unsigned long) (childregs+1);
16405 + p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
16406
16407 p->thread.ip = (unsigned long) ret_from_fork;
16408
16409 @@ -296,7 +297,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
16410 struct thread_struct *prev = &prev_p->thread,
16411 *next = &next_p->thread;
16412 int cpu = smp_processor_id();
16413 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
16414 + struct tss_struct *tss = init_tss + cpu;
16415 bool preload_fpu;
16416
16417 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
16418 @@ -331,6 +332,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
16419 */
16420 lazy_save_gs(prev->gs);
16421
16422 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16423 + __set_fs(task_thread_info(next_p)->addr_limit);
16424 +#endif
16425 +
16426 /*
16427 * Load the per-thread Thread-Local Storage descriptor.
16428 */
16429 @@ -366,6 +371,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
16430 */
16431 arch_end_context_switch(next_p);
16432
16433 + percpu_write(current_task, next_p);
16434 + percpu_write(current_tinfo, &next_p->tinfo);
16435 +
16436 if (preload_fpu)
16437 __math_state_restore();
16438
16439 @@ -375,8 +383,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
16440 if (prev->gs | next->gs)
16441 lazy_load_gs(next->gs);
16442
16443 - percpu_write(current_task, next_p);
16444 -
16445 return prev_p;
16446 }
16447
16448 @@ -406,4 +412,3 @@ unsigned long get_wchan(struct task_struct *p)
16449 } while (count++ < 16);
16450 return 0;
16451 }
16452 -
16453 diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
16454 index 3bd7e6e..90b2bcf 100644
16455 --- a/arch/x86/kernel/process_64.c
16456 +++ b/arch/x86/kernel/process_64.c
16457 @@ -89,7 +89,7 @@ static void __exit_idle(void)
16458 void exit_idle(void)
16459 {
16460 /* idle loop has pid 0 */
16461 - if (current->pid)
16462 + if (task_pid_nr(current))
16463 return;
16464 __exit_idle();
16465 }
16466 @@ -264,8 +264,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
16467 struct pt_regs *childregs;
16468 struct task_struct *me = current;
16469
16470 - childregs = ((struct pt_regs *)
16471 - (THREAD_SIZE + task_stack_page(p))) - 1;
16472 + childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 16;
16473 *childregs = *regs;
16474
16475 childregs->ax = 0;
16476 @@ -277,6 +276,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
16477 p->thread.sp = (unsigned long) childregs;
16478 p->thread.sp0 = (unsigned long) (childregs+1);
16479 p->thread.usersp = me->thread.usersp;
16480 + p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
16481
16482 set_tsk_thread_flag(p, TIF_FORK);
16483
16484 @@ -379,7 +379,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
16485 struct thread_struct *prev = &prev_p->thread;
16486 struct thread_struct *next = &next_p->thread;
16487 int cpu = smp_processor_id();
16488 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
16489 + struct tss_struct *tss = init_tss + cpu;
16490 unsigned fsindex, gsindex;
16491 bool preload_fpu;
16492
16493 @@ -475,10 +475,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
16494 prev->usersp = percpu_read(old_rsp);
16495 percpu_write(old_rsp, next->usersp);
16496 percpu_write(current_task, next_p);
16497 + percpu_write(current_tinfo, &next_p->tinfo);
16498
16499 - percpu_write(kernel_stack,
16500 - (unsigned long)task_stack_page(next_p) +
16501 - THREAD_SIZE - KERNEL_STACK_OFFSET);
16502 + percpu_write(kernel_stack, next->sp0);
16503
16504 /*
16505 * Now maybe reload the debug registers and handle I/O bitmaps
16506 @@ -540,12 +539,11 @@ unsigned long get_wchan(struct task_struct *p)
16507 if (!p || p == current || p->state == TASK_RUNNING)
16508 return 0;
16509 stack = (unsigned long)task_stack_page(p);
16510 - if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
16511 + if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
16512 return 0;
16513 fp = *(u64 *)(p->thread.sp);
16514 do {
16515 - if (fp < (unsigned long)stack ||
16516 - fp >= (unsigned long)stack+THREAD_SIZE)
16517 + if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
16518 return 0;
16519 ip = *(u64 *)(fp+8);
16520 if (!in_sched_functions(ip))
16521 diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
16522 index 8252879..d3219e0 100644
16523 --- a/arch/x86/kernel/ptrace.c
16524 +++ b/arch/x86/kernel/ptrace.c
16525 @@ -822,7 +822,7 @@ long arch_ptrace(struct task_struct *child, long request,
16526 unsigned long addr, unsigned long data)
16527 {
16528 int ret;
16529 - unsigned long __user *datap = (unsigned long __user *)data;
16530 + unsigned long __user *datap = (__force unsigned long __user *)data;
16531
16532 switch (request) {
16533 /* read the word at location addr in the USER area. */
16534 @@ -907,14 +907,14 @@ long arch_ptrace(struct task_struct *child, long request,
16535 if ((int) addr < 0)
16536 return -EIO;
16537 ret = do_get_thread_area(child, addr,
16538 - (struct user_desc __user *)data);
16539 + (__force struct user_desc __user *) data);
16540 break;
16541
16542 case PTRACE_SET_THREAD_AREA:
16543 if ((int) addr < 0)
16544 return -EIO;
16545 ret = do_set_thread_area(child, addr,
16546 - (struct user_desc __user *)data, 0);
16547 + (__force struct user_desc __user *) data, 0);
16548 break;
16549 #endif
16550
16551 @@ -1331,7 +1331,7 @@ static void fill_sigtrap_info(struct task_struct *tsk,
16552 memset(info, 0, sizeof(*info));
16553 info->si_signo = SIGTRAP;
16554 info->si_code = si_code;
16555 - info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
16556 + info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
16557 }
16558
16559 void user_single_step_siginfo(struct task_struct *tsk,
16560 diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
16561 index 42eb330..139955c 100644
16562 --- a/arch/x86/kernel/pvclock.c
16563 +++ b/arch/x86/kernel/pvclock.c
16564 @@ -81,11 +81,11 @@ unsigned long pvclock_tsc_khz(struct pvclock_vcpu_time_info *src)
16565 return pv_tsc_khz;
16566 }
16567
16568 -static atomic64_t last_value = ATOMIC64_INIT(0);
16569 +static atomic64_unchecked_t last_value = ATOMIC64_INIT(0);
16570
16571 void pvclock_resume(void)
16572 {
16573 - atomic64_set(&last_value, 0);
16574 + atomic64_set_unchecked(&last_value, 0);
16575 }
16576
16577 cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
16578 @@ -121,11 +121,11 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
16579 * updating at the same time, and one of them could be slightly behind,
16580 * making the assumption that last_value always go forward fail to hold.
16581 */
16582 - last = atomic64_read(&last_value);
16583 + last = atomic64_read_unchecked(&last_value);
16584 do {
16585 if (ret < last)
16586 return last;
16587 - last = atomic64_cmpxchg(&last_value, last, ret);
16588 + last = atomic64_cmpxchg_unchecked(&last_value, last, ret);
16589 } while (unlikely(last != ret));
16590
16591 return ret;
16592 diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
16593 index 37a458b..e63d183 100644
16594 --- a/arch/x86/kernel/reboot.c
16595 +++ b/arch/x86/kernel/reboot.c
16596 @@ -35,7 +35,7 @@ void (*pm_power_off)(void);
16597 EXPORT_SYMBOL(pm_power_off);
16598
16599 static const struct desc_ptr no_idt = {};
16600 -static int reboot_mode;
16601 +static unsigned short reboot_mode;
16602 enum reboot_type reboot_type = BOOT_ACPI;
16603 int reboot_force;
16604
16605 @@ -324,13 +324,17 @@ core_initcall(reboot_init);
16606 extern const unsigned char machine_real_restart_asm[];
16607 extern const u64 machine_real_restart_gdt[3];
16608
16609 -void machine_real_restart(unsigned int type)
16610 +__noreturn void machine_real_restart(unsigned int type)
16611 {
16612 void *restart_va;
16613 unsigned long restart_pa;
16614 - void (*restart_lowmem)(unsigned int);
16615 + void (* __noreturn restart_lowmem)(unsigned int);
16616 u64 *lowmem_gdt;
16617
16618 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
16619 + struct desc_struct *gdt;
16620 +#endif
16621 +
16622 local_irq_disable();
16623
16624 /* Write zero to CMOS register number 0x0f, which the BIOS POST
16625 @@ -356,14 +360,14 @@ void machine_real_restart(unsigned int type)
16626 boot)". This seems like a fairly standard thing that gets set by
16627 REBOOT.COM programs, and the previous reset routine did this
16628 too. */
16629 - *((unsigned short *)0x472) = reboot_mode;
16630 + *(unsigned short *)(__va(0x472)) = reboot_mode;
16631
16632 /* Patch the GDT in the low memory trampoline */
16633 lowmem_gdt = TRAMPOLINE_SYM(machine_real_restart_gdt);
16634
16635 restart_va = TRAMPOLINE_SYM(machine_real_restart_asm);
16636 restart_pa = virt_to_phys(restart_va);
16637 - restart_lowmem = (void (*)(unsigned int))restart_pa;
16638 + restart_lowmem = (void *)restart_pa;
16639
16640 /* GDT[0]: GDT self-pointer */
16641 lowmem_gdt[0] =
16642 @@ -374,7 +378,33 @@ void machine_real_restart(unsigned int type)
16643 GDT_ENTRY(0x009b, restart_pa, 0xffff);
16644
16645 /* Jump to the identity-mapped low memory code */
16646 +
16647 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
16648 + gdt = get_cpu_gdt_table(smp_processor_id());
16649 + pax_open_kernel();
16650 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16651 + gdt[GDT_ENTRY_KERNEL_DS].type = 3;
16652 + gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
16653 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
16654 +#endif
16655 +#ifdef CONFIG_PAX_KERNEXEC
16656 + gdt[GDT_ENTRY_KERNEL_CS].base0 = 0;
16657 + gdt[GDT_ENTRY_KERNEL_CS].base1 = 0;
16658 + gdt[GDT_ENTRY_KERNEL_CS].base2 = 0;
16659 + gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff;
16660 + gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf;
16661 + gdt[GDT_ENTRY_KERNEL_CS].g = 1;
16662 +#endif
16663 + pax_close_kernel();
16664 +#endif
16665 +
16666 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16667 + asm volatile("push %0; push %1; lret\n" : : "i" (__KERNEL_CS), "rm" (restart_lowmem), "a" (type));
16668 + unreachable();
16669 +#else
16670 restart_lowmem(type);
16671 +#endif
16672 +
16673 }
16674 #ifdef CONFIG_APM_MODULE
16675 EXPORT_SYMBOL(machine_real_restart);
16676 @@ -540,7 +570,7 @@ void __attribute__((weak)) mach_reboot_fixups(void)
16677 * try to force a triple fault and then cycle between hitting the keyboard
16678 * controller and doing that
16679 */
16680 -static void native_machine_emergency_restart(void)
16681 +__noreturn static void native_machine_emergency_restart(void)
16682 {
16683 int i;
16684 int attempt = 0;
16685 @@ -664,13 +694,13 @@ void native_machine_shutdown(void)
16686 #endif
16687 }
16688
16689 -static void __machine_emergency_restart(int emergency)
16690 +static __noreturn void __machine_emergency_restart(int emergency)
16691 {
16692 reboot_emergency = emergency;
16693 machine_ops.emergency_restart();
16694 }
16695
16696 -static void native_machine_restart(char *__unused)
16697 +static __noreturn void native_machine_restart(char *__unused)
16698 {
16699 printk("machine restart\n");
16700
16701 @@ -679,7 +709,7 @@ static void native_machine_restart(char *__unused)
16702 __machine_emergency_restart(0);
16703 }
16704
16705 -static void native_machine_halt(void)
16706 +static __noreturn void native_machine_halt(void)
16707 {
16708 /* stop other cpus and apics */
16709 machine_shutdown();
16710 @@ -690,7 +720,7 @@ static void native_machine_halt(void)
16711 stop_this_cpu(NULL);
16712 }
16713
16714 -static void native_machine_power_off(void)
16715 +__noreturn static void native_machine_power_off(void)
16716 {
16717 if (pm_power_off) {
16718 if (!reboot_force)
16719 @@ -699,6 +729,7 @@ static void native_machine_power_off(void)
16720 }
16721 /* a fallback in case there is no PM info available */
16722 tboot_shutdown(TB_SHUTDOWN_HALT);
16723 + unreachable();
16724 }
16725
16726 struct machine_ops machine_ops = {
16727 diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
16728 index 7a6f3b3..bed145d7 100644
16729 --- a/arch/x86/kernel/relocate_kernel_64.S
16730 +++ b/arch/x86/kernel/relocate_kernel_64.S
16731 @@ -11,6 +11,7 @@
16732 #include <asm/kexec.h>
16733 #include <asm/processor-flags.h>
16734 #include <asm/pgtable_types.h>
16735 +#include <asm/alternative-asm.h>
16736
16737 /*
16738 * Must be relocatable PIC code callable as a C function
16739 @@ -160,13 +161,14 @@ identity_mapped:
16740 xorq %rbp, %rbp
16741 xorq %r8, %r8
16742 xorq %r9, %r9
16743 - xorq %r10, %r9
16744 + xorq %r10, %r10
16745 xorq %r11, %r11
16746 xorq %r12, %r12
16747 xorq %r13, %r13
16748 xorq %r14, %r14
16749 xorq %r15, %r15
16750
16751 + pax_force_retaddr 0, 1
16752 ret
16753
16754 1:
16755 diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
16756 index cf0ef98..e3f780b 100644
16757 --- a/arch/x86/kernel/setup.c
16758 +++ b/arch/x86/kernel/setup.c
16759 @@ -447,7 +447,7 @@ static void __init parse_setup_data(void)
16760
16761 switch (data->type) {
16762 case SETUP_E820_EXT:
16763 - parse_e820_ext(data);
16764 + parse_e820_ext((struct setup_data __force_kernel *)data);
16765 break;
16766 case SETUP_DTB:
16767 add_dtb(pa_data);
16768 @@ -650,7 +650,7 @@ static void __init trim_bios_range(void)
16769 * area (640->1Mb) as ram even though it is not.
16770 * take them out.
16771 */
16772 - e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
16773 + e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
16774 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
16775 }
16776
16777 @@ -773,14 +773,14 @@ void __init setup_arch(char **cmdline_p)
16778
16779 if (!boot_params.hdr.root_flags)
16780 root_mountflags &= ~MS_RDONLY;
16781 - init_mm.start_code = (unsigned long) _text;
16782 - init_mm.end_code = (unsigned long) _etext;
16783 + init_mm.start_code = ktla_ktva((unsigned long) _text);
16784 + init_mm.end_code = ktla_ktva((unsigned long) _etext);
16785 init_mm.end_data = (unsigned long) _edata;
16786 init_mm.brk = _brk_end;
16787
16788 - code_resource.start = virt_to_phys(_text);
16789 - code_resource.end = virt_to_phys(_etext)-1;
16790 - data_resource.start = virt_to_phys(_etext);
16791 + code_resource.start = virt_to_phys(ktla_ktva(_text));
16792 + code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
16793 + data_resource.start = virt_to_phys(_sdata);
16794 data_resource.end = virt_to_phys(_edata)-1;
16795 bss_resource.start = virt_to_phys(&__bss_start);
16796 bss_resource.end = virt_to_phys(&__bss_stop)-1;
16797 diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
16798 index 71f4727..16dc9f7 100644
16799 --- a/arch/x86/kernel/setup_percpu.c
16800 +++ b/arch/x86/kernel/setup_percpu.c
16801 @@ -21,19 +21,17 @@
16802 #include <asm/cpu.h>
16803 #include <asm/stackprotector.h>
16804
16805 -DEFINE_PER_CPU(int, cpu_number);
16806 +#ifdef CONFIG_SMP
16807 +DEFINE_PER_CPU(unsigned int, cpu_number);
16808 EXPORT_PER_CPU_SYMBOL(cpu_number);
16809 +#endif
16810
16811 -#ifdef CONFIG_X86_64
16812 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
16813 -#else
16814 -#define BOOT_PERCPU_OFFSET 0
16815 -#endif
16816
16817 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
16818 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
16819
16820 -unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
16821 +unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
16822 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
16823 };
16824 EXPORT_SYMBOL(__per_cpu_offset);
16825 @@ -155,10 +153,10 @@ static inline void setup_percpu_segment(int cpu)
16826 {
16827 #ifdef CONFIG_X86_32
16828 struct desc_struct gdt;
16829 + unsigned long base = per_cpu_offset(cpu);
16830
16831 - pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
16832 - 0x2 | DESCTYPE_S, 0x8);
16833 - gdt.s = 1;
16834 + pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
16835 + 0x83 | DESCTYPE_S, 0xC);
16836 write_gdt_entry(get_cpu_gdt_table(cpu),
16837 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
16838 #endif
16839 @@ -207,6 +205,11 @@ void __init setup_per_cpu_areas(void)
16840 /* alrighty, percpu areas up and running */
16841 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
16842 for_each_possible_cpu(cpu) {
16843 +#ifdef CONFIG_CC_STACKPROTECTOR
16844 +#ifdef CONFIG_X86_32
16845 + unsigned long canary = per_cpu(stack_canary.canary, cpu);
16846 +#endif
16847 +#endif
16848 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
16849 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
16850 per_cpu(cpu_number, cpu) = cpu;
16851 @@ -247,6 +250,12 @@ void __init setup_per_cpu_areas(void)
16852 */
16853 set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
16854 #endif
16855 +#ifdef CONFIG_CC_STACKPROTECTOR
16856 +#ifdef CONFIG_X86_32
16857 + if (!cpu)
16858 + per_cpu(stack_canary.canary, cpu) = canary;
16859 +#endif
16860 +#endif
16861 /*
16862 * Up to this point, the boot CPU has been using .init.data
16863 * area. Reload any changed state for the boot CPU.
16864 diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
16865 index 54ddaeb2..22c3bdc 100644
16866 --- a/arch/x86/kernel/signal.c
16867 +++ b/arch/x86/kernel/signal.c
16868 @@ -198,7 +198,7 @@ static unsigned long align_sigframe(unsigned long sp)
16869 * Align the stack pointer according to the i386 ABI,
16870 * i.e. so that on function entry ((sp + 4) & 15) == 0.
16871 */
16872 - sp = ((sp + 4) & -16ul) - 4;
16873 + sp = ((sp - 12) & -16ul) - 4;
16874 #else /* !CONFIG_X86_32 */
16875 sp = round_down(sp, 16) - 8;
16876 #endif
16877 @@ -249,11 +249,11 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
16878 * Return an always-bogus address instead so we will die with SIGSEGV.
16879 */
16880 if (onsigstack && !likely(on_sig_stack(sp)))
16881 - return (void __user *)-1L;
16882 + return (__force void __user *)-1L;
16883
16884 /* save i387 state */
16885 if (used_math() && save_i387_xstate(*fpstate) < 0)
16886 - return (void __user *)-1L;
16887 + return (__force void __user *)-1L;
16888
16889 return (void __user *)sp;
16890 }
16891 @@ -308,9 +308,9 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
16892 }
16893
16894 if (current->mm->context.vdso)
16895 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
16896 + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
16897 else
16898 - restorer = &frame->retcode;
16899 + restorer = (void __user *)&frame->retcode;
16900 if (ka->sa.sa_flags & SA_RESTORER)
16901 restorer = ka->sa.sa_restorer;
16902
16903 @@ -324,7 +324,7 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
16904 * reasons and because gdb uses it as a signature to notice
16905 * signal handler stack frames.
16906 */
16907 - err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
16908 + err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
16909
16910 if (err)
16911 return -EFAULT;
16912 @@ -378,7 +378,10 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
16913 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
16914
16915 /* Set up to return from userspace. */
16916 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
16917 + if (current->mm->context.vdso)
16918 + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
16919 + else
16920 + restorer = (void __user *)&frame->retcode;
16921 if (ka->sa.sa_flags & SA_RESTORER)
16922 restorer = ka->sa.sa_restorer;
16923 put_user_ex(restorer, &frame->pretcode);
16924 @@ -390,7 +393,7 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
16925 * reasons and because gdb uses it as a signature to notice
16926 * signal handler stack frames.
16927 */
16928 - put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
16929 + put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
16930 } put_user_catch(err);
16931
16932 if (err)
16933 @@ -769,7 +772,7 @@ static void do_signal(struct pt_regs *regs)
16934 * X86_32: vm86 regs switched out by assembly code before reaching
16935 * here, so testing against kernel CS suffices.
16936 */
16937 - if (!user_mode(regs))
16938 + if (!user_mode_novm(regs))
16939 return;
16940
16941 signr = get_signal_to_deliver(&info, &ka, regs, NULL);
16942 diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
16943 index 9f548cb..caf76f7 100644
16944 --- a/arch/x86/kernel/smpboot.c
16945 +++ b/arch/x86/kernel/smpboot.c
16946 @@ -709,17 +709,20 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
16947 set_idle_for_cpu(cpu, c_idle.idle);
16948 do_rest:
16949 per_cpu(current_task, cpu) = c_idle.idle;
16950 + per_cpu(current_tinfo, cpu) = &c_idle.idle->tinfo;
16951 #ifdef CONFIG_X86_32
16952 /* Stack for startup_32 can be just as for start_secondary onwards */
16953 irq_ctx_init(cpu);
16954 #else
16955 clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
16956 initial_gs = per_cpu_offset(cpu);
16957 - per_cpu(kernel_stack, cpu) =
16958 - (unsigned long)task_stack_page(c_idle.idle) -
16959 - KERNEL_STACK_OFFSET + THREAD_SIZE;
16960 + per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(c_idle.idle) - 16 + THREAD_SIZE;
16961 #endif
16962 +
16963 + pax_open_kernel();
16964 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
16965 + pax_close_kernel();
16966 +
16967 initial_code = (unsigned long)start_secondary;
16968 stack_start = c_idle.idle->thread.sp;
16969
16970 @@ -861,6 +864,12 @@ int __cpuinit native_cpu_up(unsigned int cpu)
16971
16972 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
16973
16974 +#ifdef CONFIG_PAX_PER_CPU_PGD
16975 + clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
16976 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
16977 + KERNEL_PGD_PTRS);
16978 +#endif
16979 +
16980 err = do_boot_cpu(apicid, cpu);
16981 if (err) {
16982 pr_debug("do_boot_cpu failed %d\n", err);
16983 diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
16984 index c346d11..d43b163 100644
16985 --- a/arch/x86/kernel/step.c
16986 +++ b/arch/x86/kernel/step.c
16987 @@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
16988 struct desc_struct *desc;
16989 unsigned long base;
16990
16991 - seg &= ~7UL;
16992 + seg >>= 3;
16993
16994 mutex_lock(&child->mm->context.lock);
16995 - if (unlikely((seg >> 3) >= child->mm->context.size))
16996 + if (unlikely(seg >= child->mm->context.size))
16997 addr = -1L; /* bogus selector, access would fault */
16998 else {
16999 desc = child->mm->context.ldt + seg;
17000 @@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
17001 addr += base;
17002 }
17003 mutex_unlock(&child->mm->context.lock);
17004 - }
17005 + } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
17006 + addr = ktla_ktva(addr);
17007
17008 return addr;
17009 }
17010 @@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
17011 unsigned char opcode[15];
17012 unsigned long addr = convert_ip_to_linear(child, regs);
17013
17014 + if (addr == -EINVAL)
17015 + return 0;
17016 +
17017 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
17018 for (i = 0; i < copied; i++) {
17019 switch (opcode[i]) {
17020 diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
17021 index 0b0cb5f..db6b9ed 100644
17022 --- a/arch/x86/kernel/sys_i386_32.c
17023 +++ b/arch/x86/kernel/sys_i386_32.c
17024 @@ -24,17 +24,224 @@
17025
17026 #include <asm/syscalls.h>
17027
17028 -/*
17029 - * Do a system call from kernel instead of calling sys_execve so we
17030 - * end up with proper pt_regs.
17031 - */
17032 -int kernel_execve(const char *filename,
17033 - const char *const argv[],
17034 - const char *const envp[])
17035 +int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
17036 {
17037 - long __res;
17038 - asm volatile ("int $0x80"
17039 - : "=a" (__res)
17040 - : "0" (__NR_execve), "b" (filename), "c" (argv), "d" (envp) : "memory");
17041 - return __res;
17042 + unsigned long pax_task_size = TASK_SIZE;
17043 +
17044 +#ifdef CONFIG_PAX_SEGMEXEC
17045 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
17046 + pax_task_size = SEGMEXEC_TASK_SIZE;
17047 +#endif
17048 +
17049 + if (len > pax_task_size || addr > pax_task_size - len)
17050 + return -EINVAL;
17051 +
17052 + return 0;
17053 +}
17054 +
17055 +unsigned long
17056 +arch_get_unmapped_area(struct file *filp, unsigned long addr,
17057 + unsigned long len, unsigned long pgoff, unsigned long flags)
17058 +{
17059 + struct mm_struct *mm = current->mm;
17060 + struct vm_area_struct *vma;
17061 + unsigned long start_addr, pax_task_size = TASK_SIZE;
17062 +
17063 +#ifdef CONFIG_PAX_SEGMEXEC
17064 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
17065 + pax_task_size = SEGMEXEC_TASK_SIZE;
17066 +#endif
17067 +
17068 + pax_task_size -= PAGE_SIZE;
17069 +
17070 + if (len > pax_task_size)
17071 + return -ENOMEM;
17072 +
17073 + if (flags & MAP_FIXED)
17074 + return addr;
17075 +
17076 +#ifdef CONFIG_PAX_RANDMMAP
17077 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
17078 +#endif
17079 +
17080 + if (addr) {
17081 + addr = PAGE_ALIGN(addr);
17082 + if (pax_task_size - len >= addr) {
17083 + vma = find_vma(mm, addr);
17084 + if (check_heap_stack_gap(vma, addr, len))
17085 + return addr;
17086 + }
17087 + }
17088 + if (len > mm->cached_hole_size) {
17089 + start_addr = addr = mm->free_area_cache;
17090 + } else {
17091 + start_addr = addr = mm->mmap_base;
17092 + mm->cached_hole_size = 0;
17093 + }
17094 +
17095 +#ifdef CONFIG_PAX_PAGEEXEC
17096 + if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
17097 + start_addr = 0x00110000UL;
17098 +
17099 +#ifdef CONFIG_PAX_RANDMMAP
17100 + if (mm->pax_flags & MF_PAX_RANDMMAP)
17101 + start_addr += mm->delta_mmap & 0x03FFF000UL;
17102 +#endif
17103 +
17104 + if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
17105 + start_addr = addr = mm->mmap_base;
17106 + else
17107 + addr = start_addr;
17108 + }
17109 +#endif
17110 +
17111 +full_search:
17112 + for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
17113 + /* At this point: (!vma || addr < vma->vm_end). */
17114 + if (pax_task_size - len < addr) {
17115 + /*
17116 + * Start a new search - just in case we missed
17117 + * some holes.
17118 + */
17119 + if (start_addr != mm->mmap_base) {
17120 + start_addr = addr = mm->mmap_base;
17121 + mm->cached_hole_size = 0;
17122 + goto full_search;
17123 + }
17124 + return -ENOMEM;
17125 + }
17126 + if (check_heap_stack_gap(vma, addr, len))
17127 + break;
17128 + if (addr + mm->cached_hole_size < vma->vm_start)
17129 + mm->cached_hole_size = vma->vm_start - addr;
17130 + addr = vma->vm_end;
17131 + if (mm->start_brk <= addr && addr < mm->mmap_base) {
17132 + start_addr = addr = mm->mmap_base;
17133 + mm->cached_hole_size = 0;
17134 + goto full_search;
17135 + }
17136 + }
17137 +
17138 + /*
17139 + * Remember the place where we stopped the search:
17140 + */
17141 + mm->free_area_cache = addr + len;
17142 + return addr;
17143 +}
17144 +
17145 +unsigned long
17146 +arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
17147 + const unsigned long len, const unsigned long pgoff,
17148 + const unsigned long flags)
17149 +{
17150 + struct vm_area_struct *vma;
17151 + struct mm_struct *mm = current->mm;
17152 + unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
17153 +
17154 +#ifdef CONFIG_PAX_SEGMEXEC
17155 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
17156 + pax_task_size = SEGMEXEC_TASK_SIZE;
17157 +#endif
17158 +
17159 + pax_task_size -= PAGE_SIZE;
17160 +
17161 + /* requested length too big for entire address space */
17162 + if (len > pax_task_size)
17163 + return -ENOMEM;
17164 +
17165 + if (flags & MAP_FIXED)
17166 + return addr;
17167 +
17168 +#ifdef CONFIG_PAX_PAGEEXEC
17169 + if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
17170 + goto bottomup;
17171 +#endif
17172 +
17173 +#ifdef CONFIG_PAX_RANDMMAP
17174 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
17175 +#endif
17176 +
17177 + /* requesting a specific address */
17178 + if (addr) {
17179 + addr = PAGE_ALIGN(addr);
17180 + if (pax_task_size - len >= addr) {
17181 + vma = find_vma(mm, addr);
17182 + if (check_heap_stack_gap(vma, addr, len))
17183 + return addr;
17184 + }
17185 + }
17186 +
17187 + /* check if free_area_cache is useful for us */
17188 + if (len <= mm->cached_hole_size) {
17189 + mm->cached_hole_size = 0;
17190 + mm->free_area_cache = mm->mmap_base;
17191 + }
17192 +
17193 + /* either no address requested or can't fit in requested address hole */
17194 + addr = mm->free_area_cache;
17195 +
17196 + /* make sure it can fit in the remaining address space */
17197 + if (addr > len) {
17198 + vma = find_vma(mm, addr-len);
17199 + if (check_heap_stack_gap(vma, addr - len, len))
17200 + /* remember the address as a hint for next time */
17201 + return (mm->free_area_cache = addr-len);
17202 + }
17203 +
17204 + if (mm->mmap_base < len)
17205 + goto bottomup;
17206 +
17207 + addr = mm->mmap_base-len;
17208 +
17209 + do {
17210 + /*
17211 + * Lookup failure means no vma is above this address,
17212 + * else if new region fits below vma->vm_start,
17213 + * return with success:
17214 + */
17215 + vma = find_vma(mm, addr);
17216 + if (check_heap_stack_gap(vma, addr, len))
17217 + /* remember the address as a hint for next time */
17218 + return (mm->free_area_cache = addr);
17219 +
17220 + /* remember the largest hole we saw so far */
17221 + if (addr + mm->cached_hole_size < vma->vm_start)
17222 + mm->cached_hole_size = vma->vm_start - addr;
17223 +
17224 + /* try just below the current vma->vm_start */
17225 + addr = skip_heap_stack_gap(vma, len);
17226 + } while (!IS_ERR_VALUE(addr));
17227 +
17228 +bottomup:
17229 + /*
17230 + * A failed mmap() very likely causes application failure,
17231 + * so fall back to the bottom-up function here. This scenario
17232 + * can happen with large stack limits and large mmap()
17233 + * allocations.
17234 + */
17235 +
17236 +#ifdef CONFIG_PAX_SEGMEXEC
17237 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
17238 + mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
17239 + else
17240 +#endif
17241 +
17242 + mm->mmap_base = TASK_UNMAPPED_BASE;
17243 +
17244 +#ifdef CONFIG_PAX_RANDMMAP
17245 + if (mm->pax_flags & MF_PAX_RANDMMAP)
17246 + mm->mmap_base += mm->delta_mmap;
17247 +#endif
17248 +
17249 + mm->free_area_cache = mm->mmap_base;
17250 + mm->cached_hole_size = ~0UL;
17251 + addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
17252 + /*
17253 + * Restore the topdown base:
17254 + */
17255 + mm->mmap_base = base;
17256 + mm->free_area_cache = base;
17257 + mm->cached_hole_size = ~0UL;
17258 +
17259 + return addr;
17260 }
17261 diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
17262 index 0514890..3dbebce 100644
17263 --- a/arch/x86/kernel/sys_x86_64.c
17264 +++ b/arch/x86/kernel/sys_x86_64.c
17265 @@ -95,8 +95,8 @@ out:
17266 return error;
17267 }
17268
17269 -static void find_start_end(unsigned long flags, unsigned long *begin,
17270 - unsigned long *end)
17271 +static void find_start_end(struct mm_struct *mm, unsigned long flags,
17272 + unsigned long *begin, unsigned long *end)
17273 {
17274 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
17275 unsigned long new_begin;
17276 @@ -115,7 +115,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
17277 *begin = new_begin;
17278 }
17279 } else {
17280 - *begin = TASK_UNMAPPED_BASE;
17281 + *begin = mm->mmap_base;
17282 *end = TASK_SIZE;
17283 }
17284 }
17285 @@ -132,16 +132,19 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
17286 if (flags & MAP_FIXED)
17287 return addr;
17288
17289 - find_start_end(flags, &begin, &end);
17290 + find_start_end(mm, flags, &begin, &end);
17291
17292 if (len > end)
17293 return -ENOMEM;
17294
17295 +#ifdef CONFIG_PAX_RANDMMAP
17296 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
17297 +#endif
17298 +
17299 if (addr) {
17300 addr = PAGE_ALIGN(addr);
17301 vma = find_vma(mm, addr);
17302 - if (end - len >= addr &&
17303 - (!vma || addr + len <= vma->vm_start))
17304 + if (end - len >= addr && check_heap_stack_gap(vma, addr, len))
17305 return addr;
17306 }
17307 if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
17308 @@ -172,7 +175,7 @@ full_search:
17309 }
17310 return -ENOMEM;
17311 }
17312 - if (!vma || addr + len <= vma->vm_start) {
17313 + if (check_heap_stack_gap(vma, addr, len)) {
17314 /*
17315 * Remember the place where we stopped the search:
17316 */
17317 @@ -195,7 +198,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
17318 {
17319 struct vm_area_struct *vma;
17320 struct mm_struct *mm = current->mm;
17321 - unsigned long addr = addr0;
17322 + unsigned long base = mm->mmap_base, addr = addr0;
17323
17324 /* requested length too big for entire address space */
17325 if (len > TASK_SIZE)
17326 @@ -208,13 +211,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
17327 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
17328 goto bottomup;
17329
17330 +#ifdef CONFIG_PAX_RANDMMAP
17331 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
17332 +#endif
17333 +
17334 /* requesting a specific address */
17335 if (addr) {
17336 addr = PAGE_ALIGN(addr);
17337 - vma = find_vma(mm, addr);
17338 - if (TASK_SIZE - len >= addr &&
17339 - (!vma || addr + len <= vma->vm_start))
17340 - return addr;
17341 + if (TASK_SIZE - len >= addr) {
17342 + vma = find_vma(mm, addr);
17343 + if (check_heap_stack_gap(vma, addr, len))
17344 + return addr;
17345 + }
17346 }
17347
17348 /* check if free_area_cache is useful for us */
17349 @@ -232,7 +240,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
17350 ALIGN_TOPDOWN);
17351
17352 vma = find_vma(mm, tmp_addr);
17353 - if (!vma || tmp_addr + len <= vma->vm_start)
17354 + if (check_heap_stack_gap(vma, tmp_addr, len))
17355 /* remember the address as a hint for next time */
17356 return mm->free_area_cache = tmp_addr;
17357 }
17358 @@ -251,7 +259,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
17359 * return with success:
17360 */
17361 vma = find_vma(mm, addr);
17362 - if (!vma || addr+len <= vma->vm_start)
17363 + if (check_heap_stack_gap(vma, addr, len))
17364 /* remember the address as a hint for next time */
17365 return mm->free_area_cache = addr;
17366
17367 @@ -260,8 +268,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
17368 mm->cached_hole_size = vma->vm_start - addr;
17369
17370 /* try just below the current vma->vm_start */
17371 - addr = vma->vm_start-len;
17372 - } while (len < vma->vm_start);
17373 + addr = skip_heap_stack_gap(vma, len);
17374 + } while (!IS_ERR_VALUE(addr));
17375
17376 bottomup:
17377 /*
17378 @@ -270,13 +278,21 @@ bottomup:
17379 * can happen with large stack limits and large mmap()
17380 * allocations.
17381 */
17382 + mm->mmap_base = TASK_UNMAPPED_BASE;
17383 +
17384 +#ifdef CONFIG_PAX_RANDMMAP
17385 + if (mm->pax_flags & MF_PAX_RANDMMAP)
17386 + mm->mmap_base += mm->delta_mmap;
17387 +#endif
17388 +
17389 + mm->free_area_cache = mm->mmap_base;
17390 mm->cached_hole_size = ~0UL;
17391 - mm->free_area_cache = TASK_UNMAPPED_BASE;
17392 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
17393 /*
17394 * Restore the topdown base:
17395 */
17396 - mm->free_area_cache = mm->mmap_base;
17397 + mm->mmap_base = base;
17398 + mm->free_area_cache = base;
17399 mm->cached_hole_size = ~0UL;
17400
17401 return addr;
17402 diff --git a/arch/x86/kernel/syscall_table_32.S b/arch/x86/kernel/syscall_table_32.S
17403 index 9a0e312..e6f66f2 100644
17404 --- a/arch/x86/kernel/syscall_table_32.S
17405 +++ b/arch/x86/kernel/syscall_table_32.S
17406 @@ -1,3 +1,4 @@
17407 +.section .rodata,"a",@progbits
17408 ENTRY(sys_call_table)
17409 .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */
17410 .long sys_exit
17411 diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
17412 index e2410e2..4fe3fbc 100644
17413 --- a/arch/x86/kernel/tboot.c
17414 +++ b/arch/x86/kernel/tboot.c
17415 @@ -219,7 +219,7 @@ static int tboot_setup_sleep(void)
17416
17417 void tboot_shutdown(u32 shutdown_type)
17418 {
17419 - void (*shutdown)(void);
17420 + void (* __noreturn shutdown)(void);
17421
17422 if (!tboot_enabled())
17423 return;
17424 @@ -241,7 +241,7 @@ void tboot_shutdown(u32 shutdown_type)
17425
17426 switch_to_tboot_pt();
17427
17428 - shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
17429 + shutdown = (void *)tboot->shutdown_entry;
17430 shutdown();
17431
17432 /* should not reach here */
17433 @@ -298,7 +298,7 @@ void tboot_sleep(u8 sleep_state, u32 pm1a_control, u32 pm1b_control)
17434 tboot_shutdown(acpi_shutdown_map[sleep_state]);
17435 }
17436
17437 -static atomic_t ap_wfs_count;
17438 +static atomic_unchecked_t ap_wfs_count;
17439
17440 static int tboot_wait_for_aps(int num_aps)
17441 {
17442 @@ -322,9 +322,9 @@ static int __cpuinit tboot_cpu_callback(struct notifier_block *nfb,
17443 {
17444 switch (action) {
17445 case CPU_DYING:
17446 - atomic_inc(&ap_wfs_count);
17447 + atomic_inc_unchecked(&ap_wfs_count);
17448 if (num_online_cpus() == 1)
17449 - if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
17450 + if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
17451 return NOTIFY_BAD;
17452 break;
17453 }
17454 @@ -343,7 +343,7 @@ static __init int tboot_late_init(void)
17455
17456 tboot_create_trampoline();
17457
17458 - atomic_set(&ap_wfs_count, 0);
17459 + atomic_set_unchecked(&ap_wfs_count, 0);
17460 register_hotcpu_notifier(&tboot_cpu_notifier);
17461 return 0;
17462 }
17463 diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
17464 index dd5fbf4..b7f2232 100644
17465 --- a/arch/x86/kernel/time.c
17466 +++ b/arch/x86/kernel/time.c
17467 @@ -31,9 +31,9 @@ unsigned long profile_pc(struct pt_regs *regs)
17468 {
17469 unsigned long pc = instruction_pointer(regs);
17470
17471 - if (!user_mode_vm(regs) && in_lock_functions(pc)) {
17472 + if (!user_mode(regs) && in_lock_functions(pc)) {
17473 #ifdef CONFIG_FRAME_POINTER
17474 - return *(unsigned long *)(regs->bp + sizeof(long));
17475 + return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
17476 #else
17477 unsigned long *sp =
17478 (unsigned long *)kernel_stack_pointer(regs);
17479 @@ -42,11 +42,17 @@ unsigned long profile_pc(struct pt_regs *regs)
17480 * or above a saved flags. Eflags has bits 22-31 zero,
17481 * kernel addresses don't.
17482 */
17483 +
17484 +#ifdef CONFIG_PAX_KERNEXEC
17485 + return ktla_ktva(sp[0]);
17486 +#else
17487 if (sp[0] >> 22)
17488 return sp[0];
17489 if (sp[1] >> 22)
17490 return sp[1];
17491 #endif
17492 +
17493 +#endif
17494 }
17495 return pc;
17496 }
17497 diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
17498 index 6bb7b85..dd853e1 100644
17499 --- a/arch/x86/kernel/tls.c
17500 +++ b/arch/x86/kernel/tls.c
17501 @@ -85,6 +85,11 @@ int do_set_thread_area(struct task_struct *p, int idx,
17502 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
17503 return -EINVAL;
17504
17505 +#ifdef CONFIG_PAX_SEGMEXEC
17506 + if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
17507 + return -EINVAL;
17508 +#endif
17509 +
17510 set_tls_desc(p, idx, &info, 1);
17511
17512 return 0;
17513 diff --git a/arch/x86/kernel/trampoline_32.S b/arch/x86/kernel/trampoline_32.S
17514 index 451c0a7..e57f551 100644
17515 --- a/arch/x86/kernel/trampoline_32.S
17516 +++ b/arch/x86/kernel/trampoline_32.S
17517 @@ -32,6 +32,12 @@
17518 #include <asm/segment.h>
17519 #include <asm/page_types.h>
17520
17521 +#ifdef CONFIG_PAX_KERNEXEC
17522 +#define ta(X) (X)
17523 +#else
17524 +#define ta(X) ((X) - __PAGE_OFFSET)
17525 +#endif
17526 +
17527 #ifdef CONFIG_SMP
17528
17529 .section ".x86_trampoline","a"
17530 @@ -62,7 +68,7 @@ r_base = .
17531 inc %ax # protected mode (PE) bit
17532 lmsw %ax # into protected mode
17533 # flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S
17534 - ljmpl $__BOOT_CS, $(startup_32_smp-__PAGE_OFFSET)
17535 + ljmpl $__BOOT_CS, $ta(startup_32_smp)
17536
17537 # These need to be in the same 64K segment as the above;
17538 # hence we don't use the boot_gdt_descr defined in head.S
17539 diff --git a/arch/x86/kernel/trampoline_64.S b/arch/x86/kernel/trampoline_64.S
17540 index 09ff517..df19fbff 100644
17541 --- a/arch/x86/kernel/trampoline_64.S
17542 +++ b/arch/x86/kernel/trampoline_64.S
17543 @@ -90,7 +90,7 @@ startup_32:
17544 movl $__KERNEL_DS, %eax # Initialize the %ds segment register
17545 movl %eax, %ds
17546
17547 - movl $X86_CR4_PAE, %eax
17548 + movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
17549 movl %eax, %cr4 # Enable PAE mode
17550
17551 # Setup trampoline 4 level pagetables
17552 @@ -138,7 +138,7 @@ tidt:
17553 # so the kernel can live anywhere
17554 .balign 4
17555 tgdt:
17556 - .short tgdt_end - tgdt # gdt limit
17557 + .short tgdt_end - tgdt - 1 # gdt limit
17558 .long tgdt - r_base
17559 .short 0
17560 .quad 0x00cf9b000000ffff # __KERNEL32_CS
17561 diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
17562 index a8e3eb8..c9dbd7d 100644
17563 --- a/arch/x86/kernel/traps.c
17564 +++ b/arch/x86/kernel/traps.c
17565 @@ -70,12 +70,6 @@ asmlinkage int system_call(void);
17566
17567 /* Do we ignore FPU interrupts ? */
17568 char ignore_fpu_irq;
17569 -
17570 -/*
17571 - * The IDT has to be page-aligned to simplify the Pentium
17572 - * F0 0F bug workaround.
17573 - */
17574 -gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
17575 #endif
17576
17577 DECLARE_BITMAP(used_vectors, NR_VECTORS);
17578 @@ -108,13 +102,13 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
17579 }
17580
17581 static void __kprobes
17582 -do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
17583 +do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
17584 long error_code, siginfo_t *info)
17585 {
17586 struct task_struct *tsk = current;
17587
17588 #ifdef CONFIG_X86_32
17589 - if (regs->flags & X86_VM_MASK) {
17590 + if (v8086_mode(regs)) {
17591 /*
17592 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
17593 * On nmi (interrupt 2), do_trap should not be called.
17594 @@ -125,7 +119,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
17595 }
17596 #endif
17597
17598 - if (!user_mode(regs))
17599 + if (!user_mode_novm(regs))
17600 goto kernel_trap;
17601
17602 #ifdef CONFIG_X86_32
17603 @@ -148,7 +142,7 @@ trap_signal:
17604 printk_ratelimit()) {
17605 printk(KERN_INFO
17606 "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
17607 - tsk->comm, tsk->pid, str,
17608 + tsk->comm, task_pid_nr(tsk), str,
17609 regs->ip, regs->sp, error_code);
17610 print_vma_addr(" in ", regs->ip);
17611 printk("\n");
17612 @@ -165,8 +159,20 @@ kernel_trap:
17613 if (!fixup_exception(regs)) {
17614 tsk->thread.error_code = error_code;
17615 tsk->thread.trap_no = trapnr;
17616 +
17617 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17618 + if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
17619 + str = "PAX: suspicious stack segment fault";
17620 +#endif
17621 +
17622 die(str, regs, error_code);
17623 }
17624 +
17625 +#ifdef CONFIG_PAX_REFCOUNT
17626 + if (trapnr == 4)
17627 + pax_report_refcount_overflow(regs);
17628 +#endif
17629 +
17630 return;
17631
17632 #ifdef CONFIG_X86_32
17633 @@ -255,14 +261,30 @@ do_general_protection(struct pt_regs *regs, long error_code)
17634 conditional_sti(regs);
17635
17636 #ifdef CONFIG_X86_32
17637 - if (regs->flags & X86_VM_MASK)
17638 + if (v8086_mode(regs))
17639 goto gp_in_vm86;
17640 #endif
17641
17642 tsk = current;
17643 - if (!user_mode(regs))
17644 + if (!user_mode_novm(regs))
17645 goto gp_in_kernel;
17646
17647 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
17648 + if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
17649 + struct mm_struct *mm = tsk->mm;
17650 + unsigned long limit;
17651 +
17652 + down_write(&mm->mmap_sem);
17653 + limit = mm->context.user_cs_limit;
17654 + if (limit < TASK_SIZE) {
17655 + track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
17656 + up_write(&mm->mmap_sem);
17657 + return;
17658 + }
17659 + up_write(&mm->mmap_sem);
17660 + }
17661 +#endif
17662 +
17663 tsk->thread.error_code = error_code;
17664 tsk->thread.trap_no = 13;
17665
17666 @@ -295,6 +317,13 @@ gp_in_kernel:
17667 if (notify_die(DIE_GPF, "general protection fault", regs,
17668 error_code, 13, SIGSEGV) == NOTIFY_STOP)
17669 return;
17670 +
17671 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17672 + if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
17673 + die("PAX: suspicious general protection fault", regs, error_code);
17674 + else
17675 +#endif
17676 +
17677 die("general protection fault", regs, error_code);
17678 }
17679
17680 @@ -414,7 +443,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
17681 /* It's safe to allow irq's after DR6 has been saved */
17682 preempt_conditional_sti(regs);
17683
17684 - if (regs->flags & X86_VM_MASK) {
17685 + if (v8086_mode(regs)) {
17686 handle_vm86_trap((struct kernel_vm86_regs *) regs,
17687 error_code, 1);
17688 preempt_conditional_cli(regs);
17689 @@ -428,7 +457,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
17690 * We already checked v86 mode above, so we can check for kernel mode
17691 * by just checking the CPL of CS.
17692 */
17693 - if ((dr6 & DR_STEP) && !user_mode(regs)) {
17694 + if ((dr6 & DR_STEP) && !user_mode_novm(regs)) {
17695 tsk->thread.debugreg6 &= ~DR_STEP;
17696 set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
17697 regs->flags &= ~X86_EFLAGS_TF;
17698 @@ -457,7 +486,7 @@ void math_error(struct pt_regs *regs, int error_code, int trapnr)
17699 return;
17700 conditional_sti(regs);
17701
17702 - if (!user_mode_vm(regs))
17703 + if (!user_mode(regs))
17704 {
17705 if (!fixup_exception(regs)) {
17706 task->thread.error_code = error_code;
17707 @@ -568,7 +597,7 @@ asmlinkage void __attribute__((weak)) smp_threshold_interrupt(void)
17708 void __math_state_restore(void)
17709 {
17710 struct thread_info *thread = current_thread_info();
17711 - struct task_struct *tsk = thread->task;
17712 + struct task_struct *tsk = current;
17713
17714 /*
17715 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
17716 @@ -595,8 +624,7 @@ void __math_state_restore(void)
17717 */
17718 asmlinkage void math_state_restore(void)
17719 {
17720 - struct thread_info *thread = current_thread_info();
17721 - struct task_struct *tsk = thread->task;
17722 + struct task_struct *tsk = current;
17723
17724 if (!tsk_used_math(tsk)) {
17725 local_irq_enable();
17726 diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
17727 index b9242ba..50c5edd 100644
17728 --- a/arch/x86/kernel/verify_cpu.S
17729 +++ b/arch/x86/kernel/verify_cpu.S
17730 @@ -20,6 +20,7 @@
17731 * arch/x86/boot/compressed/head_64.S: Boot cpu verification
17732 * arch/x86/kernel/trampoline_64.S: secondary processor verification
17733 * arch/x86/kernel/head_32.S: processor startup
17734 + * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
17735 *
17736 * verify_cpu, returns the status of longmode and SSE in register %eax.
17737 * 0: Success 1: Failure
17738 diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
17739 index 863f875..4307295 100644
17740 --- a/arch/x86/kernel/vm86_32.c
17741 +++ b/arch/x86/kernel/vm86_32.c
17742 @@ -41,6 +41,7 @@
17743 #include <linux/ptrace.h>
17744 #include <linux/audit.h>
17745 #include <linux/stddef.h>
17746 +#include <linux/grsecurity.h>
17747
17748 #include <asm/uaccess.h>
17749 #include <asm/io.h>
17750 @@ -148,7 +149,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
17751 do_exit(SIGSEGV);
17752 }
17753
17754 - tss = &per_cpu(init_tss, get_cpu());
17755 + tss = init_tss + get_cpu();
17756 current->thread.sp0 = current->thread.saved_sp0;
17757 current->thread.sysenter_cs = __KERNEL_CS;
17758 load_sp0(tss, &current->thread);
17759 @@ -208,6 +209,13 @@ int sys_vm86old(struct vm86_struct __user *v86, struct pt_regs *regs)
17760 struct task_struct *tsk;
17761 int tmp, ret = -EPERM;
17762
17763 +#ifdef CONFIG_GRKERNSEC_VM86
17764 + if (!capable(CAP_SYS_RAWIO)) {
17765 + gr_handle_vm86();
17766 + goto out;
17767 + }
17768 +#endif
17769 +
17770 tsk = current;
17771 if (tsk->thread.saved_sp0)
17772 goto out;
17773 @@ -238,6 +246,14 @@ int sys_vm86(unsigned long cmd, unsigned long arg, struct pt_regs *regs)
17774 int tmp, ret;
17775 struct vm86plus_struct __user *v86;
17776
17777 +#ifdef CONFIG_GRKERNSEC_VM86
17778 + if (!capable(CAP_SYS_RAWIO)) {
17779 + gr_handle_vm86();
17780 + ret = -EPERM;
17781 + goto out;
17782 + }
17783 +#endif
17784 +
17785 tsk = current;
17786 switch (cmd) {
17787 case VM86_REQUEST_IRQ:
17788 @@ -324,7 +340,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
17789 tsk->thread.saved_fs = info->regs32->fs;
17790 tsk->thread.saved_gs = get_user_gs(info->regs32);
17791
17792 - tss = &per_cpu(init_tss, get_cpu());
17793 + tss = init_tss + get_cpu();
17794 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
17795 if (cpu_has_sep)
17796 tsk->thread.sysenter_cs = 0;
17797 @@ -529,7 +545,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
17798 goto cannot_handle;
17799 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
17800 goto cannot_handle;
17801 - intr_ptr = (unsigned long __user *) (i << 2);
17802 + intr_ptr = (__force unsigned long __user *) (i << 2);
17803 if (get_user(segoffs, intr_ptr))
17804 goto cannot_handle;
17805 if ((segoffs >> 16) == BIOSSEG)
17806 diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
17807 index 0f703f1..9e15f64 100644
17808 --- a/arch/x86/kernel/vmlinux.lds.S
17809 +++ b/arch/x86/kernel/vmlinux.lds.S
17810 @@ -26,6 +26,13 @@
17811 #include <asm/page_types.h>
17812 #include <asm/cache.h>
17813 #include <asm/boot.h>
17814 +#include <asm/segment.h>
17815 +
17816 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17817 +#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
17818 +#else
17819 +#define __KERNEL_TEXT_OFFSET 0
17820 +#endif
17821
17822 #undef i386 /* in case the preprocessor is a 32bit one */
17823
17824 @@ -69,30 +76,43 @@ jiffies_64 = jiffies;
17825
17826 PHDRS {
17827 text PT_LOAD FLAGS(5); /* R_E */
17828 +#ifdef CONFIG_X86_32
17829 + module PT_LOAD FLAGS(5); /* R_E */
17830 +#endif
17831 +#ifdef CONFIG_XEN
17832 + rodata PT_LOAD FLAGS(5); /* R_E */
17833 +#else
17834 + rodata PT_LOAD FLAGS(4); /* R__ */
17835 +#endif
17836 data PT_LOAD FLAGS(6); /* RW_ */
17837 -#ifdef CONFIG_X86_64
17838 + init.begin PT_LOAD FLAGS(6); /* RW_ */
17839 #ifdef CONFIG_SMP
17840 percpu PT_LOAD FLAGS(6); /* RW_ */
17841 #endif
17842 + text.init PT_LOAD FLAGS(5); /* R_E */
17843 + text.exit PT_LOAD FLAGS(5); /* R_E */
17844 init PT_LOAD FLAGS(7); /* RWE */
17845 -#endif
17846 note PT_NOTE FLAGS(0); /* ___ */
17847 }
17848
17849 SECTIONS
17850 {
17851 #ifdef CONFIG_X86_32
17852 - . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
17853 - phys_startup_32 = startup_32 - LOAD_OFFSET;
17854 + . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
17855 #else
17856 - . = __START_KERNEL;
17857 - phys_startup_64 = startup_64 - LOAD_OFFSET;
17858 + . = __START_KERNEL;
17859 #endif
17860
17861 /* Text and read-only data */
17862 - .text : AT(ADDR(.text) - LOAD_OFFSET) {
17863 - _text = .;
17864 + .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
17865 /* bootstrapping code */
17866 +#ifdef CONFIG_X86_32
17867 + phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
17868 +#else
17869 + phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
17870 +#endif
17871 + __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
17872 + _text = .;
17873 HEAD_TEXT
17874 #ifdef CONFIG_X86_32
17875 . = ALIGN(PAGE_SIZE);
17876 @@ -108,13 +128,47 @@ SECTIONS
17877 IRQENTRY_TEXT
17878 *(.fixup)
17879 *(.gnu.warning)
17880 - /* End of text section */
17881 - _etext = .;
17882 } :text = 0x9090
17883
17884 - NOTES :text :note
17885 + . += __KERNEL_TEXT_OFFSET;
17886
17887 - EXCEPTION_TABLE(16) :text = 0x9090
17888 +#ifdef CONFIG_X86_32
17889 + . = ALIGN(PAGE_SIZE);
17890 + .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
17891 +
17892 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
17893 + MODULES_EXEC_VADDR = .;
17894 + BYTE(0)
17895 + . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
17896 + . = ALIGN(HPAGE_SIZE);
17897 + MODULES_EXEC_END = . - 1;
17898 +#endif
17899 +
17900 + } :module
17901 +#endif
17902 +
17903 + .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
17904 + /* End of text section */
17905 + _etext = . - __KERNEL_TEXT_OFFSET;
17906 + }
17907 +
17908 +#ifdef CONFIG_X86_32
17909 + . = ALIGN(PAGE_SIZE);
17910 + .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
17911 + *(.idt)
17912 + . = ALIGN(PAGE_SIZE);
17913 + *(.empty_zero_page)
17914 + *(.initial_pg_fixmap)
17915 + *(.initial_pg_pmd)
17916 + *(.initial_page_table)
17917 + *(.swapper_pg_dir)
17918 + } :rodata
17919 +#endif
17920 +
17921 + . = ALIGN(PAGE_SIZE);
17922 + NOTES :rodata :note
17923 +
17924 + EXCEPTION_TABLE(16) :rodata
17925
17926 #if defined(CONFIG_DEBUG_RODATA)
17927 /* .text should occupy whole number of pages */
17928 @@ -126,16 +180,20 @@ SECTIONS
17929
17930 /* Data */
17931 .data : AT(ADDR(.data) - LOAD_OFFSET) {
17932 +
17933 +#ifdef CONFIG_PAX_KERNEXEC
17934 + . = ALIGN(HPAGE_SIZE);
17935 +#else
17936 + . = ALIGN(PAGE_SIZE);
17937 +#endif
17938 +
17939 /* Start of data section */
17940 _sdata = .;
17941
17942 /* init_task */
17943 INIT_TASK_DATA(THREAD_SIZE)
17944
17945 -#ifdef CONFIG_X86_32
17946 - /* 32 bit has nosave before _edata */
17947 NOSAVE_DATA
17948 -#endif
17949
17950 PAGE_ALIGNED_DATA(PAGE_SIZE)
17951
17952 @@ -176,12 +234,19 @@ SECTIONS
17953 #endif /* CONFIG_X86_64 */
17954
17955 /* Init code and data - will be freed after init */
17956 - . = ALIGN(PAGE_SIZE);
17957 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
17958 + BYTE(0)
17959 +
17960 +#ifdef CONFIG_PAX_KERNEXEC
17961 + . = ALIGN(HPAGE_SIZE);
17962 +#else
17963 + . = ALIGN(PAGE_SIZE);
17964 +#endif
17965 +
17966 __init_begin = .; /* paired with __init_end */
17967 - }
17968 + } :init.begin
17969
17970 -#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
17971 +#ifdef CONFIG_SMP
17972 /*
17973 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
17974 * output PHDR, so the next output section - .init.text - should
17975 @@ -190,12 +255,27 @@ SECTIONS
17976 PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
17977 #endif
17978
17979 - INIT_TEXT_SECTION(PAGE_SIZE)
17980 -#ifdef CONFIG_X86_64
17981 - :init
17982 -#endif
17983 + . = ALIGN(PAGE_SIZE);
17984 + init_begin = .;
17985 + .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
17986 + VMLINUX_SYMBOL(_sinittext) = .;
17987 + INIT_TEXT
17988 + VMLINUX_SYMBOL(_einittext) = .;
17989 + . = ALIGN(PAGE_SIZE);
17990 + } :text.init
17991
17992 - INIT_DATA_SECTION(16)
17993 + /*
17994 + * .exit.text is discard at runtime, not link time, to deal with
17995 + * references from .altinstructions and .eh_frame
17996 + */
17997 + .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
17998 + EXIT_TEXT
17999 + . = ALIGN(16);
18000 + } :text.exit
18001 + . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
18002 +
18003 + . = ALIGN(PAGE_SIZE);
18004 + INIT_DATA_SECTION(16) :init
18005
18006 /*
18007 * Code and data for a variety of lowlevel trampolines, to be
18008 @@ -269,19 +349,12 @@ SECTIONS
18009 }
18010
18011 . = ALIGN(8);
18012 - /*
18013 - * .exit.text is discard at runtime, not link time, to deal with
18014 - * references from .altinstructions and .eh_frame
18015 - */
18016 - .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
18017 - EXIT_TEXT
18018 - }
18019
18020 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
18021 EXIT_DATA
18022 }
18023
18024 -#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
18025 +#ifndef CONFIG_SMP
18026 PERCPU_SECTION(INTERNODE_CACHE_BYTES)
18027 #endif
18028
18029 @@ -300,16 +373,10 @@ SECTIONS
18030 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
18031 __smp_locks = .;
18032 *(.smp_locks)
18033 - . = ALIGN(PAGE_SIZE);
18034 __smp_locks_end = .;
18035 + . = ALIGN(PAGE_SIZE);
18036 }
18037
18038 -#ifdef CONFIG_X86_64
18039 - .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
18040 - NOSAVE_DATA
18041 - }
18042 -#endif
18043 -
18044 /* BSS */
18045 . = ALIGN(PAGE_SIZE);
18046 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
18047 @@ -325,6 +392,7 @@ SECTIONS
18048 __brk_base = .;
18049 . += 64 * 1024; /* 64k alignment slop space */
18050 *(.brk_reservation) /* areas brk users have reserved */
18051 + . = ALIGN(HPAGE_SIZE);
18052 __brk_limit = .;
18053 }
18054
18055 @@ -351,13 +419,12 @@ SECTIONS
18056 * for the boot processor.
18057 */
18058 #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
18059 -INIT_PER_CPU(gdt_page);
18060 INIT_PER_CPU(irq_stack_union);
18061
18062 /*
18063 * Build-time check on the image size:
18064 */
18065 -. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
18066 +. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
18067 "kernel image bigger than KERNEL_IMAGE_SIZE");
18068
18069 #ifdef CONFIG_SMP
18070 diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
18071 index e4d4a22..47ee71f 100644
18072 --- a/arch/x86/kernel/vsyscall_64.c
18073 +++ b/arch/x86/kernel/vsyscall_64.c
18074 @@ -57,15 +57,13 @@ DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data) =
18075 .lock = __SEQLOCK_UNLOCKED(__vsyscall_gtod_data.lock),
18076 };
18077
18078 -static enum { EMULATE, NATIVE, NONE } vsyscall_mode = NATIVE;
18079 +static enum { EMULATE, NONE } vsyscall_mode = EMULATE;
18080
18081 static int __init vsyscall_setup(char *str)
18082 {
18083 if (str) {
18084 if (!strcmp("emulate", str))
18085 vsyscall_mode = EMULATE;
18086 - else if (!strcmp("native", str))
18087 - vsyscall_mode = NATIVE;
18088 else if (!strcmp("none", str))
18089 vsyscall_mode = NONE;
18090 else
18091 @@ -178,7 +176,7 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
18092
18093 tsk = current;
18094 if (seccomp_mode(&tsk->seccomp))
18095 - do_exit(SIGKILL);
18096 + do_group_exit(SIGKILL);
18097
18098 switch (vsyscall_nr) {
18099 case 0:
18100 @@ -220,8 +218,7 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
18101 return true;
18102
18103 sigsegv:
18104 - force_sig(SIGSEGV, current);
18105 - return true;
18106 + do_group_exit(SIGKILL);
18107 }
18108
18109 /*
18110 @@ -274,10 +271,7 @@ void __init map_vsyscall(void)
18111 extern char __vvar_page;
18112 unsigned long physaddr_vvar_page = __pa_symbol(&__vvar_page);
18113
18114 - __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall,
18115 - vsyscall_mode == NATIVE
18116 - ? PAGE_KERNEL_VSYSCALL
18117 - : PAGE_KERNEL_VVAR);
18118 + __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall, PAGE_KERNEL_VVAR);
18119 BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_FIRST_PAGE) !=
18120 (unsigned long)VSYSCALL_START);
18121
18122 diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
18123 index 9796c2f..f686fbf 100644
18124 --- a/arch/x86/kernel/x8664_ksyms_64.c
18125 +++ b/arch/x86/kernel/x8664_ksyms_64.c
18126 @@ -29,8 +29,6 @@ EXPORT_SYMBOL(__put_user_8);
18127 EXPORT_SYMBOL(copy_user_generic_string);
18128 EXPORT_SYMBOL(copy_user_generic_unrolled);
18129 EXPORT_SYMBOL(__copy_user_nocache);
18130 -EXPORT_SYMBOL(_copy_from_user);
18131 -EXPORT_SYMBOL(_copy_to_user);
18132
18133 EXPORT_SYMBOL(copy_page);
18134 EXPORT_SYMBOL(clear_page);
18135 diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
18136 index a391134..d0b63b6e 100644
18137 --- a/arch/x86/kernel/xsave.c
18138 +++ b/arch/x86/kernel/xsave.c
18139 @@ -130,7 +130,7 @@ int check_for_xstate(struct i387_fxsave_struct __user *buf,
18140 fx_sw_user->xstate_size > fx_sw_user->extended_size)
18141 return -EINVAL;
18142
18143 - err = __get_user(magic2, (__u32 *) (((void *)fpstate) +
18144 + err = __get_user(magic2, (__u32 __user *) (((void __user *)fpstate) +
18145 fx_sw_user->extended_size -
18146 FP_XSTATE_MAGIC2_SIZE));
18147 if (err)
18148 @@ -267,7 +267,7 @@ fx_only:
18149 * the other extended state.
18150 */
18151 xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE);
18152 - return fxrstor_checking((__force struct i387_fxsave_struct *)buf);
18153 + return fxrstor_checking((struct i387_fxsave_struct __force_kernel *)buf);
18154 }
18155
18156 /*
18157 @@ -299,7 +299,7 @@ int restore_i387_xstate(void __user *buf)
18158 if (use_xsave())
18159 err = restore_user_xstate(buf);
18160 else
18161 - err = fxrstor_checking((__force struct i387_fxsave_struct *)
18162 + err = fxrstor_checking((struct i387_fxsave_struct __force_kernel *)
18163 buf);
18164 if (unlikely(err)) {
18165 /*
18166 diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
18167 index f1e3be1..588efc8 100644
18168 --- a/arch/x86/kvm/emulate.c
18169 +++ b/arch/x86/kvm/emulate.c
18170 @@ -249,6 +249,7 @@ struct gprefix {
18171
18172 #define ____emulate_2op(ctxt, _op, _x, _y, _suffix, _dsttype) \
18173 do { \
18174 + unsigned long _tmp; \
18175 __asm__ __volatile__ ( \
18176 _PRE_EFLAGS("0", "4", "2") \
18177 _op _suffix " %"_x"3,%1; " \
18178 @@ -263,8 +264,6 @@ struct gprefix {
18179 /* Raw emulation: instruction has two explicit operands. */
18180 #define __emulate_2op_nobyte(ctxt,_op,_wx,_wy,_lx,_ly,_qx,_qy) \
18181 do { \
18182 - unsigned long _tmp; \
18183 - \
18184 switch ((ctxt)->dst.bytes) { \
18185 case 2: \
18186 ____emulate_2op(ctxt,_op,_wx,_wy,"w",u16); \
18187 @@ -280,7 +279,6 @@ struct gprefix {
18188
18189 #define __emulate_2op(ctxt,_op,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
18190 do { \
18191 - unsigned long _tmp; \
18192 switch ((ctxt)->dst.bytes) { \
18193 case 1: \
18194 ____emulate_2op(ctxt,_op,_bx,_by,"b",u8); \
18195 diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
18196 index 54abb40..a192606 100644
18197 --- a/arch/x86/kvm/lapic.c
18198 +++ b/arch/x86/kvm/lapic.c
18199 @@ -53,7 +53,7 @@
18200 #define APIC_BUS_CYCLE_NS 1
18201
18202 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
18203 -#define apic_debug(fmt, arg...)
18204 +#define apic_debug(fmt, arg...) do {} while (0)
18205
18206 #define APIC_LVT_NUM 6
18207 /* 14 is the version for Xeon and Pentium 8.4.8*/
18208 diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
18209 index f1b36cf..af8a124 100644
18210 --- a/arch/x86/kvm/mmu.c
18211 +++ b/arch/x86/kvm/mmu.c
18212 @@ -3555,7 +3555,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
18213
18214 pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
18215
18216 - invlpg_counter = atomic_read(&vcpu->kvm->arch.invlpg_counter);
18217 + invlpg_counter = atomic_read_unchecked(&vcpu->kvm->arch.invlpg_counter);
18218
18219 /*
18220 * Assume that the pte write on a page table of the same type
18221 @@ -3587,7 +3587,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
18222 }
18223
18224 spin_lock(&vcpu->kvm->mmu_lock);
18225 - if (atomic_read(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
18226 + if (atomic_read_unchecked(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
18227 gentry = 0;
18228 kvm_mmu_free_some_pages(vcpu);
18229 ++vcpu->kvm->stat.mmu_pte_write;
18230 diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
18231 index 9299410..ade2f9b 100644
18232 --- a/arch/x86/kvm/paging_tmpl.h
18233 +++ b/arch/x86/kvm/paging_tmpl.h
18234 @@ -197,7 +197,7 @@ retry_walk:
18235 if (unlikely(kvm_is_error_hva(host_addr)))
18236 goto error;
18237
18238 - ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
18239 + ptep_user = (pt_element_t __force_user *)((void *)host_addr + offset);
18240 if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte))))
18241 goto error;
18242
18243 @@ -705,7 +705,7 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
18244 if (need_flush)
18245 kvm_flush_remote_tlbs(vcpu->kvm);
18246
18247 - atomic_inc(&vcpu->kvm->arch.invlpg_counter);
18248 + atomic_inc_unchecked(&vcpu->kvm->arch.invlpg_counter);
18249
18250 spin_unlock(&vcpu->kvm->mmu_lock);
18251
18252 diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
18253 index e32243e..a6e6172 100644
18254 --- a/arch/x86/kvm/svm.c
18255 +++ b/arch/x86/kvm/svm.c
18256 @@ -3400,7 +3400,11 @@ static void reload_tss(struct kvm_vcpu *vcpu)
18257 int cpu = raw_smp_processor_id();
18258
18259 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
18260 +
18261 + pax_open_kernel();
18262 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
18263 + pax_close_kernel();
18264 +
18265 load_TR_desc();
18266 }
18267
18268 @@ -3778,6 +3782,10 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
18269 #endif
18270 #endif
18271
18272 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
18273 + __set_fs(current_thread_info()->addr_limit);
18274 +#endif
18275 +
18276 reload_tss(vcpu);
18277
18278 local_irq_disable();
18279 diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
18280 index 579a0b5..ed7bbf9 100644
18281 --- a/arch/x86/kvm/vmx.c
18282 +++ b/arch/x86/kvm/vmx.c
18283 @@ -1305,7 +1305,11 @@ static void reload_tss(void)
18284 struct desc_struct *descs;
18285
18286 descs = (void *)gdt->address;
18287 +
18288 + pax_open_kernel();
18289 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
18290 + pax_close_kernel();
18291 +
18292 load_TR_desc();
18293 }
18294
18295 @@ -2633,8 +2637,11 @@ static __init int hardware_setup(void)
18296 if (!cpu_has_vmx_flexpriority())
18297 flexpriority_enabled = 0;
18298
18299 - if (!cpu_has_vmx_tpr_shadow())
18300 - kvm_x86_ops->update_cr8_intercept = NULL;
18301 + if (!cpu_has_vmx_tpr_shadow()) {
18302 + pax_open_kernel();
18303 + *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
18304 + pax_close_kernel();
18305 + }
18306
18307 if (enable_ept && !cpu_has_vmx_ept_2m_page())
18308 kvm_disable_largepages();
18309 @@ -3648,7 +3655,7 @@ static void vmx_set_constant_host_state(void)
18310 vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
18311
18312 asm("mov $.Lkvm_vmx_return, %0" : "=r"(tmpl));
18313 - vmcs_writel(HOST_RIP, tmpl); /* 22.2.5 */
18314 + vmcs_writel(HOST_RIP, ktla_ktva(tmpl)); /* 22.2.5 */
18315
18316 rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
18317 vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
18318 @@ -6169,6 +6176,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
18319 "jmp .Lkvm_vmx_return \n\t"
18320 ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
18321 ".Lkvm_vmx_return: "
18322 +
18323 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18324 + "ljmp %[cs],$.Lkvm_vmx_return2\n\t"
18325 + ".Lkvm_vmx_return2: "
18326 +#endif
18327 +
18328 /* Save guest registers, load host registers, keep flags */
18329 "mov %0, %c[wordsize](%%"R"sp) \n\t"
18330 "pop %0 \n\t"
18331 @@ -6217,6 +6230,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
18332 #endif
18333 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
18334 [wordsize]"i"(sizeof(ulong))
18335 +
18336 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18337 + ,[cs]"i"(__KERNEL_CS)
18338 +#endif
18339 +
18340 : "cc", "memory"
18341 , R"ax", R"bx", R"di", R"si"
18342 #ifdef CONFIG_X86_64
18343 @@ -6245,7 +6263,16 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
18344 }
18345 }
18346
18347 - asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
18348 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r"(__KERNEL_DS));
18349 +
18350 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18351 + loadsegment(fs, __KERNEL_PERCPU);
18352 +#endif
18353 +
18354 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
18355 + __set_fs(current_thread_info()->addr_limit);
18356 +#endif
18357 +
18358 vmx->loaded_vmcs->launched = 1;
18359
18360 vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
18361 diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
18362 index 4c938da..4ddef65 100644
18363 --- a/arch/x86/kvm/x86.c
18364 +++ b/arch/x86/kvm/x86.c
18365 @@ -1345,8 +1345,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
18366 {
18367 struct kvm *kvm = vcpu->kvm;
18368 int lm = is_long_mode(vcpu);
18369 - u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
18370 - : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
18371 + u8 __user *blob_addr = lm ? (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_64
18372 + : (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
18373 u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
18374 : kvm->arch.xen_hvm_config.blob_size_32;
18375 u32 page_num = data & ~PAGE_MASK;
18376 @@ -2165,6 +2165,8 @@ long kvm_arch_dev_ioctl(struct file *filp,
18377 if (n < msr_list.nmsrs)
18378 goto out;
18379 r = -EFAULT;
18380 + if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
18381 + goto out;
18382 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
18383 num_msrs_to_save * sizeof(u32)))
18384 goto out;
18385 @@ -2340,15 +2342,20 @@ static int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
18386 struct kvm_cpuid2 *cpuid,
18387 struct kvm_cpuid_entry2 __user *entries)
18388 {
18389 - int r;
18390 + int r, i;
18391
18392 r = -E2BIG;
18393 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
18394 goto out;
18395 r = -EFAULT;
18396 - if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
18397 - cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
18398 + if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
18399 goto out;
18400 + for (i = 0; i < cpuid->nent; ++i) {
18401 + struct kvm_cpuid_entry2 cpuid_entry;
18402 + if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
18403 + goto out;
18404 + vcpu->arch.cpuid_entries[i] = cpuid_entry;
18405 + }
18406 vcpu->arch.cpuid_nent = cpuid->nent;
18407 kvm_apic_set_version(vcpu);
18408 kvm_x86_ops->cpuid_update(vcpu);
18409 @@ -2363,15 +2370,19 @@ static int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
18410 struct kvm_cpuid2 *cpuid,
18411 struct kvm_cpuid_entry2 __user *entries)
18412 {
18413 - int r;
18414 + int r, i;
18415
18416 r = -E2BIG;
18417 if (cpuid->nent < vcpu->arch.cpuid_nent)
18418 goto out;
18419 r = -EFAULT;
18420 - if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
18421 - vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
18422 + if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
18423 goto out;
18424 + for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
18425 + struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
18426 + if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
18427 + goto out;
18428 + }
18429 return 0;
18430
18431 out:
18432 @@ -2746,7 +2757,7 @@ static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
18433 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
18434 struct kvm_interrupt *irq)
18435 {
18436 - if (irq->irq < 0 || irq->irq >= 256)
18437 + if (irq->irq >= 256)
18438 return -EINVAL;
18439 if (irqchip_in_kernel(vcpu->kvm))
18440 return -ENXIO;
18441 @@ -5162,7 +5173,7 @@ static void kvm_set_mmio_spte_mask(void)
18442 kvm_mmu_set_mmio_spte_mask(mask);
18443 }
18444
18445 -int kvm_arch_init(void *opaque)
18446 +int kvm_arch_init(const void *opaque)
18447 {
18448 int r;
18449 struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
18450 diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
18451 index cf4603b..7cdde38 100644
18452 --- a/arch/x86/lguest/boot.c
18453 +++ b/arch/x86/lguest/boot.c
18454 @@ -1195,9 +1195,10 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
18455 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
18456 * Launcher to reboot us.
18457 */
18458 -static void lguest_restart(char *reason)
18459 +static __noreturn void lguest_restart(char *reason)
18460 {
18461 hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
18462 + BUG();
18463 }
18464
18465 /*G:050
18466 diff --git a/arch/x86/lib/atomic64_32.c b/arch/x86/lib/atomic64_32.c
18467 index 042f682..c92afb6 100644
18468 --- a/arch/x86/lib/atomic64_32.c
18469 +++ b/arch/x86/lib/atomic64_32.c
18470 @@ -8,18 +8,30 @@
18471
18472 long long atomic64_read_cx8(long long, const atomic64_t *v);
18473 EXPORT_SYMBOL(atomic64_read_cx8);
18474 +long long atomic64_read_unchecked_cx8(long long, const atomic64_unchecked_t *v);
18475 +EXPORT_SYMBOL(atomic64_read_unchecked_cx8);
18476 long long atomic64_set_cx8(long long, const atomic64_t *v);
18477 EXPORT_SYMBOL(atomic64_set_cx8);
18478 +long long atomic64_set_unchecked_cx8(long long, const atomic64_unchecked_t *v);
18479 +EXPORT_SYMBOL(atomic64_set_unchecked_cx8);
18480 long long atomic64_xchg_cx8(long long, unsigned high);
18481 EXPORT_SYMBOL(atomic64_xchg_cx8);
18482 long long atomic64_add_return_cx8(long long a, atomic64_t *v);
18483 EXPORT_SYMBOL(atomic64_add_return_cx8);
18484 +long long atomic64_add_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
18485 +EXPORT_SYMBOL(atomic64_add_return_unchecked_cx8);
18486 long long atomic64_sub_return_cx8(long long a, atomic64_t *v);
18487 EXPORT_SYMBOL(atomic64_sub_return_cx8);
18488 +long long atomic64_sub_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
18489 +EXPORT_SYMBOL(atomic64_sub_return_unchecked_cx8);
18490 long long atomic64_inc_return_cx8(long long a, atomic64_t *v);
18491 EXPORT_SYMBOL(atomic64_inc_return_cx8);
18492 +long long atomic64_inc_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
18493 +EXPORT_SYMBOL(atomic64_inc_return_unchecked_cx8);
18494 long long atomic64_dec_return_cx8(long long a, atomic64_t *v);
18495 EXPORT_SYMBOL(atomic64_dec_return_cx8);
18496 +long long atomic64_dec_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
18497 +EXPORT_SYMBOL(atomic64_dec_return_unchecked_cx8);
18498 long long atomic64_dec_if_positive_cx8(atomic64_t *v);
18499 EXPORT_SYMBOL(atomic64_dec_if_positive_cx8);
18500 int atomic64_inc_not_zero_cx8(atomic64_t *v);
18501 @@ -30,26 +42,46 @@ EXPORT_SYMBOL(atomic64_add_unless_cx8);
18502 #ifndef CONFIG_X86_CMPXCHG64
18503 long long atomic64_read_386(long long, const atomic64_t *v);
18504 EXPORT_SYMBOL(atomic64_read_386);
18505 +long long atomic64_read_unchecked_386(long long, const atomic64_unchecked_t *v);
18506 +EXPORT_SYMBOL(atomic64_read_unchecked_386);
18507 long long atomic64_set_386(long long, const atomic64_t *v);
18508 EXPORT_SYMBOL(atomic64_set_386);
18509 +long long atomic64_set_unchecked_386(long long, const atomic64_unchecked_t *v);
18510 +EXPORT_SYMBOL(atomic64_set_unchecked_386);
18511 long long atomic64_xchg_386(long long, unsigned high);
18512 EXPORT_SYMBOL(atomic64_xchg_386);
18513 long long atomic64_add_return_386(long long a, atomic64_t *v);
18514 EXPORT_SYMBOL(atomic64_add_return_386);
18515 +long long atomic64_add_return_unchecked_386(long long a, atomic64_unchecked_t *v);
18516 +EXPORT_SYMBOL(atomic64_add_return_unchecked_386);
18517 long long atomic64_sub_return_386(long long a, atomic64_t *v);
18518 EXPORT_SYMBOL(atomic64_sub_return_386);
18519 +long long atomic64_sub_return_unchecked_386(long long a, atomic64_unchecked_t *v);
18520 +EXPORT_SYMBOL(atomic64_sub_return_unchecked_386);
18521 long long atomic64_inc_return_386(long long a, atomic64_t *v);
18522 EXPORT_SYMBOL(atomic64_inc_return_386);
18523 +long long atomic64_inc_return_unchecked_386(long long a, atomic64_unchecked_t *v);
18524 +EXPORT_SYMBOL(atomic64_inc_return_unchecked_386);
18525 long long atomic64_dec_return_386(long long a, atomic64_t *v);
18526 EXPORT_SYMBOL(atomic64_dec_return_386);
18527 +long long atomic64_dec_return_unchecked_386(long long a, atomic64_unchecked_t *v);
18528 +EXPORT_SYMBOL(atomic64_dec_return_unchecked_386);
18529 long long atomic64_add_386(long long a, atomic64_t *v);
18530 EXPORT_SYMBOL(atomic64_add_386);
18531 +long long atomic64_add_unchecked_386(long long a, atomic64_unchecked_t *v);
18532 +EXPORT_SYMBOL(atomic64_add_unchecked_386);
18533 long long atomic64_sub_386(long long a, atomic64_t *v);
18534 EXPORT_SYMBOL(atomic64_sub_386);
18535 +long long atomic64_sub_unchecked_386(long long a, atomic64_unchecked_t *v);
18536 +EXPORT_SYMBOL(atomic64_sub_unchecked_386);
18537 long long atomic64_inc_386(long long a, atomic64_t *v);
18538 EXPORT_SYMBOL(atomic64_inc_386);
18539 +long long atomic64_inc_unchecked_386(long long a, atomic64_unchecked_t *v);
18540 +EXPORT_SYMBOL(atomic64_inc_unchecked_386);
18541 long long atomic64_dec_386(long long a, atomic64_t *v);
18542 EXPORT_SYMBOL(atomic64_dec_386);
18543 +long long atomic64_dec_unchecked_386(long long a, atomic64_unchecked_t *v);
18544 +EXPORT_SYMBOL(atomic64_dec_unchecked_386);
18545 long long atomic64_dec_if_positive_386(atomic64_t *v);
18546 EXPORT_SYMBOL(atomic64_dec_if_positive_386);
18547 int atomic64_inc_not_zero_386(atomic64_t *v);
18548 diff --git a/arch/x86/lib/atomic64_386_32.S b/arch/x86/lib/atomic64_386_32.S
18549 index e8e7e0d..56fd1b0 100644
18550 --- a/arch/x86/lib/atomic64_386_32.S
18551 +++ b/arch/x86/lib/atomic64_386_32.S
18552 @@ -48,6 +48,10 @@ BEGIN(read)
18553 movl (v), %eax
18554 movl 4(v), %edx
18555 RET_ENDP
18556 +BEGIN(read_unchecked)
18557 + movl (v), %eax
18558 + movl 4(v), %edx
18559 +RET_ENDP
18560 #undef v
18561
18562 #define v %esi
18563 @@ -55,6 +59,10 @@ BEGIN(set)
18564 movl %ebx, (v)
18565 movl %ecx, 4(v)
18566 RET_ENDP
18567 +BEGIN(set_unchecked)
18568 + movl %ebx, (v)
18569 + movl %ecx, 4(v)
18570 +RET_ENDP
18571 #undef v
18572
18573 #define v %esi
18574 @@ -70,6 +78,20 @@ RET_ENDP
18575 BEGIN(add)
18576 addl %eax, (v)
18577 adcl %edx, 4(v)
18578 +
18579 +#ifdef CONFIG_PAX_REFCOUNT
18580 + jno 0f
18581 + subl %eax, (v)
18582 + sbbl %edx, 4(v)
18583 + int $4
18584 +0:
18585 + _ASM_EXTABLE(0b, 0b)
18586 +#endif
18587 +
18588 +RET_ENDP
18589 +BEGIN(add_unchecked)
18590 + addl %eax, (v)
18591 + adcl %edx, 4(v)
18592 RET_ENDP
18593 #undef v
18594
18595 @@ -77,6 +99,24 @@ RET_ENDP
18596 BEGIN(add_return)
18597 addl (v), %eax
18598 adcl 4(v), %edx
18599 +
18600 +#ifdef CONFIG_PAX_REFCOUNT
18601 + into
18602 +1234:
18603 + _ASM_EXTABLE(1234b, 2f)
18604 +#endif
18605 +
18606 + movl %eax, (v)
18607 + movl %edx, 4(v)
18608 +
18609 +#ifdef CONFIG_PAX_REFCOUNT
18610 +2:
18611 +#endif
18612 +
18613 +RET_ENDP
18614 +BEGIN(add_return_unchecked)
18615 + addl (v), %eax
18616 + adcl 4(v), %edx
18617 movl %eax, (v)
18618 movl %edx, 4(v)
18619 RET_ENDP
18620 @@ -86,6 +126,20 @@ RET_ENDP
18621 BEGIN(sub)
18622 subl %eax, (v)
18623 sbbl %edx, 4(v)
18624 +
18625 +#ifdef CONFIG_PAX_REFCOUNT
18626 + jno 0f
18627 + addl %eax, (v)
18628 + adcl %edx, 4(v)
18629 + int $4
18630 +0:
18631 + _ASM_EXTABLE(0b, 0b)
18632 +#endif
18633 +
18634 +RET_ENDP
18635 +BEGIN(sub_unchecked)
18636 + subl %eax, (v)
18637 + sbbl %edx, 4(v)
18638 RET_ENDP
18639 #undef v
18640
18641 @@ -96,6 +150,27 @@ BEGIN(sub_return)
18642 sbbl $0, %edx
18643 addl (v), %eax
18644 adcl 4(v), %edx
18645 +
18646 +#ifdef CONFIG_PAX_REFCOUNT
18647 + into
18648 +1234:
18649 + _ASM_EXTABLE(1234b, 2f)
18650 +#endif
18651 +
18652 + movl %eax, (v)
18653 + movl %edx, 4(v)
18654 +
18655 +#ifdef CONFIG_PAX_REFCOUNT
18656 +2:
18657 +#endif
18658 +
18659 +RET_ENDP
18660 +BEGIN(sub_return_unchecked)
18661 + negl %edx
18662 + negl %eax
18663 + sbbl $0, %edx
18664 + addl (v), %eax
18665 + adcl 4(v), %edx
18666 movl %eax, (v)
18667 movl %edx, 4(v)
18668 RET_ENDP
18669 @@ -105,6 +180,20 @@ RET_ENDP
18670 BEGIN(inc)
18671 addl $1, (v)
18672 adcl $0, 4(v)
18673 +
18674 +#ifdef CONFIG_PAX_REFCOUNT
18675 + jno 0f
18676 + subl $1, (v)
18677 + sbbl $0, 4(v)
18678 + int $4
18679 +0:
18680 + _ASM_EXTABLE(0b, 0b)
18681 +#endif
18682 +
18683 +RET_ENDP
18684 +BEGIN(inc_unchecked)
18685 + addl $1, (v)
18686 + adcl $0, 4(v)
18687 RET_ENDP
18688 #undef v
18689
18690 @@ -114,6 +203,26 @@ BEGIN(inc_return)
18691 movl 4(v), %edx
18692 addl $1, %eax
18693 adcl $0, %edx
18694 +
18695 +#ifdef CONFIG_PAX_REFCOUNT
18696 + into
18697 +1234:
18698 + _ASM_EXTABLE(1234b, 2f)
18699 +#endif
18700 +
18701 + movl %eax, (v)
18702 + movl %edx, 4(v)
18703 +
18704 +#ifdef CONFIG_PAX_REFCOUNT
18705 +2:
18706 +#endif
18707 +
18708 +RET_ENDP
18709 +BEGIN(inc_return_unchecked)
18710 + movl (v), %eax
18711 + movl 4(v), %edx
18712 + addl $1, %eax
18713 + adcl $0, %edx
18714 movl %eax, (v)
18715 movl %edx, 4(v)
18716 RET_ENDP
18717 @@ -123,6 +232,20 @@ RET_ENDP
18718 BEGIN(dec)
18719 subl $1, (v)
18720 sbbl $0, 4(v)
18721 +
18722 +#ifdef CONFIG_PAX_REFCOUNT
18723 + jno 0f
18724 + addl $1, (v)
18725 + adcl $0, 4(v)
18726 + int $4
18727 +0:
18728 + _ASM_EXTABLE(0b, 0b)
18729 +#endif
18730 +
18731 +RET_ENDP
18732 +BEGIN(dec_unchecked)
18733 + subl $1, (v)
18734 + sbbl $0, 4(v)
18735 RET_ENDP
18736 #undef v
18737
18738 @@ -132,6 +255,26 @@ BEGIN(dec_return)
18739 movl 4(v), %edx
18740 subl $1, %eax
18741 sbbl $0, %edx
18742 +
18743 +#ifdef CONFIG_PAX_REFCOUNT
18744 + into
18745 +1234:
18746 + _ASM_EXTABLE(1234b, 2f)
18747 +#endif
18748 +
18749 + movl %eax, (v)
18750 + movl %edx, 4(v)
18751 +
18752 +#ifdef CONFIG_PAX_REFCOUNT
18753 +2:
18754 +#endif
18755 +
18756 +RET_ENDP
18757 +BEGIN(dec_return_unchecked)
18758 + movl (v), %eax
18759 + movl 4(v), %edx
18760 + subl $1, %eax
18761 + sbbl $0, %edx
18762 movl %eax, (v)
18763 movl %edx, 4(v)
18764 RET_ENDP
18765 @@ -143,6 +286,13 @@ BEGIN(add_unless)
18766 adcl %edx, %edi
18767 addl (v), %eax
18768 adcl 4(v), %edx
18769 +
18770 +#ifdef CONFIG_PAX_REFCOUNT
18771 + into
18772 +1234:
18773 + _ASM_EXTABLE(1234b, 2f)
18774 +#endif
18775 +
18776 cmpl %eax, %esi
18777 je 3f
18778 1:
18779 @@ -168,6 +318,13 @@ BEGIN(inc_not_zero)
18780 1:
18781 addl $1, %eax
18782 adcl $0, %edx
18783 +
18784 +#ifdef CONFIG_PAX_REFCOUNT
18785 + into
18786 +1234:
18787 + _ASM_EXTABLE(1234b, 2f)
18788 +#endif
18789 +
18790 movl %eax, (v)
18791 movl %edx, 4(v)
18792 movl $1, %eax
18793 @@ -186,6 +343,13 @@ BEGIN(dec_if_positive)
18794 movl 4(v), %edx
18795 subl $1, %eax
18796 sbbl $0, %edx
18797 +
18798 +#ifdef CONFIG_PAX_REFCOUNT
18799 + into
18800 +1234:
18801 + _ASM_EXTABLE(1234b, 1f)
18802 +#endif
18803 +
18804 js 1f
18805 movl %eax, (v)
18806 movl %edx, 4(v)
18807 diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S
18808 index 391a083..d658e9f 100644
18809 --- a/arch/x86/lib/atomic64_cx8_32.S
18810 +++ b/arch/x86/lib/atomic64_cx8_32.S
18811 @@ -35,10 +35,20 @@ ENTRY(atomic64_read_cx8)
18812 CFI_STARTPROC
18813
18814 read64 %ecx
18815 + pax_force_retaddr
18816 ret
18817 CFI_ENDPROC
18818 ENDPROC(atomic64_read_cx8)
18819
18820 +ENTRY(atomic64_read_unchecked_cx8)
18821 + CFI_STARTPROC
18822 +
18823 + read64 %ecx
18824 + pax_force_retaddr
18825 + ret
18826 + CFI_ENDPROC
18827 +ENDPROC(atomic64_read_unchecked_cx8)
18828 +
18829 ENTRY(atomic64_set_cx8)
18830 CFI_STARTPROC
18831
18832 @@ -48,10 +58,25 @@ ENTRY(atomic64_set_cx8)
18833 cmpxchg8b (%esi)
18834 jne 1b
18835
18836 + pax_force_retaddr
18837 ret
18838 CFI_ENDPROC
18839 ENDPROC(atomic64_set_cx8)
18840
18841 +ENTRY(atomic64_set_unchecked_cx8)
18842 + CFI_STARTPROC
18843 +
18844 +1:
18845 +/* we don't need LOCK_PREFIX since aligned 64-bit writes
18846 + * are atomic on 586 and newer */
18847 + cmpxchg8b (%esi)
18848 + jne 1b
18849 +
18850 + pax_force_retaddr
18851 + ret
18852 + CFI_ENDPROC
18853 +ENDPROC(atomic64_set_unchecked_cx8)
18854 +
18855 ENTRY(atomic64_xchg_cx8)
18856 CFI_STARTPROC
18857
18858 @@ -62,12 +87,13 @@ ENTRY(atomic64_xchg_cx8)
18859 cmpxchg8b (%esi)
18860 jne 1b
18861
18862 + pax_force_retaddr
18863 ret
18864 CFI_ENDPROC
18865 ENDPROC(atomic64_xchg_cx8)
18866
18867 -.macro addsub_return func ins insc
18868 -ENTRY(atomic64_\func\()_return_cx8)
18869 +.macro addsub_return func ins insc unchecked=""
18870 +ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
18871 CFI_STARTPROC
18872 SAVE ebp
18873 SAVE ebx
18874 @@ -84,27 +110,44 @@ ENTRY(atomic64_\func\()_return_cx8)
18875 movl %edx, %ecx
18876 \ins\()l %esi, %ebx
18877 \insc\()l %edi, %ecx
18878 +
18879 +.ifb \unchecked
18880 +#ifdef CONFIG_PAX_REFCOUNT
18881 + into
18882 +2:
18883 + _ASM_EXTABLE(2b, 3f)
18884 +#endif
18885 +.endif
18886 +
18887 LOCK_PREFIX
18888 cmpxchg8b (%ebp)
18889 jne 1b
18890 -
18891 -10:
18892 movl %ebx, %eax
18893 movl %ecx, %edx
18894 +
18895 +.ifb \unchecked
18896 +#ifdef CONFIG_PAX_REFCOUNT
18897 +3:
18898 +#endif
18899 +.endif
18900 +
18901 RESTORE edi
18902 RESTORE esi
18903 RESTORE ebx
18904 RESTORE ebp
18905 + pax_force_retaddr
18906 ret
18907 CFI_ENDPROC
18908 -ENDPROC(atomic64_\func\()_return_cx8)
18909 +ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
18910 .endm
18911
18912 addsub_return add add adc
18913 addsub_return sub sub sbb
18914 +addsub_return add add adc _unchecked
18915 +addsub_return sub sub sbb _unchecked
18916
18917 -.macro incdec_return func ins insc
18918 -ENTRY(atomic64_\func\()_return_cx8)
18919 +.macro incdec_return func ins insc unchecked
18920 +ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
18921 CFI_STARTPROC
18922 SAVE ebx
18923
18924 @@ -114,21 +157,39 @@ ENTRY(atomic64_\func\()_return_cx8)
18925 movl %edx, %ecx
18926 \ins\()l $1, %ebx
18927 \insc\()l $0, %ecx
18928 +
18929 +.ifb \unchecked
18930 +#ifdef CONFIG_PAX_REFCOUNT
18931 + into
18932 +2:
18933 + _ASM_EXTABLE(2b, 3f)
18934 +#endif
18935 +.endif
18936 +
18937 LOCK_PREFIX
18938 cmpxchg8b (%esi)
18939 jne 1b
18940
18941 -10:
18942 movl %ebx, %eax
18943 movl %ecx, %edx
18944 +
18945 +.ifb \unchecked
18946 +#ifdef CONFIG_PAX_REFCOUNT
18947 +3:
18948 +#endif
18949 +.endif
18950 +
18951 RESTORE ebx
18952 + pax_force_retaddr
18953 ret
18954 CFI_ENDPROC
18955 -ENDPROC(atomic64_\func\()_return_cx8)
18956 +ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
18957 .endm
18958
18959 incdec_return inc add adc
18960 incdec_return dec sub sbb
18961 +incdec_return inc add adc _unchecked
18962 +incdec_return dec sub sbb _unchecked
18963
18964 ENTRY(atomic64_dec_if_positive_cx8)
18965 CFI_STARTPROC
18966 @@ -140,6 +201,13 @@ ENTRY(atomic64_dec_if_positive_cx8)
18967 movl %edx, %ecx
18968 subl $1, %ebx
18969 sbb $0, %ecx
18970 +
18971 +#ifdef CONFIG_PAX_REFCOUNT
18972 + into
18973 +1234:
18974 + _ASM_EXTABLE(1234b, 2f)
18975 +#endif
18976 +
18977 js 2f
18978 LOCK_PREFIX
18979 cmpxchg8b (%esi)
18980 @@ -149,6 +217,7 @@ ENTRY(atomic64_dec_if_positive_cx8)
18981 movl %ebx, %eax
18982 movl %ecx, %edx
18983 RESTORE ebx
18984 + pax_force_retaddr
18985 ret
18986 CFI_ENDPROC
18987 ENDPROC(atomic64_dec_if_positive_cx8)
18988 @@ -174,6 +243,13 @@ ENTRY(atomic64_add_unless_cx8)
18989 movl %edx, %ecx
18990 addl %esi, %ebx
18991 adcl %edi, %ecx
18992 +
18993 +#ifdef CONFIG_PAX_REFCOUNT
18994 + into
18995 +1234:
18996 + _ASM_EXTABLE(1234b, 3f)
18997 +#endif
18998 +
18999 LOCK_PREFIX
19000 cmpxchg8b (%ebp)
19001 jne 1b
19002 @@ -184,6 +260,7 @@ ENTRY(atomic64_add_unless_cx8)
19003 CFI_ADJUST_CFA_OFFSET -8
19004 RESTORE ebx
19005 RESTORE ebp
19006 + pax_force_retaddr
19007 ret
19008 4:
19009 cmpl %edx, 4(%esp)
19010 @@ -206,6 +283,13 @@ ENTRY(atomic64_inc_not_zero_cx8)
19011 movl %edx, %ecx
19012 addl $1, %ebx
19013 adcl $0, %ecx
19014 +
19015 +#ifdef CONFIG_PAX_REFCOUNT
19016 + into
19017 +1234:
19018 + _ASM_EXTABLE(1234b, 3f)
19019 +#endif
19020 +
19021 LOCK_PREFIX
19022 cmpxchg8b (%esi)
19023 jne 1b
19024 @@ -213,6 +297,7 @@ ENTRY(atomic64_inc_not_zero_cx8)
19025 movl $1, %eax
19026 3:
19027 RESTORE ebx
19028 + pax_force_retaddr
19029 ret
19030 4:
19031 testl %edx, %edx
19032 diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
19033 index 78d16a5..fbcf666 100644
19034 --- a/arch/x86/lib/checksum_32.S
19035 +++ b/arch/x86/lib/checksum_32.S
19036 @@ -28,7 +28,8 @@
19037 #include <linux/linkage.h>
19038 #include <asm/dwarf2.h>
19039 #include <asm/errno.h>
19040 -
19041 +#include <asm/segment.h>
19042 +
19043 /*
19044 * computes a partial checksum, e.g. for TCP/UDP fragments
19045 */
19046 @@ -296,9 +297,24 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
19047
19048 #define ARGBASE 16
19049 #define FP 12
19050 -
19051 -ENTRY(csum_partial_copy_generic)
19052 +
19053 +ENTRY(csum_partial_copy_generic_to_user)
19054 CFI_STARTPROC
19055 +
19056 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19057 + pushl_cfi %gs
19058 + popl_cfi %es
19059 + jmp csum_partial_copy_generic
19060 +#endif
19061 +
19062 +ENTRY(csum_partial_copy_generic_from_user)
19063 +
19064 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19065 + pushl_cfi %gs
19066 + popl_cfi %ds
19067 +#endif
19068 +
19069 +ENTRY(csum_partial_copy_generic)
19070 subl $4,%esp
19071 CFI_ADJUST_CFA_OFFSET 4
19072 pushl_cfi %edi
19073 @@ -320,7 +336,7 @@ ENTRY(csum_partial_copy_generic)
19074 jmp 4f
19075 SRC(1: movw (%esi), %bx )
19076 addl $2, %esi
19077 -DST( movw %bx, (%edi) )
19078 +DST( movw %bx, %es:(%edi) )
19079 addl $2, %edi
19080 addw %bx, %ax
19081 adcl $0, %eax
19082 @@ -332,30 +348,30 @@ DST( movw %bx, (%edi) )
19083 SRC(1: movl (%esi), %ebx )
19084 SRC( movl 4(%esi), %edx )
19085 adcl %ebx, %eax
19086 -DST( movl %ebx, (%edi) )
19087 +DST( movl %ebx, %es:(%edi) )
19088 adcl %edx, %eax
19089 -DST( movl %edx, 4(%edi) )
19090 +DST( movl %edx, %es:4(%edi) )
19091
19092 SRC( movl 8(%esi), %ebx )
19093 SRC( movl 12(%esi), %edx )
19094 adcl %ebx, %eax
19095 -DST( movl %ebx, 8(%edi) )
19096 +DST( movl %ebx, %es:8(%edi) )
19097 adcl %edx, %eax
19098 -DST( movl %edx, 12(%edi) )
19099 +DST( movl %edx, %es:12(%edi) )
19100
19101 SRC( movl 16(%esi), %ebx )
19102 SRC( movl 20(%esi), %edx )
19103 adcl %ebx, %eax
19104 -DST( movl %ebx, 16(%edi) )
19105 +DST( movl %ebx, %es:16(%edi) )
19106 adcl %edx, %eax
19107 -DST( movl %edx, 20(%edi) )
19108 +DST( movl %edx, %es:20(%edi) )
19109
19110 SRC( movl 24(%esi), %ebx )
19111 SRC( movl 28(%esi), %edx )
19112 adcl %ebx, %eax
19113 -DST( movl %ebx, 24(%edi) )
19114 +DST( movl %ebx, %es:24(%edi) )
19115 adcl %edx, %eax
19116 -DST( movl %edx, 28(%edi) )
19117 +DST( movl %edx, %es:28(%edi) )
19118
19119 lea 32(%esi), %esi
19120 lea 32(%edi), %edi
19121 @@ -369,7 +385,7 @@ DST( movl %edx, 28(%edi) )
19122 shrl $2, %edx # This clears CF
19123 SRC(3: movl (%esi), %ebx )
19124 adcl %ebx, %eax
19125 -DST( movl %ebx, (%edi) )
19126 +DST( movl %ebx, %es:(%edi) )
19127 lea 4(%esi), %esi
19128 lea 4(%edi), %edi
19129 dec %edx
19130 @@ -381,12 +397,12 @@ DST( movl %ebx, (%edi) )
19131 jb 5f
19132 SRC( movw (%esi), %cx )
19133 leal 2(%esi), %esi
19134 -DST( movw %cx, (%edi) )
19135 +DST( movw %cx, %es:(%edi) )
19136 leal 2(%edi), %edi
19137 je 6f
19138 shll $16,%ecx
19139 SRC(5: movb (%esi), %cl )
19140 -DST( movb %cl, (%edi) )
19141 +DST( movb %cl, %es:(%edi) )
19142 6: addl %ecx, %eax
19143 adcl $0, %eax
19144 7:
19145 @@ -397,7 +413,7 @@ DST( movb %cl, (%edi) )
19146
19147 6001:
19148 movl ARGBASE+20(%esp), %ebx # src_err_ptr
19149 - movl $-EFAULT, (%ebx)
19150 + movl $-EFAULT, %ss:(%ebx)
19151
19152 # zero the complete destination - computing the rest
19153 # is too much work
19154 @@ -410,11 +426,15 @@ DST( movb %cl, (%edi) )
19155
19156 6002:
19157 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
19158 - movl $-EFAULT,(%ebx)
19159 + movl $-EFAULT,%ss:(%ebx)
19160 jmp 5000b
19161
19162 .previous
19163
19164 + pushl_cfi %ss
19165 + popl_cfi %ds
19166 + pushl_cfi %ss
19167 + popl_cfi %es
19168 popl_cfi %ebx
19169 CFI_RESTORE ebx
19170 popl_cfi %esi
19171 @@ -424,26 +444,43 @@ DST( movb %cl, (%edi) )
19172 popl_cfi %ecx # equivalent to addl $4,%esp
19173 ret
19174 CFI_ENDPROC
19175 -ENDPROC(csum_partial_copy_generic)
19176 +ENDPROC(csum_partial_copy_generic_to_user)
19177
19178 #else
19179
19180 /* Version for PentiumII/PPro */
19181
19182 #define ROUND1(x) \
19183 + nop; nop; nop; \
19184 SRC(movl x(%esi), %ebx ) ; \
19185 addl %ebx, %eax ; \
19186 - DST(movl %ebx, x(%edi) ) ;
19187 + DST(movl %ebx, %es:x(%edi)) ;
19188
19189 #define ROUND(x) \
19190 + nop; nop; nop; \
19191 SRC(movl x(%esi), %ebx ) ; \
19192 adcl %ebx, %eax ; \
19193 - DST(movl %ebx, x(%edi) ) ;
19194 + DST(movl %ebx, %es:x(%edi)) ;
19195
19196 #define ARGBASE 12
19197 -
19198 -ENTRY(csum_partial_copy_generic)
19199 +
19200 +ENTRY(csum_partial_copy_generic_to_user)
19201 CFI_STARTPROC
19202 +
19203 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19204 + pushl_cfi %gs
19205 + popl_cfi %es
19206 + jmp csum_partial_copy_generic
19207 +#endif
19208 +
19209 +ENTRY(csum_partial_copy_generic_from_user)
19210 +
19211 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19212 + pushl_cfi %gs
19213 + popl_cfi %ds
19214 +#endif
19215 +
19216 +ENTRY(csum_partial_copy_generic)
19217 pushl_cfi %ebx
19218 CFI_REL_OFFSET ebx, 0
19219 pushl_cfi %edi
19220 @@ -464,7 +501,7 @@ ENTRY(csum_partial_copy_generic)
19221 subl %ebx, %edi
19222 lea -1(%esi),%edx
19223 andl $-32,%edx
19224 - lea 3f(%ebx,%ebx), %ebx
19225 + lea 3f(%ebx,%ebx,2), %ebx
19226 testl %esi, %esi
19227 jmp *%ebx
19228 1: addl $64,%esi
19229 @@ -485,19 +522,19 @@ ENTRY(csum_partial_copy_generic)
19230 jb 5f
19231 SRC( movw (%esi), %dx )
19232 leal 2(%esi), %esi
19233 -DST( movw %dx, (%edi) )
19234 +DST( movw %dx, %es:(%edi) )
19235 leal 2(%edi), %edi
19236 je 6f
19237 shll $16,%edx
19238 5:
19239 SRC( movb (%esi), %dl )
19240 -DST( movb %dl, (%edi) )
19241 +DST( movb %dl, %es:(%edi) )
19242 6: addl %edx, %eax
19243 adcl $0, %eax
19244 7:
19245 .section .fixup, "ax"
19246 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
19247 - movl $-EFAULT, (%ebx)
19248 + movl $-EFAULT, %ss:(%ebx)
19249 # zero the complete destination (computing the rest is too much work)
19250 movl ARGBASE+8(%esp),%edi # dst
19251 movl ARGBASE+12(%esp),%ecx # len
19252 @@ -505,10 +542,17 @@ DST( movb %dl, (%edi) )
19253 rep; stosb
19254 jmp 7b
19255 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
19256 - movl $-EFAULT, (%ebx)
19257 + movl $-EFAULT, %ss:(%ebx)
19258 jmp 7b
19259 .previous
19260
19261 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19262 + pushl_cfi %ss
19263 + popl_cfi %ds
19264 + pushl_cfi %ss
19265 + popl_cfi %es
19266 +#endif
19267 +
19268 popl_cfi %esi
19269 CFI_RESTORE esi
19270 popl_cfi %edi
19271 @@ -517,7 +561,7 @@ DST( movb %dl, (%edi) )
19272 CFI_RESTORE ebx
19273 ret
19274 CFI_ENDPROC
19275 -ENDPROC(csum_partial_copy_generic)
19276 +ENDPROC(csum_partial_copy_generic_to_user)
19277
19278 #undef ROUND
19279 #undef ROUND1
19280 diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
19281 index f2145cf..cea889d 100644
19282 --- a/arch/x86/lib/clear_page_64.S
19283 +++ b/arch/x86/lib/clear_page_64.S
19284 @@ -11,6 +11,7 @@ ENTRY(clear_page_c)
19285 movl $4096/8,%ecx
19286 xorl %eax,%eax
19287 rep stosq
19288 + pax_force_retaddr
19289 ret
19290 CFI_ENDPROC
19291 ENDPROC(clear_page_c)
19292 @@ -20,6 +21,7 @@ ENTRY(clear_page_c_e)
19293 movl $4096,%ecx
19294 xorl %eax,%eax
19295 rep stosb
19296 + pax_force_retaddr
19297 ret
19298 CFI_ENDPROC
19299 ENDPROC(clear_page_c_e)
19300 @@ -43,6 +45,7 @@ ENTRY(clear_page)
19301 leaq 64(%rdi),%rdi
19302 jnz .Lloop
19303 nop
19304 + pax_force_retaddr
19305 ret
19306 CFI_ENDPROC
19307 .Lclear_page_end:
19308 @@ -58,7 +61,7 @@ ENDPROC(clear_page)
19309
19310 #include <asm/cpufeature.h>
19311
19312 - .section .altinstr_replacement,"ax"
19313 + .section .altinstr_replacement,"a"
19314 1: .byte 0xeb /* jmp <disp8> */
19315 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
19316 2: .byte 0xeb /* jmp <disp8> */
19317 diff --git a/arch/x86/lib/cmpxchg16b_emu.S b/arch/x86/lib/cmpxchg16b_emu.S
19318 index 1e572c5..2a162cd 100644
19319 --- a/arch/x86/lib/cmpxchg16b_emu.S
19320 +++ b/arch/x86/lib/cmpxchg16b_emu.S
19321 @@ -53,11 +53,13 @@ this_cpu_cmpxchg16b_emu:
19322
19323 popf
19324 mov $1, %al
19325 + pax_force_retaddr
19326 ret
19327
19328 not_same:
19329 popf
19330 xor %al,%al
19331 + pax_force_retaddr
19332 ret
19333
19334 CFI_ENDPROC
19335 diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
19336 index 01c805b..dccb07f 100644
19337 --- a/arch/x86/lib/copy_page_64.S
19338 +++ b/arch/x86/lib/copy_page_64.S
19339 @@ -9,6 +9,7 @@ copy_page_c:
19340 CFI_STARTPROC
19341 movl $4096/8,%ecx
19342 rep movsq
19343 + pax_force_retaddr
19344 ret
19345 CFI_ENDPROC
19346 ENDPROC(copy_page_c)
19347 @@ -39,7 +40,7 @@ ENTRY(copy_page)
19348 movq 16 (%rsi), %rdx
19349 movq 24 (%rsi), %r8
19350 movq 32 (%rsi), %r9
19351 - movq 40 (%rsi), %r10
19352 + movq 40 (%rsi), %r13
19353 movq 48 (%rsi), %r11
19354 movq 56 (%rsi), %r12
19355
19356 @@ -50,7 +51,7 @@ ENTRY(copy_page)
19357 movq %rdx, 16 (%rdi)
19358 movq %r8, 24 (%rdi)
19359 movq %r9, 32 (%rdi)
19360 - movq %r10, 40 (%rdi)
19361 + movq %r13, 40 (%rdi)
19362 movq %r11, 48 (%rdi)
19363 movq %r12, 56 (%rdi)
19364
19365 @@ -69,7 +70,7 @@ ENTRY(copy_page)
19366 movq 16 (%rsi), %rdx
19367 movq 24 (%rsi), %r8
19368 movq 32 (%rsi), %r9
19369 - movq 40 (%rsi), %r10
19370 + movq 40 (%rsi), %r13
19371 movq 48 (%rsi), %r11
19372 movq 56 (%rsi), %r12
19373
19374 @@ -78,7 +79,7 @@ ENTRY(copy_page)
19375 movq %rdx, 16 (%rdi)
19376 movq %r8, 24 (%rdi)
19377 movq %r9, 32 (%rdi)
19378 - movq %r10, 40 (%rdi)
19379 + movq %r13, 40 (%rdi)
19380 movq %r11, 48 (%rdi)
19381 movq %r12, 56 (%rdi)
19382
19383 @@ -95,6 +96,7 @@ ENTRY(copy_page)
19384 CFI_RESTORE r13
19385 addq $3*8,%rsp
19386 CFI_ADJUST_CFA_OFFSET -3*8
19387 + pax_force_retaddr
19388 ret
19389 .Lcopy_page_end:
19390 CFI_ENDPROC
19391 @@ -105,7 +107,7 @@ ENDPROC(copy_page)
19392
19393 #include <asm/cpufeature.h>
19394
19395 - .section .altinstr_replacement,"ax"
19396 + .section .altinstr_replacement,"a"
19397 1: .byte 0xeb /* jmp <disp8> */
19398 .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */
19399 2:
19400 diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
19401 index 0248402..821c786 100644
19402 --- a/arch/x86/lib/copy_user_64.S
19403 +++ b/arch/x86/lib/copy_user_64.S
19404 @@ -16,6 +16,7 @@
19405 #include <asm/thread_info.h>
19406 #include <asm/cpufeature.h>
19407 #include <asm/alternative-asm.h>
19408 +#include <asm/pgtable.h>
19409
19410 /*
19411 * By placing feature2 after feature1 in altinstructions section, we logically
19412 @@ -29,7 +30,7 @@
19413 .byte 0xe9 /* 32bit jump */
19414 .long \orig-1f /* by default jump to orig */
19415 1:
19416 - .section .altinstr_replacement,"ax"
19417 + .section .altinstr_replacement,"a"
19418 2: .byte 0xe9 /* near jump with 32bit immediate */
19419 .long \alt1-1b /* offset */ /* or alternatively to alt1 */
19420 3: .byte 0xe9 /* near jump with 32bit immediate */
19421 @@ -71,47 +72,20 @@
19422 #endif
19423 .endm
19424
19425 -/* Standard copy_to_user with segment limit checking */
19426 -ENTRY(_copy_to_user)
19427 - CFI_STARTPROC
19428 - GET_THREAD_INFO(%rax)
19429 - movq %rdi,%rcx
19430 - addq %rdx,%rcx
19431 - jc bad_to_user
19432 - cmpq TI_addr_limit(%rax),%rcx
19433 - ja bad_to_user
19434 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
19435 - copy_user_generic_unrolled,copy_user_generic_string, \
19436 - copy_user_enhanced_fast_string
19437 - CFI_ENDPROC
19438 -ENDPROC(_copy_to_user)
19439 -
19440 -/* Standard copy_from_user with segment limit checking */
19441 -ENTRY(_copy_from_user)
19442 - CFI_STARTPROC
19443 - GET_THREAD_INFO(%rax)
19444 - movq %rsi,%rcx
19445 - addq %rdx,%rcx
19446 - jc bad_from_user
19447 - cmpq TI_addr_limit(%rax),%rcx
19448 - ja bad_from_user
19449 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
19450 - copy_user_generic_unrolled,copy_user_generic_string, \
19451 - copy_user_enhanced_fast_string
19452 - CFI_ENDPROC
19453 -ENDPROC(_copy_from_user)
19454 -
19455 .section .fixup,"ax"
19456 /* must zero dest */
19457 ENTRY(bad_from_user)
19458 bad_from_user:
19459 CFI_STARTPROC
19460 + testl %edx,%edx
19461 + js bad_to_user
19462 movl %edx,%ecx
19463 xorl %eax,%eax
19464 rep
19465 stosb
19466 bad_to_user:
19467 movl %edx,%eax
19468 + pax_force_retaddr
19469 ret
19470 CFI_ENDPROC
19471 ENDPROC(bad_from_user)
19472 @@ -141,19 +115,19 @@ ENTRY(copy_user_generic_unrolled)
19473 jz 17f
19474 1: movq (%rsi),%r8
19475 2: movq 1*8(%rsi),%r9
19476 -3: movq 2*8(%rsi),%r10
19477 +3: movq 2*8(%rsi),%rax
19478 4: movq 3*8(%rsi),%r11
19479 5: movq %r8,(%rdi)
19480 6: movq %r9,1*8(%rdi)
19481 -7: movq %r10,2*8(%rdi)
19482 +7: movq %rax,2*8(%rdi)
19483 8: movq %r11,3*8(%rdi)
19484 9: movq 4*8(%rsi),%r8
19485 10: movq 5*8(%rsi),%r9
19486 -11: movq 6*8(%rsi),%r10
19487 +11: movq 6*8(%rsi),%rax
19488 12: movq 7*8(%rsi),%r11
19489 13: movq %r8,4*8(%rdi)
19490 14: movq %r9,5*8(%rdi)
19491 -15: movq %r10,6*8(%rdi)
19492 +15: movq %rax,6*8(%rdi)
19493 16: movq %r11,7*8(%rdi)
19494 leaq 64(%rsi),%rsi
19495 leaq 64(%rdi),%rdi
19496 @@ -179,6 +153,7 @@ ENTRY(copy_user_generic_unrolled)
19497 decl %ecx
19498 jnz 21b
19499 23: xor %eax,%eax
19500 + pax_force_retaddr
19501 ret
19502
19503 .section .fixup,"ax"
19504 @@ -251,6 +226,7 @@ ENTRY(copy_user_generic_string)
19505 3: rep
19506 movsb
19507 4: xorl %eax,%eax
19508 + pax_force_retaddr
19509 ret
19510
19511 .section .fixup,"ax"
19512 @@ -287,6 +263,7 @@ ENTRY(copy_user_enhanced_fast_string)
19513 1: rep
19514 movsb
19515 2: xorl %eax,%eax
19516 + pax_force_retaddr
19517 ret
19518
19519 .section .fixup,"ax"
19520 diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S
19521 index cb0c112..e3a6895 100644
19522 --- a/arch/x86/lib/copy_user_nocache_64.S
19523 +++ b/arch/x86/lib/copy_user_nocache_64.S
19524 @@ -8,12 +8,14 @@
19525
19526 #include <linux/linkage.h>
19527 #include <asm/dwarf2.h>
19528 +#include <asm/alternative-asm.h>
19529
19530 #define FIX_ALIGNMENT 1
19531
19532 #include <asm/current.h>
19533 #include <asm/asm-offsets.h>
19534 #include <asm/thread_info.h>
19535 +#include <asm/pgtable.h>
19536
19537 .macro ALIGN_DESTINATION
19538 #ifdef FIX_ALIGNMENT
19539 @@ -50,6 +52,15 @@
19540 */
19541 ENTRY(__copy_user_nocache)
19542 CFI_STARTPROC
19543 +
19544 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19545 + mov $PAX_USER_SHADOW_BASE,%rcx
19546 + cmp %rcx,%rsi
19547 + jae 1f
19548 + add %rcx,%rsi
19549 +1:
19550 +#endif
19551 +
19552 cmpl $8,%edx
19553 jb 20f /* less then 8 bytes, go to byte copy loop */
19554 ALIGN_DESTINATION
19555 @@ -59,19 +70,19 @@ ENTRY(__copy_user_nocache)
19556 jz 17f
19557 1: movq (%rsi),%r8
19558 2: movq 1*8(%rsi),%r9
19559 -3: movq 2*8(%rsi),%r10
19560 +3: movq 2*8(%rsi),%rax
19561 4: movq 3*8(%rsi),%r11
19562 5: movnti %r8,(%rdi)
19563 6: movnti %r9,1*8(%rdi)
19564 -7: movnti %r10,2*8(%rdi)
19565 +7: movnti %rax,2*8(%rdi)
19566 8: movnti %r11,3*8(%rdi)
19567 9: movq 4*8(%rsi),%r8
19568 10: movq 5*8(%rsi),%r9
19569 -11: movq 6*8(%rsi),%r10
19570 +11: movq 6*8(%rsi),%rax
19571 12: movq 7*8(%rsi),%r11
19572 13: movnti %r8,4*8(%rdi)
19573 14: movnti %r9,5*8(%rdi)
19574 -15: movnti %r10,6*8(%rdi)
19575 +15: movnti %rax,6*8(%rdi)
19576 16: movnti %r11,7*8(%rdi)
19577 leaq 64(%rsi),%rsi
19578 leaq 64(%rdi),%rdi
19579 @@ -98,6 +109,7 @@ ENTRY(__copy_user_nocache)
19580 jnz 21b
19581 23: xorl %eax,%eax
19582 sfence
19583 + pax_force_retaddr
19584 ret
19585
19586 .section .fixup,"ax"
19587 diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
19588 index fb903b7..c92b7f7 100644
19589 --- a/arch/x86/lib/csum-copy_64.S
19590 +++ b/arch/x86/lib/csum-copy_64.S
19591 @@ -8,6 +8,7 @@
19592 #include <linux/linkage.h>
19593 #include <asm/dwarf2.h>
19594 #include <asm/errno.h>
19595 +#include <asm/alternative-asm.h>
19596
19597 /*
19598 * Checksum copy with exception handling.
19599 @@ -228,6 +229,7 @@ ENTRY(csum_partial_copy_generic)
19600 CFI_RESTORE rbp
19601 addq $7*8, %rsp
19602 CFI_ADJUST_CFA_OFFSET -7*8
19603 + pax_force_retaddr 0, 1
19604 ret
19605 CFI_RESTORE_STATE
19606
19607 diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
19608 index 459b58a..9570bc7 100644
19609 --- a/arch/x86/lib/csum-wrappers_64.c
19610 +++ b/arch/x86/lib/csum-wrappers_64.c
19611 @@ -52,7 +52,13 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
19612 len -= 2;
19613 }
19614 }
19615 - isum = csum_partial_copy_generic((__force const void *)src,
19616 +
19617 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19618 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
19619 + src += PAX_USER_SHADOW_BASE;
19620 +#endif
19621 +
19622 + isum = csum_partial_copy_generic((const void __force_kernel *)src,
19623 dst, len, isum, errp, NULL);
19624 if (unlikely(*errp))
19625 goto out_err;
19626 @@ -105,7 +111,13 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
19627 }
19628
19629 *errp = 0;
19630 - return csum_partial_copy_generic(src, (void __force *)dst,
19631 +
19632 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19633 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
19634 + dst += PAX_USER_SHADOW_BASE;
19635 +#endif
19636 +
19637 + return csum_partial_copy_generic(src, (void __force_kernel *)dst,
19638 len, isum, NULL, errp);
19639 }
19640 EXPORT_SYMBOL(csum_partial_copy_to_user);
19641 diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
19642 index 51f1504..ddac4c1 100644
19643 --- a/arch/x86/lib/getuser.S
19644 +++ b/arch/x86/lib/getuser.S
19645 @@ -33,15 +33,38 @@
19646 #include <asm/asm-offsets.h>
19647 #include <asm/thread_info.h>
19648 #include <asm/asm.h>
19649 +#include <asm/segment.h>
19650 +#include <asm/pgtable.h>
19651 +#include <asm/alternative-asm.h>
19652 +
19653 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
19654 +#define __copyuser_seg gs;
19655 +#else
19656 +#define __copyuser_seg
19657 +#endif
19658
19659 .text
19660 ENTRY(__get_user_1)
19661 CFI_STARTPROC
19662 +
19663 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19664 GET_THREAD_INFO(%_ASM_DX)
19665 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
19666 jae bad_get_user
19667 -1: movzb (%_ASM_AX),%edx
19668 +
19669 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19670 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
19671 + cmp %_ASM_DX,%_ASM_AX
19672 + jae 1234f
19673 + add %_ASM_DX,%_ASM_AX
19674 +1234:
19675 +#endif
19676 +
19677 +#endif
19678 +
19679 +1: __copyuser_seg movzb (%_ASM_AX),%edx
19680 xor %eax,%eax
19681 + pax_force_retaddr
19682 ret
19683 CFI_ENDPROC
19684 ENDPROC(__get_user_1)
19685 @@ -49,12 +72,26 @@ ENDPROC(__get_user_1)
19686 ENTRY(__get_user_2)
19687 CFI_STARTPROC
19688 add $1,%_ASM_AX
19689 +
19690 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19691 jc bad_get_user
19692 GET_THREAD_INFO(%_ASM_DX)
19693 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
19694 jae bad_get_user
19695 -2: movzwl -1(%_ASM_AX),%edx
19696 +
19697 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19698 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
19699 + cmp %_ASM_DX,%_ASM_AX
19700 + jae 1234f
19701 + add %_ASM_DX,%_ASM_AX
19702 +1234:
19703 +#endif
19704 +
19705 +#endif
19706 +
19707 +2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
19708 xor %eax,%eax
19709 + pax_force_retaddr
19710 ret
19711 CFI_ENDPROC
19712 ENDPROC(__get_user_2)
19713 @@ -62,12 +99,26 @@ ENDPROC(__get_user_2)
19714 ENTRY(__get_user_4)
19715 CFI_STARTPROC
19716 add $3,%_ASM_AX
19717 +
19718 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19719 jc bad_get_user
19720 GET_THREAD_INFO(%_ASM_DX)
19721 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
19722 jae bad_get_user
19723 -3: mov -3(%_ASM_AX),%edx
19724 +
19725 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19726 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
19727 + cmp %_ASM_DX,%_ASM_AX
19728 + jae 1234f
19729 + add %_ASM_DX,%_ASM_AX
19730 +1234:
19731 +#endif
19732 +
19733 +#endif
19734 +
19735 +3: __copyuser_seg mov -3(%_ASM_AX),%edx
19736 xor %eax,%eax
19737 + pax_force_retaddr
19738 ret
19739 CFI_ENDPROC
19740 ENDPROC(__get_user_4)
19741 @@ -80,8 +131,18 @@ ENTRY(__get_user_8)
19742 GET_THREAD_INFO(%_ASM_DX)
19743 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
19744 jae bad_get_user
19745 +
19746 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19747 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
19748 + cmp %_ASM_DX,%_ASM_AX
19749 + jae 1234f
19750 + add %_ASM_DX,%_ASM_AX
19751 +1234:
19752 +#endif
19753 +
19754 4: movq -7(%_ASM_AX),%_ASM_DX
19755 xor %eax,%eax
19756 + pax_force_retaddr
19757 ret
19758 CFI_ENDPROC
19759 ENDPROC(__get_user_8)
19760 @@ -91,6 +152,7 @@ bad_get_user:
19761 CFI_STARTPROC
19762 xor %edx,%edx
19763 mov $(-EFAULT),%_ASM_AX
19764 + pax_force_retaddr
19765 ret
19766 CFI_ENDPROC
19767 END(bad_get_user)
19768 diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c
19769 index 374562e..a75830b 100644
19770 --- a/arch/x86/lib/insn.c
19771 +++ b/arch/x86/lib/insn.c
19772 @@ -21,6 +21,11 @@
19773 #include <linux/string.h>
19774 #include <asm/inat.h>
19775 #include <asm/insn.h>
19776 +#ifdef __KERNEL__
19777 +#include <asm/pgtable_types.h>
19778 +#else
19779 +#define ktla_ktva(addr) addr
19780 +#endif
19781
19782 /* Verify next sizeof(t) bytes can be on the same instruction */
19783 #define validate_next(t, insn, n) \
19784 @@ -49,8 +54,8 @@
19785 void insn_init(struct insn *insn, const void *kaddr, int x86_64)
19786 {
19787 memset(insn, 0, sizeof(*insn));
19788 - insn->kaddr = kaddr;
19789 - insn->next_byte = kaddr;
19790 + insn->kaddr = ktla_ktva(kaddr);
19791 + insn->next_byte = ktla_ktva(kaddr);
19792 insn->x86_64 = x86_64 ? 1 : 0;
19793 insn->opnd_bytes = 4;
19794 if (x86_64)
19795 diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S
19796 index 05a95e7..326f2fa 100644
19797 --- a/arch/x86/lib/iomap_copy_64.S
19798 +++ b/arch/x86/lib/iomap_copy_64.S
19799 @@ -17,6 +17,7 @@
19800
19801 #include <linux/linkage.h>
19802 #include <asm/dwarf2.h>
19803 +#include <asm/alternative-asm.h>
19804
19805 /*
19806 * override generic version in lib/iomap_copy.c
19807 @@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy)
19808 CFI_STARTPROC
19809 movl %edx,%ecx
19810 rep movsd
19811 + pax_force_retaddr
19812 ret
19813 CFI_ENDPROC
19814 ENDPROC(__iowrite32_copy)
19815 diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
19816 index efbf2a0..8893637 100644
19817 --- a/arch/x86/lib/memcpy_64.S
19818 +++ b/arch/x86/lib/memcpy_64.S
19819 @@ -34,6 +34,7 @@
19820 rep movsq
19821 movl %edx, %ecx
19822 rep movsb
19823 + pax_force_retaddr
19824 ret
19825 .Lmemcpy_e:
19826 .previous
19827 @@ -51,6 +52,7 @@
19828
19829 movl %edx, %ecx
19830 rep movsb
19831 + pax_force_retaddr
19832 ret
19833 .Lmemcpy_e_e:
19834 .previous
19835 @@ -81,13 +83,13 @@ ENTRY(memcpy)
19836 */
19837 movq 0*8(%rsi), %r8
19838 movq 1*8(%rsi), %r9
19839 - movq 2*8(%rsi), %r10
19840 + movq 2*8(%rsi), %rcx
19841 movq 3*8(%rsi), %r11
19842 leaq 4*8(%rsi), %rsi
19843
19844 movq %r8, 0*8(%rdi)
19845 movq %r9, 1*8(%rdi)
19846 - movq %r10, 2*8(%rdi)
19847 + movq %rcx, 2*8(%rdi)
19848 movq %r11, 3*8(%rdi)
19849 leaq 4*8(%rdi), %rdi
19850 jae .Lcopy_forward_loop
19851 @@ -110,12 +112,12 @@ ENTRY(memcpy)
19852 subq $0x20, %rdx
19853 movq -1*8(%rsi), %r8
19854 movq -2*8(%rsi), %r9
19855 - movq -3*8(%rsi), %r10
19856 + movq -3*8(%rsi), %rcx
19857 movq -4*8(%rsi), %r11
19858 leaq -4*8(%rsi), %rsi
19859 movq %r8, -1*8(%rdi)
19860 movq %r9, -2*8(%rdi)
19861 - movq %r10, -3*8(%rdi)
19862 + movq %rcx, -3*8(%rdi)
19863 movq %r11, -4*8(%rdi)
19864 leaq -4*8(%rdi), %rdi
19865 jae .Lcopy_backward_loop
19866 @@ -135,12 +137,13 @@ ENTRY(memcpy)
19867 */
19868 movq 0*8(%rsi), %r8
19869 movq 1*8(%rsi), %r9
19870 - movq -2*8(%rsi, %rdx), %r10
19871 + movq -2*8(%rsi, %rdx), %rcx
19872 movq -1*8(%rsi, %rdx), %r11
19873 movq %r8, 0*8(%rdi)
19874 movq %r9, 1*8(%rdi)
19875 - movq %r10, -2*8(%rdi, %rdx)
19876 + movq %rcx, -2*8(%rdi, %rdx)
19877 movq %r11, -1*8(%rdi, %rdx)
19878 + pax_force_retaddr
19879 retq
19880 .p2align 4
19881 .Lless_16bytes:
19882 @@ -153,6 +156,7 @@ ENTRY(memcpy)
19883 movq -1*8(%rsi, %rdx), %r9
19884 movq %r8, 0*8(%rdi)
19885 movq %r9, -1*8(%rdi, %rdx)
19886 + pax_force_retaddr
19887 retq
19888 .p2align 4
19889 .Lless_8bytes:
19890 @@ -166,6 +170,7 @@ ENTRY(memcpy)
19891 movl -4(%rsi, %rdx), %r8d
19892 movl %ecx, (%rdi)
19893 movl %r8d, -4(%rdi, %rdx)
19894 + pax_force_retaddr
19895 retq
19896 .p2align 4
19897 .Lless_3bytes:
19898 @@ -183,6 +188,7 @@ ENTRY(memcpy)
19899 jnz .Lloop_1
19900
19901 .Lend:
19902 + pax_force_retaddr
19903 retq
19904 CFI_ENDPROC
19905 ENDPROC(memcpy)
19906 diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
19907 index ee16461..c39c199 100644
19908 --- a/arch/x86/lib/memmove_64.S
19909 +++ b/arch/x86/lib/memmove_64.S
19910 @@ -61,13 +61,13 @@ ENTRY(memmove)
19911 5:
19912 sub $0x20, %rdx
19913 movq 0*8(%rsi), %r11
19914 - movq 1*8(%rsi), %r10
19915 + movq 1*8(%rsi), %rcx
19916 movq 2*8(%rsi), %r9
19917 movq 3*8(%rsi), %r8
19918 leaq 4*8(%rsi), %rsi
19919
19920 movq %r11, 0*8(%rdi)
19921 - movq %r10, 1*8(%rdi)
19922 + movq %rcx, 1*8(%rdi)
19923 movq %r9, 2*8(%rdi)
19924 movq %r8, 3*8(%rdi)
19925 leaq 4*8(%rdi), %rdi
19926 @@ -81,10 +81,10 @@ ENTRY(memmove)
19927 4:
19928 movq %rdx, %rcx
19929 movq -8(%rsi, %rdx), %r11
19930 - lea -8(%rdi, %rdx), %r10
19931 + lea -8(%rdi, %rdx), %r9
19932 shrq $3, %rcx
19933 rep movsq
19934 - movq %r11, (%r10)
19935 + movq %r11, (%r9)
19936 jmp 13f
19937 .Lmemmove_end_forward:
19938
19939 @@ -95,14 +95,14 @@ ENTRY(memmove)
19940 7:
19941 movq %rdx, %rcx
19942 movq (%rsi), %r11
19943 - movq %rdi, %r10
19944 + movq %rdi, %r9
19945 leaq -8(%rsi, %rdx), %rsi
19946 leaq -8(%rdi, %rdx), %rdi
19947 shrq $3, %rcx
19948 std
19949 rep movsq
19950 cld
19951 - movq %r11, (%r10)
19952 + movq %r11, (%r9)
19953 jmp 13f
19954
19955 /*
19956 @@ -127,13 +127,13 @@ ENTRY(memmove)
19957 8:
19958 subq $0x20, %rdx
19959 movq -1*8(%rsi), %r11
19960 - movq -2*8(%rsi), %r10
19961 + movq -2*8(%rsi), %rcx
19962 movq -3*8(%rsi), %r9
19963 movq -4*8(%rsi), %r8
19964 leaq -4*8(%rsi), %rsi
19965
19966 movq %r11, -1*8(%rdi)
19967 - movq %r10, -2*8(%rdi)
19968 + movq %rcx, -2*8(%rdi)
19969 movq %r9, -3*8(%rdi)
19970 movq %r8, -4*8(%rdi)
19971 leaq -4*8(%rdi), %rdi
19972 @@ -151,11 +151,11 @@ ENTRY(memmove)
19973 * Move data from 16 bytes to 31 bytes.
19974 */
19975 movq 0*8(%rsi), %r11
19976 - movq 1*8(%rsi), %r10
19977 + movq 1*8(%rsi), %rcx
19978 movq -2*8(%rsi, %rdx), %r9
19979 movq -1*8(%rsi, %rdx), %r8
19980 movq %r11, 0*8(%rdi)
19981 - movq %r10, 1*8(%rdi)
19982 + movq %rcx, 1*8(%rdi)
19983 movq %r9, -2*8(%rdi, %rdx)
19984 movq %r8, -1*8(%rdi, %rdx)
19985 jmp 13f
19986 @@ -167,9 +167,9 @@ ENTRY(memmove)
19987 * Move data from 8 bytes to 15 bytes.
19988 */
19989 movq 0*8(%rsi), %r11
19990 - movq -1*8(%rsi, %rdx), %r10
19991 + movq -1*8(%rsi, %rdx), %r9
19992 movq %r11, 0*8(%rdi)
19993 - movq %r10, -1*8(%rdi, %rdx)
19994 + movq %r9, -1*8(%rdi, %rdx)
19995 jmp 13f
19996 10:
19997 cmpq $4, %rdx
19998 @@ -178,9 +178,9 @@ ENTRY(memmove)
19999 * Move data from 4 bytes to 7 bytes.
20000 */
20001 movl (%rsi), %r11d
20002 - movl -4(%rsi, %rdx), %r10d
20003 + movl -4(%rsi, %rdx), %r9d
20004 movl %r11d, (%rdi)
20005 - movl %r10d, -4(%rdi, %rdx)
20006 + movl %r9d, -4(%rdi, %rdx)
20007 jmp 13f
20008 11:
20009 cmp $2, %rdx
20010 @@ -189,9 +189,9 @@ ENTRY(memmove)
20011 * Move data from 2 bytes to 3 bytes.
20012 */
20013 movw (%rsi), %r11w
20014 - movw -2(%rsi, %rdx), %r10w
20015 + movw -2(%rsi, %rdx), %r9w
20016 movw %r11w, (%rdi)
20017 - movw %r10w, -2(%rdi, %rdx)
20018 + movw %r9w, -2(%rdi, %rdx)
20019 jmp 13f
20020 12:
20021 cmp $1, %rdx
20022 @@ -202,6 +202,7 @@ ENTRY(memmove)
20023 movb (%rsi), %r11b
20024 movb %r11b, (%rdi)
20025 13:
20026 + pax_force_retaddr
20027 retq
20028 CFI_ENDPROC
20029
20030 @@ -210,6 +211,7 @@ ENTRY(memmove)
20031 /* Forward moving data. */
20032 movq %rdx, %rcx
20033 rep movsb
20034 + pax_force_retaddr
20035 retq
20036 .Lmemmove_end_forward_efs:
20037 .previous
20038 diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
20039 index 79bd454..dff325a 100644
20040 --- a/arch/x86/lib/memset_64.S
20041 +++ b/arch/x86/lib/memset_64.S
20042 @@ -31,6 +31,7 @@
20043 movl %r8d,%ecx
20044 rep stosb
20045 movq %r9,%rax
20046 + pax_force_retaddr
20047 ret
20048 .Lmemset_e:
20049 .previous
20050 @@ -53,6 +54,7 @@
20051 movl %edx,%ecx
20052 rep stosb
20053 movq %r9,%rax
20054 + pax_force_retaddr
20055 ret
20056 .Lmemset_e_e:
20057 .previous
20058 @@ -60,13 +62,13 @@
20059 ENTRY(memset)
20060 ENTRY(__memset)
20061 CFI_STARTPROC
20062 - movq %rdi,%r10
20063 movq %rdx,%r11
20064
20065 /* expand byte value */
20066 movzbl %sil,%ecx
20067 movabs $0x0101010101010101,%rax
20068 mul %rcx /* with rax, clobbers rdx */
20069 + movq %rdi,%rdx
20070
20071 /* align dst */
20072 movl %edi,%r9d
20073 @@ -120,7 +122,8 @@ ENTRY(__memset)
20074 jnz .Lloop_1
20075
20076 .Lende:
20077 - movq %r10,%rax
20078 + movq %rdx,%rax
20079 + pax_force_retaddr
20080 ret
20081
20082 CFI_RESTORE_STATE
20083 diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c
20084 index c9f2d9b..e7fd2c0 100644
20085 --- a/arch/x86/lib/mmx_32.c
20086 +++ b/arch/x86/lib/mmx_32.c
20087 @@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
20088 {
20089 void *p;
20090 int i;
20091 + unsigned long cr0;
20092
20093 if (unlikely(in_interrupt()))
20094 return __memcpy(to, from, len);
20095 @@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
20096 kernel_fpu_begin();
20097
20098 __asm__ __volatile__ (
20099 - "1: prefetch (%0)\n" /* This set is 28 bytes */
20100 - " prefetch 64(%0)\n"
20101 - " prefetch 128(%0)\n"
20102 - " prefetch 192(%0)\n"
20103 - " prefetch 256(%0)\n"
20104 + "1: prefetch (%1)\n" /* This set is 28 bytes */
20105 + " prefetch 64(%1)\n"
20106 + " prefetch 128(%1)\n"
20107 + " prefetch 192(%1)\n"
20108 + " prefetch 256(%1)\n"
20109 "2: \n"
20110 ".section .fixup, \"ax\"\n"
20111 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
20112 + "3: \n"
20113 +
20114 +#ifdef CONFIG_PAX_KERNEXEC
20115 + " movl %%cr0, %0\n"
20116 + " movl %0, %%eax\n"
20117 + " andl $0xFFFEFFFF, %%eax\n"
20118 + " movl %%eax, %%cr0\n"
20119 +#endif
20120 +
20121 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
20122 +
20123 +#ifdef CONFIG_PAX_KERNEXEC
20124 + " movl %0, %%cr0\n"
20125 +#endif
20126 +
20127 " jmp 2b\n"
20128 ".previous\n"
20129 _ASM_EXTABLE(1b, 3b)
20130 - : : "r" (from));
20131 + : "=&r" (cr0) : "r" (from) : "ax");
20132
20133 for ( ; i > 5; i--) {
20134 __asm__ __volatile__ (
20135 - "1: prefetch 320(%0)\n"
20136 - "2: movq (%0), %%mm0\n"
20137 - " movq 8(%0), %%mm1\n"
20138 - " movq 16(%0), %%mm2\n"
20139 - " movq 24(%0), %%mm3\n"
20140 - " movq %%mm0, (%1)\n"
20141 - " movq %%mm1, 8(%1)\n"
20142 - " movq %%mm2, 16(%1)\n"
20143 - " movq %%mm3, 24(%1)\n"
20144 - " movq 32(%0), %%mm0\n"
20145 - " movq 40(%0), %%mm1\n"
20146 - " movq 48(%0), %%mm2\n"
20147 - " movq 56(%0), %%mm3\n"
20148 - " movq %%mm0, 32(%1)\n"
20149 - " movq %%mm1, 40(%1)\n"
20150 - " movq %%mm2, 48(%1)\n"
20151 - " movq %%mm3, 56(%1)\n"
20152 + "1: prefetch 320(%1)\n"
20153 + "2: movq (%1), %%mm0\n"
20154 + " movq 8(%1), %%mm1\n"
20155 + " movq 16(%1), %%mm2\n"
20156 + " movq 24(%1), %%mm3\n"
20157 + " movq %%mm0, (%2)\n"
20158 + " movq %%mm1, 8(%2)\n"
20159 + " movq %%mm2, 16(%2)\n"
20160 + " movq %%mm3, 24(%2)\n"
20161 + " movq 32(%1), %%mm0\n"
20162 + " movq 40(%1), %%mm1\n"
20163 + " movq 48(%1), %%mm2\n"
20164 + " movq 56(%1), %%mm3\n"
20165 + " movq %%mm0, 32(%2)\n"
20166 + " movq %%mm1, 40(%2)\n"
20167 + " movq %%mm2, 48(%2)\n"
20168 + " movq %%mm3, 56(%2)\n"
20169 ".section .fixup, \"ax\"\n"
20170 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
20171 + "3:\n"
20172 +
20173 +#ifdef CONFIG_PAX_KERNEXEC
20174 + " movl %%cr0, %0\n"
20175 + " movl %0, %%eax\n"
20176 + " andl $0xFFFEFFFF, %%eax\n"
20177 + " movl %%eax, %%cr0\n"
20178 +#endif
20179 +
20180 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
20181 +
20182 +#ifdef CONFIG_PAX_KERNEXEC
20183 + " movl %0, %%cr0\n"
20184 +#endif
20185 +
20186 " jmp 2b\n"
20187 ".previous\n"
20188 _ASM_EXTABLE(1b, 3b)
20189 - : : "r" (from), "r" (to) : "memory");
20190 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
20191
20192 from += 64;
20193 to += 64;
20194 @@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
20195 static void fast_copy_page(void *to, void *from)
20196 {
20197 int i;
20198 + unsigned long cr0;
20199
20200 kernel_fpu_begin();
20201
20202 @@ -166,42 +196,70 @@ static void fast_copy_page(void *to, void *from)
20203 * but that is for later. -AV
20204 */
20205 __asm__ __volatile__(
20206 - "1: prefetch (%0)\n"
20207 - " prefetch 64(%0)\n"
20208 - " prefetch 128(%0)\n"
20209 - " prefetch 192(%0)\n"
20210 - " prefetch 256(%0)\n"
20211 + "1: prefetch (%1)\n"
20212 + " prefetch 64(%1)\n"
20213 + " prefetch 128(%1)\n"
20214 + " prefetch 192(%1)\n"
20215 + " prefetch 256(%1)\n"
20216 "2: \n"
20217 ".section .fixup, \"ax\"\n"
20218 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
20219 + "3: \n"
20220 +
20221 +#ifdef CONFIG_PAX_KERNEXEC
20222 + " movl %%cr0, %0\n"
20223 + " movl %0, %%eax\n"
20224 + " andl $0xFFFEFFFF, %%eax\n"
20225 + " movl %%eax, %%cr0\n"
20226 +#endif
20227 +
20228 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
20229 +
20230 +#ifdef CONFIG_PAX_KERNEXEC
20231 + " movl %0, %%cr0\n"
20232 +#endif
20233 +
20234 " jmp 2b\n"
20235 ".previous\n"
20236 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
20237 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
20238
20239 for (i = 0; i < (4096-320)/64; i++) {
20240 __asm__ __volatile__ (
20241 - "1: prefetch 320(%0)\n"
20242 - "2: movq (%0), %%mm0\n"
20243 - " movntq %%mm0, (%1)\n"
20244 - " movq 8(%0), %%mm1\n"
20245 - " movntq %%mm1, 8(%1)\n"
20246 - " movq 16(%0), %%mm2\n"
20247 - " movntq %%mm2, 16(%1)\n"
20248 - " movq 24(%0), %%mm3\n"
20249 - " movntq %%mm3, 24(%1)\n"
20250 - " movq 32(%0), %%mm4\n"
20251 - " movntq %%mm4, 32(%1)\n"
20252 - " movq 40(%0), %%mm5\n"
20253 - " movntq %%mm5, 40(%1)\n"
20254 - " movq 48(%0), %%mm6\n"
20255 - " movntq %%mm6, 48(%1)\n"
20256 - " movq 56(%0), %%mm7\n"
20257 - " movntq %%mm7, 56(%1)\n"
20258 + "1: prefetch 320(%1)\n"
20259 + "2: movq (%1), %%mm0\n"
20260 + " movntq %%mm0, (%2)\n"
20261 + " movq 8(%1), %%mm1\n"
20262 + " movntq %%mm1, 8(%2)\n"
20263 + " movq 16(%1), %%mm2\n"
20264 + " movntq %%mm2, 16(%2)\n"
20265 + " movq 24(%1), %%mm3\n"
20266 + " movntq %%mm3, 24(%2)\n"
20267 + " movq 32(%1), %%mm4\n"
20268 + " movntq %%mm4, 32(%2)\n"
20269 + " movq 40(%1), %%mm5\n"
20270 + " movntq %%mm5, 40(%2)\n"
20271 + " movq 48(%1), %%mm6\n"
20272 + " movntq %%mm6, 48(%2)\n"
20273 + " movq 56(%1), %%mm7\n"
20274 + " movntq %%mm7, 56(%2)\n"
20275 ".section .fixup, \"ax\"\n"
20276 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
20277 + "3:\n"
20278 +
20279 +#ifdef CONFIG_PAX_KERNEXEC
20280 + " movl %%cr0, %0\n"
20281 + " movl %0, %%eax\n"
20282 + " andl $0xFFFEFFFF, %%eax\n"
20283 + " movl %%eax, %%cr0\n"
20284 +#endif
20285 +
20286 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
20287 +
20288 +#ifdef CONFIG_PAX_KERNEXEC
20289 + " movl %0, %%cr0\n"
20290 +#endif
20291 +
20292 " jmp 2b\n"
20293 ".previous\n"
20294 - _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
20295 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
20296
20297 from += 64;
20298 to += 64;
20299 @@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
20300 static void fast_copy_page(void *to, void *from)
20301 {
20302 int i;
20303 + unsigned long cr0;
20304
20305 kernel_fpu_begin();
20306
20307 __asm__ __volatile__ (
20308 - "1: prefetch (%0)\n"
20309 - " prefetch 64(%0)\n"
20310 - " prefetch 128(%0)\n"
20311 - " prefetch 192(%0)\n"
20312 - " prefetch 256(%0)\n"
20313 + "1: prefetch (%1)\n"
20314 + " prefetch 64(%1)\n"
20315 + " prefetch 128(%1)\n"
20316 + " prefetch 192(%1)\n"
20317 + " prefetch 256(%1)\n"
20318 "2: \n"
20319 ".section .fixup, \"ax\"\n"
20320 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
20321 + "3: \n"
20322 +
20323 +#ifdef CONFIG_PAX_KERNEXEC
20324 + " movl %%cr0, %0\n"
20325 + " movl %0, %%eax\n"
20326 + " andl $0xFFFEFFFF, %%eax\n"
20327 + " movl %%eax, %%cr0\n"
20328 +#endif
20329 +
20330 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
20331 +
20332 +#ifdef CONFIG_PAX_KERNEXEC
20333 + " movl %0, %%cr0\n"
20334 +#endif
20335 +
20336 " jmp 2b\n"
20337 ".previous\n"
20338 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
20339 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
20340
20341 for (i = 0; i < 4096/64; i++) {
20342 __asm__ __volatile__ (
20343 - "1: prefetch 320(%0)\n"
20344 - "2: movq (%0), %%mm0\n"
20345 - " movq 8(%0), %%mm1\n"
20346 - " movq 16(%0), %%mm2\n"
20347 - " movq 24(%0), %%mm3\n"
20348 - " movq %%mm0, (%1)\n"
20349 - " movq %%mm1, 8(%1)\n"
20350 - " movq %%mm2, 16(%1)\n"
20351 - " movq %%mm3, 24(%1)\n"
20352 - " movq 32(%0), %%mm0\n"
20353 - " movq 40(%0), %%mm1\n"
20354 - " movq 48(%0), %%mm2\n"
20355 - " movq 56(%0), %%mm3\n"
20356 - " movq %%mm0, 32(%1)\n"
20357 - " movq %%mm1, 40(%1)\n"
20358 - " movq %%mm2, 48(%1)\n"
20359 - " movq %%mm3, 56(%1)\n"
20360 + "1: prefetch 320(%1)\n"
20361 + "2: movq (%1), %%mm0\n"
20362 + " movq 8(%1), %%mm1\n"
20363 + " movq 16(%1), %%mm2\n"
20364 + " movq 24(%1), %%mm3\n"
20365 + " movq %%mm0, (%2)\n"
20366 + " movq %%mm1, 8(%2)\n"
20367 + " movq %%mm2, 16(%2)\n"
20368 + " movq %%mm3, 24(%2)\n"
20369 + " movq 32(%1), %%mm0\n"
20370 + " movq 40(%1), %%mm1\n"
20371 + " movq 48(%1), %%mm2\n"
20372 + " movq 56(%1), %%mm3\n"
20373 + " movq %%mm0, 32(%2)\n"
20374 + " movq %%mm1, 40(%2)\n"
20375 + " movq %%mm2, 48(%2)\n"
20376 + " movq %%mm3, 56(%2)\n"
20377 ".section .fixup, \"ax\"\n"
20378 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
20379 + "3:\n"
20380 +
20381 +#ifdef CONFIG_PAX_KERNEXEC
20382 + " movl %%cr0, %0\n"
20383 + " movl %0, %%eax\n"
20384 + " andl $0xFFFEFFFF, %%eax\n"
20385 + " movl %%eax, %%cr0\n"
20386 +#endif
20387 +
20388 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
20389 +
20390 +#ifdef CONFIG_PAX_KERNEXEC
20391 + " movl %0, %%cr0\n"
20392 +#endif
20393 +
20394 " jmp 2b\n"
20395 ".previous\n"
20396 _ASM_EXTABLE(1b, 3b)
20397 - : : "r" (from), "r" (to) : "memory");
20398 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
20399
20400 from += 64;
20401 to += 64;
20402 diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
20403 index 69fa106..adda88b 100644
20404 --- a/arch/x86/lib/msr-reg.S
20405 +++ b/arch/x86/lib/msr-reg.S
20406 @@ -3,6 +3,7 @@
20407 #include <asm/dwarf2.h>
20408 #include <asm/asm.h>
20409 #include <asm/msr.h>
20410 +#include <asm/alternative-asm.h>
20411
20412 #ifdef CONFIG_X86_64
20413 /*
20414 @@ -16,7 +17,7 @@ ENTRY(native_\op\()_safe_regs)
20415 CFI_STARTPROC
20416 pushq_cfi %rbx
20417 pushq_cfi %rbp
20418 - movq %rdi, %r10 /* Save pointer */
20419 + movq %rdi, %r9 /* Save pointer */
20420 xorl %r11d, %r11d /* Return value */
20421 movl (%rdi), %eax
20422 movl 4(%rdi), %ecx
20423 @@ -27,16 +28,17 @@ ENTRY(native_\op\()_safe_regs)
20424 movl 28(%rdi), %edi
20425 CFI_REMEMBER_STATE
20426 1: \op
20427 -2: movl %eax, (%r10)
20428 +2: movl %eax, (%r9)
20429 movl %r11d, %eax /* Return value */
20430 - movl %ecx, 4(%r10)
20431 - movl %edx, 8(%r10)
20432 - movl %ebx, 12(%r10)
20433 - movl %ebp, 20(%r10)
20434 - movl %esi, 24(%r10)
20435 - movl %edi, 28(%r10)
20436 + movl %ecx, 4(%r9)
20437 + movl %edx, 8(%r9)
20438 + movl %ebx, 12(%r9)
20439 + movl %ebp, 20(%r9)
20440 + movl %esi, 24(%r9)
20441 + movl %edi, 28(%r9)
20442 popq_cfi %rbp
20443 popq_cfi %rbx
20444 + pax_force_retaddr
20445 ret
20446 3:
20447 CFI_RESTORE_STATE
20448 diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
20449 index 36b0d15..d381858 100644
20450 --- a/arch/x86/lib/putuser.S
20451 +++ b/arch/x86/lib/putuser.S
20452 @@ -15,7 +15,9 @@
20453 #include <asm/thread_info.h>
20454 #include <asm/errno.h>
20455 #include <asm/asm.h>
20456 -
20457 +#include <asm/segment.h>
20458 +#include <asm/pgtable.h>
20459 +#include <asm/alternative-asm.h>
20460
20461 /*
20462 * __put_user_X
20463 @@ -29,52 +31,119 @@
20464 * as they get called from within inline assembly.
20465 */
20466
20467 -#define ENTER CFI_STARTPROC ; \
20468 - GET_THREAD_INFO(%_ASM_BX)
20469 -#define EXIT ret ; \
20470 +#define ENTER CFI_STARTPROC
20471 +#define EXIT pax_force_retaddr; ret ; \
20472 CFI_ENDPROC
20473
20474 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20475 +#define _DEST %_ASM_CX,%_ASM_BX
20476 +#else
20477 +#define _DEST %_ASM_CX
20478 +#endif
20479 +
20480 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
20481 +#define __copyuser_seg gs;
20482 +#else
20483 +#define __copyuser_seg
20484 +#endif
20485 +
20486 .text
20487 ENTRY(__put_user_1)
20488 ENTER
20489 +
20490 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
20491 + GET_THREAD_INFO(%_ASM_BX)
20492 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
20493 jae bad_put_user
20494 -1: movb %al,(%_ASM_CX)
20495 +
20496 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20497 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
20498 + cmp %_ASM_BX,%_ASM_CX
20499 + jb 1234f
20500 + xor %ebx,%ebx
20501 +1234:
20502 +#endif
20503 +
20504 +#endif
20505 +
20506 +1: __copyuser_seg movb %al,(_DEST)
20507 xor %eax,%eax
20508 EXIT
20509 ENDPROC(__put_user_1)
20510
20511 ENTRY(__put_user_2)
20512 ENTER
20513 +
20514 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
20515 + GET_THREAD_INFO(%_ASM_BX)
20516 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
20517 sub $1,%_ASM_BX
20518 cmp %_ASM_BX,%_ASM_CX
20519 jae bad_put_user
20520 -2: movw %ax,(%_ASM_CX)
20521 +
20522 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20523 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
20524 + cmp %_ASM_BX,%_ASM_CX
20525 + jb 1234f
20526 + xor %ebx,%ebx
20527 +1234:
20528 +#endif
20529 +
20530 +#endif
20531 +
20532 +2: __copyuser_seg movw %ax,(_DEST)
20533 xor %eax,%eax
20534 EXIT
20535 ENDPROC(__put_user_2)
20536
20537 ENTRY(__put_user_4)
20538 ENTER
20539 +
20540 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
20541 + GET_THREAD_INFO(%_ASM_BX)
20542 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
20543 sub $3,%_ASM_BX
20544 cmp %_ASM_BX,%_ASM_CX
20545 jae bad_put_user
20546 -3: movl %eax,(%_ASM_CX)
20547 +
20548 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20549 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
20550 + cmp %_ASM_BX,%_ASM_CX
20551 + jb 1234f
20552 + xor %ebx,%ebx
20553 +1234:
20554 +#endif
20555 +
20556 +#endif
20557 +
20558 +3: __copyuser_seg movl %eax,(_DEST)
20559 xor %eax,%eax
20560 EXIT
20561 ENDPROC(__put_user_4)
20562
20563 ENTRY(__put_user_8)
20564 ENTER
20565 +
20566 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
20567 + GET_THREAD_INFO(%_ASM_BX)
20568 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
20569 sub $7,%_ASM_BX
20570 cmp %_ASM_BX,%_ASM_CX
20571 jae bad_put_user
20572 -4: mov %_ASM_AX,(%_ASM_CX)
20573 +
20574 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20575 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
20576 + cmp %_ASM_BX,%_ASM_CX
20577 + jb 1234f
20578 + xor %ebx,%ebx
20579 +1234:
20580 +#endif
20581 +
20582 +#endif
20583 +
20584 +4: __copyuser_seg mov %_ASM_AX,(_DEST)
20585 #ifdef CONFIG_X86_32
20586 -5: movl %edx,4(%_ASM_CX)
20587 +5: __copyuser_seg movl %edx,4(_DEST)
20588 #endif
20589 xor %eax,%eax
20590 EXIT
20591 diff --git a/arch/x86/lib/rwlock.S b/arch/x86/lib/rwlock.S
20592 index 1cad221..de671ee 100644
20593 --- a/arch/x86/lib/rwlock.S
20594 +++ b/arch/x86/lib/rwlock.S
20595 @@ -16,13 +16,34 @@ ENTRY(__write_lock_failed)
20596 FRAME
20597 0: LOCK_PREFIX
20598 WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
20599 +
20600 +#ifdef CONFIG_PAX_REFCOUNT
20601 + jno 1234f
20602 + LOCK_PREFIX
20603 + WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
20604 + int $4
20605 +1234:
20606 + _ASM_EXTABLE(1234b, 1234b)
20607 +#endif
20608 +
20609 1: rep; nop
20610 cmpl $WRITE_LOCK_CMP, (%__lock_ptr)
20611 jne 1b
20612 LOCK_PREFIX
20613 WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
20614 +
20615 +#ifdef CONFIG_PAX_REFCOUNT
20616 + jno 1234f
20617 + LOCK_PREFIX
20618 + WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
20619 + int $4
20620 +1234:
20621 + _ASM_EXTABLE(1234b, 1234b)
20622 +#endif
20623 +
20624 jnz 0b
20625 ENDFRAME
20626 + pax_force_retaddr
20627 ret
20628 CFI_ENDPROC
20629 END(__write_lock_failed)
20630 @@ -32,13 +53,34 @@ ENTRY(__read_lock_failed)
20631 FRAME
20632 0: LOCK_PREFIX
20633 READ_LOCK_SIZE(inc) (%__lock_ptr)
20634 +
20635 +#ifdef CONFIG_PAX_REFCOUNT
20636 + jno 1234f
20637 + LOCK_PREFIX
20638 + READ_LOCK_SIZE(dec) (%__lock_ptr)
20639 + int $4
20640 +1234:
20641 + _ASM_EXTABLE(1234b, 1234b)
20642 +#endif
20643 +
20644 1: rep; nop
20645 READ_LOCK_SIZE(cmp) $1, (%__lock_ptr)
20646 js 1b
20647 LOCK_PREFIX
20648 READ_LOCK_SIZE(dec) (%__lock_ptr)
20649 +
20650 +#ifdef CONFIG_PAX_REFCOUNT
20651 + jno 1234f
20652 + LOCK_PREFIX
20653 + READ_LOCK_SIZE(inc) (%__lock_ptr)
20654 + int $4
20655 +1234:
20656 + _ASM_EXTABLE(1234b, 1234b)
20657 +#endif
20658 +
20659 js 0b
20660 ENDFRAME
20661 + pax_force_retaddr
20662 ret
20663 CFI_ENDPROC
20664 END(__read_lock_failed)
20665 diff --git a/arch/x86/lib/rwsem.S b/arch/x86/lib/rwsem.S
20666 index 5dff5f0..cadebf4 100644
20667 --- a/arch/x86/lib/rwsem.S
20668 +++ b/arch/x86/lib/rwsem.S
20669 @@ -94,6 +94,7 @@ ENTRY(call_rwsem_down_read_failed)
20670 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
20671 CFI_RESTORE __ASM_REG(dx)
20672 restore_common_regs
20673 + pax_force_retaddr
20674 ret
20675 CFI_ENDPROC
20676 ENDPROC(call_rwsem_down_read_failed)
20677 @@ -104,6 +105,7 @@ ENTRY(call_rwsem_down_write_failed)
20678 movq %rax,%rdi
20679 call rwsem_down_write_failed
20680 restore_common_regs
20681 + pax_force_retaddr
20682 ret
20683 CFI_ENDPROC
20684 ENDPROC(call_rwsem_down_write_failed)
20685 @@ -117,7 +119,8 @@ ENTRY(call_rwsem_wake)
20686 movq %rax,%rdi
20687 call rwsem_wake
20688 restore_common_regs
20689 -1: ret
20690 +1: pax_force_retaddr
20691 + ret
20692 CFI_ENDPROC
20693 ENDPROC(call_rwsem_wake)
20694
20695 @@ -131,6 +134,7 @@ ENTRY(call_rwsem_downgrade_wake)
20696 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
20697 CFI_RESTORE __ASM_REG(dx)
20698 restore_common_regs
20699 + pax_force_retaddr
20700 ret
20701 CFI_ENDPROC
20702 ENDPROC(call_rwsem_downgrade_wake)
20703 diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
20704 index a63efd6..ccecad8 100644
20705 --- a/arch/x86/lib/thunk_64.S
20706 +++ b/arch/x86/lib/thunk_64.S
20707 @@ -8,6 +8,7 @@
20708 #include <linux/linkage.h>
20709 #include <asm/dwarf2.h>
20710 #include <asm/calling.h>
20711 +#include <asm/alternative-asm.h>
20712
20713 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
20714 .macro THUNK name, func, put_ret_addr_in_rdi=0
20715 @@ -41,5 +42,6 @@
20716 SAVE_ARGS
20717 restore:
20718 RESTORE_ARGS
20719 + pax_force_retaddr
20720 ret
20721 CFI_ENDPROC
20722 diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
20723 index e218d5d..35679b4 100644
20724 --- a/arch/x86/lib/usercopy_32.c
20725 +++ b/arch/x86/lib/usercopy_32.c
20726 @@ -43,7 +43,7 @@ do { \
20727 __asm__ __volatile__( \
20728 " testl %1,%1\n" \
20729 " jz 2f\n" \
20730 - "0: lodsb\n" \
20731 + "0: "__copyuser_seg"lodsb\n" \
20732 " stosb\n" \
20733 " testb %%al,%%al\n" \
20734 " jz 1f\n" \
20735 @@ -128,10 +128,12 @@ do { \
20736 int __d0; \
20737 might_fault(); \
20738 __asm__ __volatile__( \
20739 + __COPYUSER_SET_ES \
20740 "0: rep; stosl\n" \
20741 " movl %2,%0\n" \
20742 "1: rep; stosb\n" \
20743 "2:\n" \
20744 + __COPYUSER_RESTORE_ES \
20745 ".section .fixup,\"ax\"\n" \
20746 "3: lea 0(%2,%0,4),%0\n" \
20747 " jmp 2b\n" \
20748 @@ -200,6 +202,7 @@ long strnlen_user(const char __user *s, long n)
20749 might_fault();
20750
20751 __asm__ __volatile__(
20752 + __COPYUSER_SET_ES
20753 " testl %0, %0\n"
20754 " jz 3f\n"
20755 " andl %0,%%ecx\n"
20756 @@ -208,6 +211,7 @@ long strnlen_user(const char __user *s, long n)
20757 " subl %%ecx,%0\n"
20758 " addl %0,%%eax\n"
20759 "1:\n"
20760 + __COPYUSER_RESTORE_ES
20761 ".section .fixup,\"ax\"\n"
20762 "2: xorl %%eax,%%eax\n"
20763 " jmp 1b\n"
20764 @@ -227,7 +231,7 @@ EXPORT_SYMBOL(strnlen_user);
20765
20766 #ifdef CONFIG_X86_INTEL_USERCOPY
20767 static unsigned long
20768 -__copy_user_intel(void __user *to, const void *from, unsigned long size)
20769 +__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
20770 {
20771 int d0, d1;
20772 __asm__ __volatile__(
20773 @@ -239,36 +243,36 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
20774 " .align 2,0x90\n"
20775 "3: movl 0(%4), %%eax\n"
20776 "4: movl 4(%4), %%edx\n"
20777 - "5: movl %%eax, 0(%3)\n"
20778 - "6: movl %%edx, 4(%3)\n"
20779 + "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
20780 + "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
20781 "7: movl 8(%4), %%eax\n"
20782 "8: movl 12(%4),%%edx\n"
20783 - "9: movl %%eax, 8(%3)\n"
20784 - "10: movl %%edx, 12(%3)\n"
20785 + "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
20786 + "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
20787 "11: movl 16(%4), %%eax\n"
20788 "12: movl 20(%4), %%edx\n"
20789 - "13: movl %%eax, 16(%3)\n"
20790 - "14: movl %%edx, 20(%3)\n"
20791 + "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
20792 + "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
20793 "15: movl 24(%4), %%eax\n"
20794 "16: movl 28(%4), %%edx\n"
20795 - "17: movl %%eax, 24(%3)\n"
20796 - "18: movl %%edx, 28(%3)\n"
20797 + "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
20798 + "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
20799 "19: movl 32(%4), %%eax\n"
20800 "20: movl 36(%4), %%edx\n"
20801 - "21: movl %%eax, 32(%3)\n"
20802 - "22: movl %%edx, 36(%3)\n"
20803 + "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
20804 + "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
20805 "23: movl 40(%4), %%eax\n"
20806 "24: movl 44(%4), %%edx\n"
20807 - "25: movl %%eax, 40(%3)\n"
20808 - "26: movl %%edx, 44(%3)\n"
20809 + "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
20810 + "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
20811 "27: movl 48(%4), %%eax\n"
20812 "28: movl 52(%4), %%edx\n"
20813 - "29: movl %%eax, 48(%3)\n"
20814 - "30: movl %%edx, 52(%3)\n"
20815 + "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
20816 + "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
20817 "31: movl 56(%4), %%eax\n"
20818 "32: movl 60(%4), %%edx\n"
20819 - "33: movl %%eax, 56(%3)\n"
20820 - "34: movl %%edx, 60(%3)\n"
20821 + "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
20822 + "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
20823 " addl $-64, %0\n"
20824 " addl $64, %4\n"
20825 " addl $64, %3\n"
20826 @@ -278,10 +282,119 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
20827 " shrl $2, %0\n"
20828 " andl $3, %%eax\n"
20829 " cld\n"
20830 + __COPYUSER_SET_ES
20831 "99: rep; movsl\n"
20832 "36: movl %%eax, %0\n"
20833 "37: rep; movsb\n"
20834 "100:\n"
20835 + __COPYUSER_RESTORE_ES
20836 + ".section .fixup,\"ax\"\n"
20837 + "101: lea 0(%%eax,%0,4),%0\n"
20838 + " jmp 100b\n"
20839 + ".previous\n"
20840 + ".section __ex_table,\"a\"\n"
20841 + " .align 4\n"
20842 + " .long 1b,100b\n"
20843 + " .long 2b,100b\n"
20844 + " .long 3b,100b\n"
20845 + " .long 4b,100b\n"
20846 + " .long 5b,100b\n"
20847 + " .long 6b,100b\n"
20848 + " .long 7b,100b\n"
20849 + " .long 8b,100b\n"
20850 + " .long 9b,100b\n"
20851 + " .long 10b,100b\n"
20852 + " .long 11b,100b\n"
20853 + " .long 12b,100b\n"
20854 + " .long 13b,100b\n"
20855 + " .long 14b,100b\n"
20856 + " .long 15b,100b\n"
20857 + " .long 16b,100b\n"
20858 + " .long 17b,100b\n"
20859 + " .long 18b,100b\n"
20860 + " .long 19b,100b\n"
20861 + " .long 20b,100b\n"
20862 + " .long 21b,100b\n"
20863 + " .long 22b,100b\n"
20864 + " .long 23b,100b\n"
20865 + " .long 24b,100b\n"
20866 + " .long 25b,100b\n"
20867 + " .long 26b,100b\n"
20868 + " .long 27b,100b\n"
20869 + " .long 28b,100b\n"
20870 + " .long 29b,100b\n"
20871 + " .long 30b,100b\n"
20872 + " .long 31b,100b\n"
20873 + " .long 32b,100b\n"
20874 + " .long 33b,100b\n"
20875 + " .long 34b,100b\n"
20876 + " .long 35b,100b\n"
20877 + " .long 36b,100b\n"
20878 + " .long 37b,100b\n"
20879 + " .long 99b,101b\n"
20880 + ".previous"
20881 + : "=&c"(size), "=&D" (d0), "=&S" (d1)
20882 + : "1"(to), "2"(from), "0"(size)
20883 + : "eax", "edx", "memory");
20884 + return size;
20885 +}
20886 +
20887 +static unsigned long
20888 +__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
20889 +{
20890 + int d0, d1;
20891 + __asm__ __volatile__(
20892 + " .align 2,0x90\n"
20893 + "1: "__copyuser_seg" movl 32(%4), %%eax\n"
20894 + " cmpl $67, %0\n"
20895 + " jbe 3f\n"
20896 + "2: "__copyuser_seg" movl 64(%4), %%eax\n"
20897 + " .align 2,0x90\n"
20898 + "3: "__copyuser_seg" movl 0(%4), %%eax\n"
20899 + "4: "__copyuser_seg" movl 4(%4), %%edx\n"
20900 + "5: movl %%eax, 0(%3)\n"
20901 + "6: movl %%edx, 4(%3)\n"
20902 + "7: "__copyuser_seg" movl 8(%4), %%eax\n"
20903 + "8: "__copyuser_seg" movl 12(%4),%%edx\n"
20904 + "9: movl %%eax, 8(%3)\n"
20905 + "10: movl %%edx, 12(%3)\n"
20906 + "11: "__copyuser_seg" movl 16(%4), %%eax\n"
20907 + "12: "__copyuser_seg" movl 20(%4), %%edx\n"
20908 + "13: movl %%eax, 16(%3)\n"
20909 + "14: movl %%edx, 20(%3)\n"
20910 + "15: "__copyuser_seg" movl 24(%4), %%eax\n"
20911 + "16: "__copyuser_seg" movl 28(%4), %%edx\n"
20912 + "17: movl %%eax, 24(%3)\n"
20913 + "18: movl %%edx, 28(%3)\n"
20914 + "19: "__copyuser_seg" movl 32(%4), %%eax\n"
20915 + "20: "__copyuser_seg" movl 36(%4), %%edx\n"
20916 + "21: movl %%eax, 32(%3)\n"
20917 + "22: movl %%edx, 36(%3)\n"
20918 + "23: "__copyuser_seg" movl 40(%4), %%eax\n"
20919 + "24: "__copyuser_seg" movl 44(%4), %%edx\n"
20920 + "25: movl %%eax, 40(%3)\n"
20921 + "26: movl %%edx, 44(%3)\n"
20922 + "27: "__copyuser_seg" movl 48(%4), %%eax\n"
20923 + "28: "__copyuser_seg" movl 52(%4), %%edx\n"
20924 + "29: movl %%eax, 48(%3)\n"
20925 + "30: movl %%edx, 52(%3)\n"
20926 + "31: "__copyuser_seg" movl 56(%4), %%eax\n"
20927 + "32: "__copyuser_seg" movl 60(%4), %%edx\n"
20928 + "33: movl %%eax, 56(%3)\n"
20929 + "34: movl %%edx, 60(%3)\n"
20930 + " addl $-64, %0\n"
20931 + " addl $64, %4\n"
20932 + " addl $64, %3\n"
20933 + " cmpl $63, %0\n"
20934 + " ja 1b\n"
20935 + "35: movl %0, %%eax\n"
20936 + " shrl $2, %0\n"
20937 + " andl $3, %%eax\n"
20938 + " cld\n"
20939 + "99: rep; "__copyuser_seg" movsl\n"
20940 + "36: movl %%eax, %0\n"
20941 + "37: rep; "__copyuser_seg" movsb\n"
20942 + "100:\n"
20943 ".section .fixup,\"ax\"\n"
20944 "101: lea 0(%%eax,%0,4),%0\n"
20945 " jmp 100b\n"
20946 @@ -339,41 +452,41 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
20947 int d0, d1;
20948 __asm__ __volatile__(
20949 " .align 2,0x90\n"
20950 - "0: movl 32(%4), %%eax\n"
20951 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
20952 " cmpl $67, %0\n"
20953 " jbe 2f\n"
20954 - "1: movl 64(%4), %%eax\n"
20955 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
20956 " .align 2,0x90\n"
20957 - "2: movl 0(%4), %%eax\n"
20958 - "21: movl 4(%4), %%edx\n"
20959 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
20960 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
20961 " movl %%eax, 0(%3)\n"
20962 " movl %%edx, 4(%3)\n"
20963 - "3: movl 8(%4), %%eax\n"
20964 - "31: movl 12(%4),%%edx\n"
20965 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
20966 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
20967 " movl %%eax, 8(%3)\n"
20968 " movl %%edx, 12(%3)\n"
20969 - "4: movl 16(%4), %%eax\n"
20970 - "41: movl 20(%4), %%edx\n"
20971 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
20972 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
20973 " movl %%eax, 16(%3)\n"
20974 " movl %%edx, 20(%3)\n"
20975 - "10: movl 24(%4), %%eax\n"
20976 - "51: movl 28(%4), %%edx\n"
20977 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
20978 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
20979 " movl %%eax, 24(%3)\n"
20980 " movl %%edx, 28(%3)\n"
20981 - "11: movl 32(%4), %%eax\n"
20982 - "61: movl 36(%4), %%edx\n"
20983 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
20984 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
20985 " movl %%eax, 32(%3)\n"
20986 " movl %%edx, 36(%3)\n"
20987 - "12: movl 40(%4), %%eax\n"
20988 - "71: movl 44(%4), %%edx\n"
20989 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
20990 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
20991 " movl %%eax, 40(%3)\n"
20992 " movl %%edx, 44(%3)\n"
20993 - "13: movl 48(%4), %%eax\n"
20994 - "81: movl 52(%4), %%edx\n"
20995 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
20996 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
20997 " movl %%eax, 48(%3)\n"
20998 " movl %%edx, 52(%3)\n"
20999 - "14: movl 56(%4), %%eax\n"
21000 - "91: movl 60(%4), %%edx\n"
21001 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
21002 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
21003 " movl %%eax, 56(%3)\n"
21004 " movl %%edx, 60(%3)\n"
21005 " addl $-64, %0\n"
21006 @@ -385,9 +498,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
21007 " shrl $2, %0\n"
21008 " andl $3, %%eax\n"
21009 " cld\n"
21010 - "6: rep; movsl\n"
21011 + "6: rep; "__copyuser_seg" movsl\n"
21012 " movl %%eax,%0\n"
21013 - "7: rep; movsb\n"
21014 + "7: rep; "__copyuser_seg" movsb\n"
21015 "8:\n"
21016 ".section .fixup,\"ax\"\n"
21017 "9: lea 0(%%eax,%0,4),%0\n"
21018 @@ -440,41 +553,41 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
21019
21020 __asm__ __volatile__(
21021 " .align 2,0x90\n"
21022 - "0: movl 32(%4), %%eax\n"
21023 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
21024 " cmpl $67, %0\n"
21025 " jbe 2f\n"
21026 - "1: movl 64(%4), %%eax\n"
21027 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
21028 " .align 2,0x90\n"
21029 - "2: movl 0(%4), %%eax\n"
21030 - "21: movl 4(%4), %%edx\n"
21031 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
21032 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
21033 " movnti %%eax, 0(%3)\n"
21034 " movnti %%edx, 4(%3)\n"
21035 - "3: movl 8(%4), %%eax\n"
21036 - "31: movl 12(%4),%%edx\n"
21037 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
21038 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
21039 " movnti %%eax, 8(%3)\n"
21040 " movnti %%edx, 12(%3)\n"
21041 - "4: movl 16(%4), %%eax\n"
21042 - "41: movl 20(%4), %%edx\n"
21043 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
21044 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
21045 " movnti %%eax, 16(%3)\n"
21046 " movnti %%edx, 20(%3)\n"
21047 - "10: movl 24(%4), %%eax\n"
21048 - "51: movl 28(%4), %%edx\n"
21049 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
21050 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
21051 " movnti %%eax, 24(%3)\n"
21052 " movnti %%edx, 28(%3)\n"
21053 - "11: movl 32(%4), %%eax\n"
21054 - "61: movl 36(%4), %%edx\n"
21055 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
21056 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
21057 " movnti %%eax, 32(%3)\n"
21058 " movnti %%edx, 36(%3)\n"
21059 - "12: movl 40(%4), %%eax\n"
21060 - "71: movl 44(%4), %%edx\n"
21061 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
21062 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
21063 " movnti %%eax, 40(%3)\n"
21064 " movnti %%edx, 44(%3)\n"
21065 - "13: movl 48(%4), %%eax\n"
21066 - "81: movl 52(%4), %%edx\n"
21067 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
21068 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
21069 " movnti %%eax, 48(%3)\n"
21070 " movnti %%edx, 52(%3)\n"
21071 - "14: movl 56(%4), %%eax\n"
21072 - "91: movl 60(%4), %%edx\n"
21073 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
21074 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
21075 " movnti %%eax, 56(%3)\n"
21076 " movnti %%edx, 60(%3)\n"
21077 " addl $-64, %0\n"
21078 @@ -487,9 +600,9 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
21079 " shrl $2, %0\n"
21080 " andl $3, %%eax\n"
21081 " cld\n"
21082 - "6: rep; movsl\n"
21083 + "6: rep; "__copyuser_seg" movsl\n"
21084 " movl %%eax,%0\n"
21085 - "7: rep; movsb\n"
21086 + "7: rep; "__copyuser_seg" movsb\n"
21087 "8:\n"
21088 ".section .fixup,\"ax\"\n"
21089 "9: lea 0(%%eax,%0,4),%0\n"
21090 @@ -537,41 +650,41 @@ static unsigned long __copy_user_intel_nocache(void *to,
21091
21092 __asm__ __volatile__(
21093 " .align 2,0x90\n"
21094 - "0: movl 32(%4), %%eax\n"
21095 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
21096 " cmpl $67, %0\n"
21097 " jbe 2f\n"
21098 - "1: movl 64(%4), %%eax\n"
21099 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
21100 " .align 2,0x90\n"
21101 - "2: movl 0(%4), %%eax\n"
21102 - "21: movl 4(%4), %%edx\n"
21103 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
21104 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
21105 " movnti %%eax, 0(%3)\n"
21106 " movnti %%edx, 4(%3)\n"
21107 - "3: movl 8(%4), %%eax\n"
21108 - "31: movl 12(%4),%%edx\n"
21109 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
21110 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
21111 " movnti %%eax, 8(%3)\n"
21112 " movnti %%edx, 12(%3)\n"
21113 - "4: movl 16(%4), %%eax\n"
21114 - "41: movl 20(%4), %%edx\n"
21115 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
21116 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
21117 " movnti %%eax, 16(%3)\n"
21118 " movnti %%edx, 20(%3)\n"
21119 - "10: movl 24(%4), %%eax\n"
21120 - "51: movl 28(%4), %%edx\n"
21121 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
21122 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
21123 " movnti %%eax, 24(%3)\n"
21124 " movnti %%edx, 28(%3)\n"
21125 - "11: movl 32(%4), %%eax\n"
21126 - "61: movl 36(%4), %%edx\n"
21127 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
21128 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
21129 " movnti %%eax, 32(%3)\n"
21130 " movnti %%edx, 36(%3)\n"
21131 - "12: movl 40(%4), %%eax\n"
21132 - "71: movl 44(%4), %%edx\n"
21133 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
21134 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
21135 " movnti %%eax, 40(%3)\n"
21136 " movnti %%edx, 44(%3)\n"
21137 - "13: movl 48(%4), %%eax\n"
21138 - "81: movl 52(%4), %%edx\n"
21139 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
21140 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
21141 " movnti %%eax, 48(%3)\n"
21142 " movnti %%edx, 52(%3)\n"
21143 - "14: movl 56(%4), %%eax\n"
21144 - "91: movl 60(%4), %%edx\n"
21145 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
21146 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
21147 " movnti %%eax, 56(%3)\n"
21148 " movnti %%edx, 60(%3)\n"
21149 " addl $-64, %0\n"
21150 @@ -584,9 +697,9 @@ static unsigned long __copy_user_intel_nocache(void *to,
21151 " shrl $2, %0\n"
21152 " andl $3, %%eax\n"
21153 " cld\n"
21154 - "6: rep; movsl\n"
21155 + "6: rep; "__copyuser_seg" movsl\n"
21156 " movl %%eax,%0\n"
21157 - "7: rep; movsb\n"
21158 + "7: rep; "__copyuser_seg" movsb\n"
21159 "8:\n"
21160 ".section .fixup,\"ax\"\n"
21161 "9: lea 0(%%eax,%0,4),%0\n"
21162 @@ -629,32 +742,36 @@ static unsigned long __copy_user_intel_nocache(void *to,
21163 */
21164 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
21165 unsigned long size);
21166 -unsigned long __copy_user_intel(void __user *to, const void *from,
21167 +unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
21168 + unsigned long size);
21169 +unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
21170 unsigned long size);
21171 unsigned long __copy_user_zeroing_intel_nocache(void *to,
21172 const void __user *from, unsigned long size);
21173 #endif /* CONFIG_X86_INTEL_USERCOPY */
21174
21175 /* Generic arbitrary sized copy. */
21176 -#define __copy_user(to, from, size) \
21177 +#define __copy_user(to, from, size, prefix, set, restore) \
21178 do { \
21179 int __d0, __d1, __d2; \
21180 __asm__ __volatile__( \
21181 + set \
21182 " cmp $7,%0\n" \
21183 " jbe 1f\n" \
21184 " movl %1,%0\n" \
21185 " negl %0\n" \
21186 " andl $7,%0\n" \
21187 " subl %0,%3\n" \
21188 - "4: rep; movsb\n" \
21189 + "4: rep; "prefix"movsb\n" \
21190 " movl %3,%0\n" \
21191 " shrl $2,%0\n" \
21192 " andl $3,%3\n" \
21193 " .align 2,0x90\n" \
21194 - "0: rep; movsl\n" \
21195 + "0: rep; "prefix"movsl\n" \
21196 " movl %3,%0\n" \
21197 - "1: rep; movsb\n" \
21198 + "1: rep; "prefix"movsb\n" \
21199 "2:\n" \
21200 + restore \
21201 ".section .fixup,\"ax\"\n" \
21202 "5: addl %3,%0\n" \
21203 " jmp 2b\n" \
21204 @@ -682,14 +799,14 @@ do { \
21205 " negl %0\n" \
21206 " andl $7,%0\n" \
21207 " subl %0,%3\n" \
21208 - "4: rep; movsb\n" \
21209 + "4: rep; "__copyuser_seg"movsb\n" \
21210 " movl %3,%0\n" \
21211 " shrl $2,%0\n" \
21212 " andl $3,%3\n" \
21213 " .align 2,0x90\n" \
21214 - "0: rep; movsl\n" \
21215 + "0: rep; "__copyuser_seg"movsl\n" \
21216 " movl %3,%0\n" \
21217 - "1: rep; movsb\n" \
21218 + "1: rep; "__copyuser_seg"movsb\n" \
21219 "2:\n" \
21220 ".section .fixup,\"ax\"\n" \
21221 "5: addl %3,%0\n" \
21222 @@ -775,9 +892,9 @@ survive:
21223 }
21224 #endif
21225 if (movsl_is_ok(to, from, n))
21226 - __copy_user(to, from, n);
21227 + __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
21228 else
21229 - n = __copy_user_intel(to, from, n);
21230 + n = __generic_copy_to_user_intel(to, from, n);
21231 return n;
21232 }
21233 EXPORT_SYMBOL(__copy_to_user_ll);
21234 @@ -797,10 +914,9 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
21235 unsigned long n)
21236 {
21237 if (movsl_is_ok(to, from, n))
21238 - __copy_user(to, from, n);
21239 + __copy_user(to, from, n, __copyuser_seg, "", "");
21240 else
21241 - n = __copy_user_intel((void __user *)to,
21242 - (const void *)from, n);
21243 + n = __generic_copy_from_user_intel(to, from, n);
21244 return n;
21245 }
21246 EXPORT_SYMBOL(__copy_from_user_ll_nozero);
21247 @@ -827,65 +943,50 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
21248 if (n > 64 && cpu_has_xmm2)
21249 n = __copy_user_intel_nocache(to, from, n);
21250 else
21251 - __copy_user(to, from, n);
21252 + __copy_user(to, from, n, __copyuser_seg, "", "");
21253 #else
21254 - __copy_user(to, from, n);
21255 + __copy_user(to, from, n, __copyuser_seg, "", "");
21256 #endif
21257 return n;
21258 }
21259 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
21260
21261 -/**
21262 - * copy_to_user: - Copy a block of data into user space.
21263 - * @to: Destination address, in user space.
21264 - * @from: Source address, in kernel space.
21265 - * @n: Number of bytes to copy.
21266 - *
21267 - * Context: User context only. This function may sleep.
21268 - *
21269 - * Copy data from kernel space to user space.
21270 - *
21271 - * Returns number of bytes that could not be copied.
21272 - * On success, this will be zero.
21273 - */
21274 -unsigned long
21275 -copy_to_user(void __user *to, const void *from, unsigned long n)
21276 -{
21277 - if (access_ok(VERIFY_WRITE, to, n))
21278 - n = __copy_to_user(to, from, n);
21279 - return n;
21280 -}
21281 -EXPORT_SYMBOL(copy_to_user);
21282 -
21283 -/**
21284 - * copy_from_user: - Copy a block of data from user space.
21285 - * @to: Destination address, in kernel space.
21286 - * @from: Source address, in user space.
21287 - * @n: Number of bytes to copy.
21288 - *
21289 - * Context: User context only. This function may sleep.
21290 - *
21291 - * Copy data from user space to kernel space.
21292 - *
21293 - * Returns number of bytes that could not be copied.
21294 - * On success, this will be zero.
21295 - *
21296 - * If some data could not be copied, this function will pad the copied
21297 - * data to the requested size using zero bytes.
21298 - */
21299 -unsigned long
21300 -_copy_from_user(void *to, const void __user *from, unsigned long n)
21301 -{
21302 - if (access_ok(VERIFY_READ, from, n))
21303 - n = __copy_from_user(to, from, n);
21304 - else
21305 - memset(to, 0, n);
21306 - return n;
21307 -}
21308 -EXPORT_SYMBOL(_copy_from_user);
21309 -
21310 void copy_from_user_overflow(void)
21311 {
21312 WARN(1, "Buffer overflow detected!\n");
21313 }
21314 EXPORT_SYMBOL(copy_from_user_overflow);
21315 +
21316 +void copy_to_user_overflow(void)
21317 +{
21318 + WARN(1, "Buffer overflow detected!\n");
21319 +}
21320 +EXPORT_SYMBOL(copy_to_user_overflow);
21321 +
21322 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21323 +void __set_fs(mm_segment_t x)
21324 +{
21325 + switch (x.seg) {
21326 + case 0:
21327 + loadsegment(gs, 0);
21328 + break;
21329 + case TASK_SIZE_MAX:
21330 + loadsegment(gs, __USER_DS);
21331 + break;
21332 + case -1UL:
21333 + loadsegment(gs, __KERNEL_DS);
21334 + break;
21335 + default:
21336 + BUG();
21337 + }
21338 + return;
21339 +}
21340 +EXPORT_SYMBOL(__set_fs);
21341 +
21342 +void set_fs(mm_segment_t x)
21343 +{
21344 + current_thread_info()->addr_limit = x;
21345 + __set_fs(x);
21346 +}
21347 +EXPORT_SYMBOL(set_fs);
21348 +#endif
21349 diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
21350 index b7c2849..8633ad8 100644
21351 --- a/arch/x86/lib/usercopy_64.c
21352 +++ b/arch/x86/lib/usercopy_64.c
21353 @@ -42,6 +42,12 @@ long
21354 __strncpy_from_user(char *dst, const char __user *src, long count)
21355 {
21356 long res;
21357 +
21358 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21359 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
21360 + src += PAX_USER_SHADOW_BASE;
21361 +#endif
21362 +
21363 __do_strncpy_from_user(dst, src, count, res);
21364 return res;
21365 }
21366 @@ -65,6 +71,12 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
21367 {
21368 long __d0;
21369 might_fault();
21370 +
21371 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21372 + if ((unsigned long)addr < PAX_USER_SHADOW_BASE)
21373 + addr += PAX_USER_SHADOW_BASE;
21374 +#endif
21375 +
21376 /* no memory constraint because it doesn't change any memory gcc knows
21377 about */
21378 asm volatile(
21379 @@ -149,12 +161,20 @@ long strlen_user(const char __user *s)
21380 }
21381 EXPORT_SYMBOL(strlen_user);
21382
21383 -unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
21384 +unsigned long copy_in_user(void __user *to, const void __user *from, unsigned long len)
21385 {
21386 - if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
21387 - return copy_user_generic((__force void *)to, (__force void *)from, len);
21388 - }
21389 - return len;
21390 + if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
21391 +
21392 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21393 + if ((unsigned long)to < PAX_USER_SHADOW_BASE)
21394 + to += PAX_USER_SHADOW_BASE;
21395 + if ((unsigned long)from < PAX_USER_SHADOW_BASE)
21396 + from += PAX_USER_SHADOW_BASE;
21397 +#endif
21398 +
21399 + return copy_user_generic((void __force_kernel *)to, (void __force_kernel *)from, len);
21400 + }
21401 + return len;
21402 }
21403 EXPORT_SYMBOL(copy_in_user);
21404
21405 @@ -164,7 +184,7 @@ EXPORT_SYMBOL(copy_in_user);
21406 * it is not necessary to optimize tail handling.
21407 */
21408 unsigned long
21409 -copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
21410 +copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest)
21411 {
21412 char c;
21413 unsigned zero_len;
21414 diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
21415 index d0474ad..36e9257 100644
21416 --- a/arch/x86/mm/extable.c
21417 +++ b/arch/x86/mm/extable.c
21418 @@ -8,7 +8,7 @@ int fixup_exception(struct pt_regs *regs)
21419 const struct exception_table_entry *fixup;
21420
21421 #ifdef CONFIG_PNPBIOS
21422 - if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
21423 + if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
21424 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
21425 extern u32 pnp_bios_is_utter_crap;
21426 pnp_bios_is_utter_crap = 1;
21427 diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
21428 index 5db0490..13bd09c 100644
21429 --- a/arch/x86/mm/fault.c
21430 +++ b/arch/x86/mm/fault.c
21431 @@ -13,11 +13,18 @@
21432 #include <linux/perf_event.h> /* perf_sw_event */
21433 #include <linux/hugetlb.h> /* hstate_index_to_shift */
21434 #include <linux/prefetch.h> /* prefetchw */
21435 +#include <linux/unistd.h>
21436 +#include <linux/compiler.h>
21437
21438 #include <asm/traps.h> /* dotraplinkage, ... */
21439 #include <asm/pgalloc.h> /* pgd_*(), ... */
21440 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
21441 #include <asm/fixmap.h> /* VSYSCALL_START */
21442 +#include <asm/tlbflush.h>
21443 +
21444 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21445 +#include <asm/stacktrace.h>
21446 +#endif
21447
21448 /*
21449 * Page fault error code bits:
21450 @@ -55,7 +62,7 @@ static inline int __kprobes notify_page_fault(struct pt_regs *regs)
21451 int ret = 0;
21452
21453 /* kprobe_running() needs smp_processor_id() */
21454 - if (kprobes_built_in() && !user_mode_vm(regs)) {
21455 + if (kprobes_built_in() && !user_mode(regs)) {
21456 preempt_disable();
21457 if (kprobe_running() && kprobe_fault_handler(regs, 14))
21458 ret = 1;
21459 @@ -116,7 +123,10 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
21460 return !instr_lo || (instr_lo>>1) == 1;
21461 case 0x00:
21462 /* Prefetch instruction is 0x0F0D or 0x0F18 */
21463 - if (probe_kernel_address(instr, opcode))
21464 + if (user_mode(regs)) {
21465 + if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
21466 + return 0;
21467 + } else if (probe_kernel_address(instr, opcode))
21468 return 0;
21469
21470 *prefetch = (instr_lo == 0xF) &&
21471 @@ -150,7 +160,10 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
21472 while (instr < max_instr) {
21473 unsigned char opcode;
21474
21475 - if (probe_kernel_address(instr, opcode))
21476 + if (user_mode(regs)) {
21477 + if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
21478 + break;
21479 + } else if (probe_kernel_address(instr, opcode))
21480 break;
21481
21482 instr++;
21483 @@ -181,6 +194,34 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
21484 force_sig_info(si_signo, &info, tsk);
21485 }
21486
21487 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
21488 +static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
21489 +#endif
21490 +
21491 +#ifdef CONFIG_PAX_EMUTRAMP
21492 +static int pax_handle_fetch_fault(struct pt_regs *regs);
21493 +#endif
21494 +
21495 +#ifdef CONFIG_PAX_PAGEEXEC
21496 +static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
21497 +{
21498 + pgd_t *pgd;
21499 + pud_t *pud;
21500 + pmd_t *pmd;
21501 +
21502 + pgd = pgd_offset(mm, address);
21503 + if (!pgd_present(*pgd))
21504 + return NULL;
21505 + pud = pud_offset(pgd, address);
21506 + if (!pud_present(*pud))
21507 + return NULL;
21508 + pmd = pmd_offset(pud, address);
21509 + if (!pmd_present(*pmd))
21510 + return NULL;
21511 + return pmd;
21512 +}
21513 +#endif
21514 +
21515 DEFINE_SPINLOCK(pgd_lock);
21516 LIST_HEAD(pgd_list);
21517
21518 @@ -231,10 +272,22 @@ void vmalloc_sync_all(void)
21519 for (address = VMALLOC_START & PMD_MASK;
21520 address >= TASK_SIZE && address < FIXADDR_TOP;
21521 address += PMD_SIZE) {
21522 +
21523 +#ifdef CONFIG_PAX_PER_CPU_PGD
21524 + unsigned long cpu;
21525 +#else
21526 struct page *page;
21527 +#endif
21528
21529 spin_lock(&pgd_lock);
21530 +
21531 +#ifdef CONFIG_PAX_PER_CPU_PGD
21532 + for (cpu = 0; cpu < NR_CPUS; ++cpu) {
21533 + pgd_t *pgd = get_cpu_pgd(cpu);
21534 + pmd_t *ret;
21535 +#else
21536 list_for_each_entry(page, &pgd_list, lru) {
21537 + pgd_t *pgd = page_address(page);
21538 spinlock_t *pgt_lock;
21539 pmd_t *ret;
21540
21541 @@ -242,8 +295,13 @@ void vmalloc_sync_all(void)
21542 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
21543
21544 spin_lock(pgt_lock);
21545 - ret = vmalloc_sync_one(page_address(page), address);
21546 +#endif
21547 +
21548 + ret = vmalloc_sync_one(pgd, address);
21549 +
21550 +#ifndef CONFIG_PAX_PER_CPU_PGD
21551 spin_unlock(pgt_lock);
21552 +#endif
21553
21554 if (!ret)
21555 break;
21556 @@ -277,6 +335,11 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
21557 * an interrupt in the middle of a task switch..
21558 */
21559 pgd_paddr = read_cr3();
21560 +
21561 +#ifdef CONFIG_PAX_PER_CPU_PGD
21562 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
21563 +#endif
21564 +
21565 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
21566 if (!pmd_k)
21567 return -1;
21568 @@ -372,7 +435,14 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
21569 * happen within a race in page table update. In the later
21570 * case just flush:
21571 */
21572 +
21573 +#ifdef CONFIG_PAX_PER_CPU_PGD
21574 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
21575 + pgd = pgd_offset_cpu(smp_processor_id(), address);
21576 +#else
21577 pgd = pgd_offset(current->active_mm, address);
21578 +#endif
21579 +
21580 pgd_ref = pgd_offset_k(address);
21581 if (pgd_none(*pgd_ref))
21582 return -1;
21583 @@ -540,7 +610,7 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
21584 static int is_errata100(struct pt_regs *regs, unsigned long address)
21585 {
21586 #ifdef CONFIG_X86_64
21587 - if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
21588 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
21589 return 1;
21590 #endif
21591 return 0;
21592 @@ -567,7 +637,7 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
21593 }
21594
21595 static const char nx_warning[] = KERN_CRIT
21596 -"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
21597 +"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
21598
21599 static void
21600 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
21601 @@ -576,15 +646,26 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
21602 if (!oops_may_print())
21603 return;
21604
21605 - if (error_code & PF_INSTR) {
21606 + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
21607 unsigned int level;
21608
21609 pte_t *pte = lookup_address(address, &level);
21610
21611 if (pte && pte_present(*pte) && !pte_exec(*pte))
21612 - printk(nx_warning, current_uid());
21613 + printk(nx_warning, current_uid(), current->comm, task_pid_nr(current));
21614 }
21615
21616 +#ifdef CONFIG_PAX_KERNEXEC
21617 + if (init_mm.start_code <= address && address < init_mm.end_code) {
21618 + if (current->signal->curr_ip)
21619 + printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
21620 + &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
21621 + else
21622 + printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
21623 + current->comm, task_pid_nr(current), current_uid(), current_euid());
21624 + }
21625 +#endif
21626 +
21627 printk(KERN_ALERT "BUG: unable to handle kernel ");
21628 if (address < PAGE_SIZE)
21629 printk(KERN_CONT "NULL pointer dereference");
21630 @@ -739,6 +820,21 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
21631 }
21632 #endif
21633
21634 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
21635 + if (pax_is_fetch_fault(regs, error_code, address)) {
21636 +
21637 +#ifdef CONFIG_PAX_EMUTRAMP
21638 + switch (pax_handle_fetch_fault(regs)) {
21639 + case 2:
21640 + return;
21641 + }
21642 +#endif
21643 +
21644 + pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
21645 + do_group_exit(SIGKILL);
21646 + }
21647 +#endif
21648 +
21649 if (unlikely(show_unhandled_signals))
21650 show_signal_msg(regs, error_code, address, tsk);
21651
21652 @@ -835,7 +931,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
21653 if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
21654 printk(KERN_ERR
21655 "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
21656 - tsk->comm, tsk->pid, address);
21657 + tsk->comm, task_pid_nr(tsk), address);
21658 code = BUS_MCEERR_AR;
21659 }
21660 #endif
21661 @@ -890,6 +986,99 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
21662 return 1;
21663 }
21664
21665 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
21666 +static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
21667 +{
21668 + pte_t *pte;
21669 + pmd_t *pmd;
21670 + spinlock_t *ptl;
21671 + unsigned char pte_mask;
21672 +
21673 + if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
21674 + !(mm->pax_flags & MF_PAX_PAGEEXEC))
21675 + return 0;
21676 +
21677 + /* PaX: it's our fault, let's handle it if we can */
21678 +
21679 + /* PaX: take a look at read faults before acquiring any locks */
21680 + if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
21681 + /* instruction fetch attempt from a protected page in user mode */
21682 + up_read(&mm->mmap_sem);
21683 +
21684 +#ifdef CONFIG_PAX_EMUTRAMP
21685 + switch (pax_handle_fetch_fault(regs)) {
21686 + case 2:
21687 + return 1;
21688 + }
21689 +#endif
21690 +
21691 + pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
21692 + do_group_exit(SIGKILL);
21693 + }
21694 +
21695 + pmd = pax_get_pmd(mm, address);
21696 + if (unlikely(!pmd))
21697 + return 0;
21698 +
21699 + pte = pte_offset_map_lock(mm, pmd, address, &ptl);
21700 + if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
21701 + pte_unmap_unlock(pte, ptl);
21702 + return 0;
21703 + }
21704 +
21705 + if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
21706 + /* write attempt to a protected page in user mode */
21707 + pte_unmap_unlock(pte, ptl);
21708 + return 0;
21709 + }
21710 +
21711 +#ifdef CONFIG_SMP
21712 + if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
21713 +#else
21714 + if (likely(address > get_limit(regs->cs)))
21715 +#endif
21716 + {
21717 + set_pte(pte, pte_mkread(*pte));
21718 + __flush_tlb_one(address);
21719 + pte_unmap_unlock(pte, ptl);
21720 + up_read(&mm->mmap_sem);
21721 + return 1;
21722 + }
21723 +
21724 + pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
21725 +
21726 + /*
21727 + * PaX: fill DTLB with user rights and retry
21728 + */
21729 + __asm__ __volatile__ (
21730 + "orb %2,(%1)\n"
21731 +#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
21732 +/*
21733 + * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
21734 + * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
21735 + * page fault when examined during a TLB load attempt. this is true not only
21736 + * for PTEs holding a non-present entry but also present entries that will
21737 + * raise a page fault (such as those set up by PaX, or the copy-on-write
21738 + * mechanism). in effect it means that we do *not* need to flush the TLBs
21739 + * for our target pages since their PTEs are simply not in the TLBs at all.
21740 +
21741 + * the best thing in omitting it is that we gain around 15-20% speed in the
21742 + * fast path of the page fault handler and can get rid of tracing since we
21743 + * can no longer flush unintended entries.
21744 + */
21745 + "invlpg (%0)\n"
21746 +#endif
21747 + __copyuser_seg"testb $0,(%0)\n"
21748 + "xorb %3,(%1)\n"
21749 + :
21750 + : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
21751 + : "memory", "cc");
21752 + pte_unmap_unlock(pte, ptl);
21753 + up_read(&mm->mmap_sem);
21754 + return 1;
21755 +}
21756 +#endif
21757 +
21758 /*
21759 * Handle a spurious fault caused by a stale TLB entry.
21760 *
21761 @@ -962,6 +1151,9 @@ int show_unhandled_signals = 1;
21762 static inline int
21763 access_error(unsigned long error_code, struct vm_area_struct *vma)
21764 {
21765 + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
21766 + return 1;
21767 +
21768 if (error_code & PF_WRITE) {
21769 /* write, present and write, not present: */
21770 if (unlikely(!(vma->vm_flags & VM_WRITE)))
21771 @@ -995,18 +1187,32 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
21772 {
21773 struct vm_area_struct *vma;
21774 struct task_struct *tsk;
21775 - unsigned long address;
21776 struct mm_struct *mm;
21777 int fault;
21778 int write = error_code & PF_WRITE;
21779 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
21780 (write ? FAULT_FLAG_WRITE : 0);
21781
21782 - tsk = current;
21783 - mm = tsk->mm;
21784 -
21785 /* Get the faulting address: */
21786 - address = read_cr2();
21787 + unsigned long address = read_cr2();
21788 +
21789 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21790 + if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) {
21791 + if (!search_exception_tables(regs->ip)) {
21792 + bad_area_nosemaphore(regs, error_code, address);
21793 + return;
21794 + }
21795 + if (address < PAX_USER_SHADOW_BASE) {
21796 + printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
21797 + printk(KERN_ERR "PAX: faulting IP: %pS\n", (void *)regs->ip);
21798 + show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
21799 + } else
21800 + address -= PAX_USER_SHADOW_BASE;
21801 + }
21802 +#endif
21803 +
21804 + tsk = current;
21805 + mm = tsk->mm;
21806
21807 /*
21808 * Detect and handle instructions that would cause a page fault for
21809 @@ -1067,7 +1273,7 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
21810 * User-mode registers count as a user access even for any
21811 * potential system fault or CPU buglet:
21812 */
21813 - if (user_mode_vm(regs)) {
21814 + if (user_mode(regs)) {
21815 local_irq_enable();
21816 error_code |= PF_USER;
21817 } else {
21818 @@ -1122,6 +1328,11 @@ retry:
21819 might_sleep();
21820 }
21821
21822 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
21823 + if (pax_handle_pageexec_fault(regs, mm, address, error_code))
21824 + return;
21825 +#endif
21826 +
21827 vma = find_vma(mm, address);
21828 if (unlikely(!vma)) {
21829 bad_area(regs, error_code, address);
21830 @@ -1133,18 +1344,24 @@ retry:
21831 bad_area(regs, error_code, address);
21832 return;
21833 }
21834 - if (error_code & PF_USER) {
21835 - /*
21836 - * Accessing the stack below %sp is always a bug.
21837 - * The large cushion allows instructions like enter
21838 - * and pusha to work. ("enter $65535, $31" pushes
21839 - * 32 pointers and then decrements %sp by 65535.)
21840 - */
21841 - if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
21842 - bad_area(regs, error_code, address);
21843 - return;
21844 - }
21845 + /*
21846 + * Accessing the stack below %sp is always a bug.
21847 + * The large cushion allows instructions like enter
21848 + * and pusha to work. ("enter $65535, $31" pushes
21849 + * 32 pointers and then decrements %sp by 65535.)
21850 + */
21851 + if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
21852 + bad_area(regs, error_code, address);
21853 + return;
21854 }
21855 +
21856 +#ifdef CONFIG_PAX_SEGMEXEC
21857 + if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
21858 + bad_area(regs, error_code, address);
21859 + return;
21860 + }
21861 +#endif
21862 +
21863 if (unlikely(expand_stack(vma, address))) {
21864 bad_area(regs, error_code, address);
21865 return;
21866 @@ -1199,3 +1416,292 @@ good_area:
21867
21868 up_read(&mm->mmap_sem);
21869 }
21870 +
21871 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
21872 +static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
21873 +{
21874 + struct mm_struct *mm = current->mm;
21875 + unsigned long ip = regs->ip;
21876 +
21877 + if (v8086_mode(regs))
21878 + ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
21879 +
21880 +#ifdef CONFIG_PAX_PAGEEXEC
21881 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
21882 + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR))
21883 + return true;
21884 + if (!(error_code & (PF_PROT | PF_WRITE)) && ip == address)
21885 + return true;
21886 + return false;
21887 + }
21888 +#endif
21889 +
21890 +#ifdef CONFIG_PAX_SEGMEXEC
21891 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
21892 + if (!(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address))
21893 + return true;
21894 + return false;
21895 + }
21896 +#endif
21897 +
21898 + return false;
21899 +}
21900 +#endif
21901 +
21902 +#ifdef CONFIG_PAX_EMUTRAMP
21903 +static int pax_handle_fetch_fault_32(struct pt_regs *regs)
21904 +{
21905 + int err;
21906 +
21907 + do { /* PaX: libffi trampoline emulation */
21908 + unsigned char mov, jmp;
21909 + unsigned int addr1, addr2;
21910 +
21911 +#ifdef CONFIG_X86_64
21912 + if ((regs->ip + 9) >> 32)
21913 + break;
21914 +#endif
21915 +
21916 + err = get_user(mov, (unsigned char __user *)regs->ip);
21917 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
21918 + err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
21919 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
21920 +
21921 + if (err)
21922 + break;
21923 +
21924 + if (mov == 0xB8 && jmp == 0xE9) {
21925 + regs->ax = addr1;
21926 + regs->ip = (unsigned int)(regs->ip + addr2 + 10);
21927 + return 2;
21928 + }
21929 + } while (0);
21930 +
21931 + do { /* PaX: gcc trampoline emulation #1 */
21932 + unsigned char mov1, mov2;
21933 + unsigned short jmp;
21934 + unsigned int addr1, addr2;
21935 +
21936 +#ifdef CONFIG_X86_64
21937 + if ((regs->ip + 11) >> 32)
21938 + break;
21939 +#endif
21940 +
21941 + err = get_user(mov1, (unsigned char __user *)regs->ip);
21942 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
21943 + err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
21944 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
21945 + err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
21946 +
21947 + if (err)
21948 + break;
21949 +
21950 + if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
21951 + regs->cx = addr1;
21952 + regs->ax = addr2;
21953 + regs->ip = addr2;
21954 + return 2;
21955 + }
21956 + } while (0);
21957 +
21958 + do { /* PaX: gcc trampoline emulation #2 */
21959 + unsigned char mov, jmp;
21960 + unsigned int addr1, addr2;
21961 +
21962 +#ifdef CONFIG_X86_64
21963 + if ((regs->ip + 9) >> 32)
21964 + break;
21965 +#endif
21966 +
21967 + err = get_user(mov, (unsigned char __user *)regs->ip);
21968 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
21969 + err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
21970 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
21971 +
21972 + if (err)
21973 + break;
21974 +
21975 + if (mov == 0xB9 && jmp == 0xE9) {
21976 + regs->cx = addr1;
21977 + regs->ip = (unsigned int)(regs->ip + addr2 + 10);
21978 + return 2;
21979 + }
21980 + } while (0);
21981 +
21982 + return 1; /* PaX in action */
21983 +}
21984 +
21985 +#ifdef CONFIG_X86_64
21986 +static int pax_handle_fetch_fault_64(struct pt_regs *regs)
21987 +{
21988 + int err;
21989 +
21990 + do { /* PaX: libffi trampoline emulation */
21991 + unsigned short mov1, mov2, jmp1;
21992 + unsigned char stcclc, jmp2;
21993 + unsigned long addr1, addr2;
21994 +
21995 + err = get_user(mov1, (unsigned short __user *)regs->ip);
21996 + err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
21997 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
21998 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
21999 + err |= get_user(stcclc, (unsigned char __user *)(regs->ip + 20));
22000 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 21));
22001 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 23));
22002 +
22003 + if (err)
22004 + break;
22005 +
22006 + if (mov1 == 0xBB49 && mov2 == 0xBA49 && (stcclc == 0xF8 || stcclc == 0xF9) && jmp1 == 0xFF49 && jmp2 == 0xE3) {
22007 + regs->r11 = addr1;
22008 + regs->r10 = addr2;
22009 + if (stcclc == 0xF8)
22010 + regs->flags &= ~X86_EFLAGS_CF;
22011 + else
22012 + regs->flags |= X86_EFLAGS_CF;
22013 + regs->ip = addr1;
22014 + return 2;
22015 + }
22016 + } while (0);
22017 +
22018 + do { /* PaX: gcc trampoline emulation #1 */
22019 + unsigned short mov1, mov2, jmp1;
22020 + unsigned char jmp2;
22021 + unsigned int addr1;
22022 + unsigned long addr2;
22023 +
22024 + err = get_user(mov1, (unsigned short __user *)regs->ip);
22025 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
22026 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
22027 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
22028 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
22029 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
22030 +
22031 + if (err)
22032 + break;
22033 +
22034 + if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
22035 + regs->r11 = addr1;
22036 + regs->r10 = addr2;
22037 + regs->ip = addr1;
22038 + return 2;
22039 + }
22040 + } while (0);
22041 +
22042 + do { /* PaX: gcc trampoline emulation #2 */
22043 + unsigned short mov1, mov2, jmp1;
22044 + unsigned char jmp2;
22045 + unsigned long addr1, addr2;
22046 +
22047 + err = get_user(mov1, (unsigned short __user *)regs->ip);
22048 + err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
22049 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
22050 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
22051 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
22052 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
22053 +
22054 + if (err)
22055 + break;
22056 +
22057 + if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
22058 + regs->r11 = addr1;
22059 + regs->r10 = addr2;
22060 + regs->ip = addr1;
22061 + return 2;
22062 + }
22063 + } while (0);
22064 +
22065 + return 1; /* PaX in action */
22066 +}
22067 +#endif
22068 +
22069 +/*
22070 + * PaX: decide what to do with offenders (regs->ip = fault address)
22071 + *
22072 + * returns 1 when task should be killed
22073 + * 2 when gcc trampoline was detected
22074 + */
22075 +static int pax_handle_fetch_fault(struct pt_regs *regs)
22076 +{
22077 + if (v8086_mode(regs))
22078 + return 1;
22079 +
22080 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
22081 + return 1;
22082 +
22083 +#ifdef CONFIG_X86_32
22084 + return pax_handle_fetch_fault_32(regs);
22085 +#else
22086 + if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
22087 + return pax_handle_fetch_fault_32(regs);
22088 + else
22089 + return pax_handle_fetch_fault_64(regs);
22090 +#endif
22091 +}
22092 +#endif
22093 +
22094 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
22095 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
22096 +{
22097 + long i;
22098 +
22099 + printk(KERN_ERR "PAX: bytes at PC: ");
22100 + for (i = 0; i < 20; i++) {
22101 + unsigned char c;
22102 + if (get_user(c, (unsigned char __force_user *)pc+i))
22103 + printk(KERN_CONT "?? ");
22104 + else
22105 + printk(KERN_CONT "%02x ", c);
22106 + }
22107 + printk("\n");
22108 +
22109 + printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
22110 + for (i = -1; i < 80 / (long)sizeof(long); i++) {
22111 + unsigned long c;
22112 + if (get_user(c, (unsigned long __force_user *)sp+i)) {
22113 +#ifdef CONFIG_X86_32
22114 + printk(KERN_CONT "???????? ");
22115 +#else
22116 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)))
22117 + printk(KERN_CONT "???????? ???????? ");
22118 + else
22119 + printk(KERN_CONT "???????????????? ");
22120 +#endif
22121 + } else {
22122 +#ifdef CONFIG_X86_64
22123 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))) {
22124 + printk(KERN_CONT "%08x ", (unsigned int)c);
22125 + printk(KERN_CONT "%08x ", (unsigned int)(c >> 32));
22126 + } else
22127 +#endif
22128 + printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
22129 + }
22130 + }
22131 + printk("\n");
22132 +}
22133 +#endif
22134 +
22135 +/**
22136 + * probe_kernel_write(): safely attempt to write to a location
22137 + * @dst: address to write to
22138 + * @src: pointer to the data that shall be written
22139 + * @size: size of the data chunk
22140 + *
22141 + * Safely write to address @dst from the buffer at @src. If a kernel fault
22142 + * happens, handle that and return -EFAULT.
22143 + */
22144 +long notrace probe_kernel_write(void *dst, const void *src, size_t size)
22145 +{
22146 + long ret;
22147 + mm_segment_t old_fs = get_fs();
22148 +
22149 + set_fs(KERNEL_DS);
22150 + pagefault_disable();
22151 + pax_open_kernel();
22152 + ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
22153 + pax_close_kernel();
22154 + pagefault_enable();
22155 + set_fs(old_fs);
22156 +
22157 + return ret ? -EFAULT : 0;
22158 +}
22159 diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
22160 index dd74e46..7d26398 100644
22161 --- a/arch/x86/mm/gup.c
22162 +++ b/arch/x86/mm/gup.c
22163 @@ -255,7 +255,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
22164 addr = start;
22165 len = (unsigned long) nr_pages << PAGE_SHIFT;
22166 end = start + len;
22167 - if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
22168 + if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
22169 (void __user *)start, len)))
22170 return 0;
22171
22172 diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
22173 index f4f29b1..5cac4fb 100644
22174 --- a/arch/x86/mm/highmem_32.c
22175 +++ b/arch/x86/mm/highmem_32.c
22176 @@ -44,7 +44,11 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
22177 idx = type + KM_TYPE_NR*smp_processor_id();
22178 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
22179 BUG_ON(!pte_none(*(kmap_pte-idx)));
22180 +
22181 + pax_open_kernel();
22182 set_pte(kmap_pte-idx, mk_pte(page, prot));
22183 + pax_close_kernel();
22184 +
22185 arch_flush_lazy_mmu_mode();
22186
22187 return (void *)vaddr;
22188 diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
22189 index f581a18..29efd37 100644
22190 --- a/arch/x86/mm/hugetlbpage.c
22191 +++ b/arch/x86/mm/hugetlbpage.c
22192 @@ -266,13 +266,20 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
22193 struct hstate *h = hstate_file(file);
22194 struct mm_struct *mm = current->mm;
22195 struct vm_area_struct *vma;
22196 - unsigned long start_addr;
22197 + unsigned long start_addr, pax_task_size = TASK_SIZE;
22198 +
22199 +#ifdef CONFIG_PAX_SEGMEXEC
22200 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
22201 + pax_task_size = SEGMEXEC_TASK_SIZE;
22202 +#endif
22203 +
22204 + pax_task_size -= PAGE_SIZE;
22205
22206 if (len > mm->cached_hole_size) {
22207 - start_addr = mm->free_area_cache;
22208 + start_addr = mm->free_area_cache;
22209 } else {
22210 - start_addr = TASK_UNMAPPED_BASE;
22211 - mm->cached_hole_size = 0;
22212 + start_addr = mm->mmap_base;
22213 + mm->cached_hole_size = 0;
22214 }
22215
22216 full_search:
22217 @@ -280,26 +287,27 @@ full_search:
22218
22219 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
22220 /* At this point: (!vma || addr < vma->vm_end). */
22221 - if (TASK_SIZE - len < addr) {
22222 + if (pax_task_size - len < addr) {
22223 /*
22224 * Start a new search - just in case we missed
22225 * some holes.
22226 */
22227 - if (start_addr != TASK_UNMAPPED_BASE) {
22228 - start_addr = TASK_UNMAPPED_BASE;
22229 + if (start_addr != mm->mmap_base) {
22230 + start_addr = mm->mmap_base;
22231 mm->cached_hole_size = 0;
22232 goto full_search;
22233 }
22234 return -ENOMEM;
22235 }
22236 - if (!vma || addr + len <= vma->vm_start) {
22237 - mm->free_area_cache = addr + len;
22238 - return addr;
22239 - }
22240 + if (check_heap_stack_gap(vma, addr, len))
22241 + break;
22242 if (addr + mm->cached_hole_size < vma->vm_start)
22243 mm->cached_hole_size = vma->vm_start - addr;
22244 addr = ALIGN(vma->vm_end, huge_page_size(h));
22245 }
22246 +
22247 + mm->free_area_cache = addr + len;
22248 + return addr;
22249 }
22250
22251 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
22252 @@ -308,10 +316,9 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
22253 {
22254 struct hstate *h = hstate_file(file);
22255 struct mm_struct *mm = current->mm;
22256 - struct vm_area_struct *vma, *prev_vma;
22257 - unsigned long base = mm->mmap_base, addr = addr0;
22258 + struct vm_area_struct *vma;
22259 + unsigned long base = mm->mmap_base, addr;
22260 unsigned long largest_hole = mm->cached_hole_size;
22261 - int first_time = 1;
22262
22263 /* don't allow allocations above current base */
22264 if (mm->free_area_cache > base)
22265 @@ -321,64 +328,63 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
22266 largest_hole = 0;
22267 mm->free_area_cache = base;
22268 }
22269 -try_again:
22270 +
22271 /* make sure it can fit in the remaining address space */
22272 if (mm->free_area_cache < len)
22273 goto fail;
22274
22275 /* either no address requested or can't fit in requested address hole */
22276 - addr = (mm->free_area_cache - len) & huge_page_mask(h);
22277 + addr = (mm->free_area_cache - len);
22278 do {
22279 + addr &= huge_page_mask(h);
22280 + vma = find_vma(mm, addr);
22281 /*
22282 * Lookup failure means no vma is above this address,
22283 * i.e. return with success:
22284 - */
22285 - if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
22286 - return addr;
22287 -
22288 - /*
22289 * new region fits between prev_vma->vm_end and
22290 * vma->vm_start, use it:
22291 */
22292 - if (addr + len <= vma->vm_start &&
22293 - (!prev_vma || (addr >= prev_vma->vm_end))) {
22294 + if (check_heap_stack_gap(vma, addr, len)) {
22295 /* remember the address as a hint for next time */
22296 - mm->cached_hole_size = largest_hole;
22297 - return (mm->free_area_cache = addr);
22298 - } else {
22299 - /* pull free_area_cache down to the first hole */
22300 - if (mm->free_area_cache == vma->vm_end) {
22301 - mm->free_area_cache = vma->vm_start;
22302 - mm->cached_hole_size = largest_hole;
22303 - }
22304 + mm->cached_hole_size = largest_hole;
22305 + return (mm->free_area_cache = addr);
22306 + }
22307 + /* pull free_area_cache down to the first hole */
22308 + if (mm->free_area_cache == vma->vm_end) {
22309 + mm->free_area_cache = vma->vm_start;
22310 + mm->cached_hole_size = largest_hole;
22311 }
22312
22313 /* remember the largest hole we saw so far */
22314 if (addr + largest_hole < vma->vm_start)
22315 - largest_hole = vma->vm_start - addr;
22316 + largest_hole = vma->vm_start - addr;
22317
22318 /* try just below the current vma->vm_start */
22319 - addr = (vma->vm_start - len) & huge_page_mask(h);
22320 - } while (len <= vma->vm_start);
22321 + addr = skip_heap_stack_gap(vma, len);
22322 + } while (!IS_ERR_VALUE(addr));
22323
22324 fail:
22325 /*
22326 - * if hint left us with no space for the requested
22327 - * mapping then try again:
22328 - */
22329 - if (first_time) {
22330 - mm->free_area_cache = base;
22331 - largest_hole = 0;
22332 - first_time = 0;
22333 - goto try_again;
22334 - }
22335 - /*
22336 * A failed mmap() very likely causes application failure,
22337 * so fall back to the bottom-up function here. This scenario
22338 * can happen with large stack limits and large mmap()
22339 * allocations.
22340 */
22341 - mm->free_area_cache = TASK_UNMAPPED_BASE;
22342 +
22343 +#ifdef CONFIG_PAX_SEGMEXEC
22344 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
22345 + mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
22346 + else
22347 +#endif
22348 +
22349 + mm->mmap_base = TASK_UNMAPPED_BASE;
22350 +
22351 +#ifdef CONFIG_PAX_RANDMMAP
22352 + if (mm->pax_flags & MF_PAX_RANDMMAP)
22353 + mm->mmap_base += mm->delta_mmap;
22354 +#endif
22355 +
22356 + mm->free_area_cache = mm->mmap_base;
22357 mm->cached_hole_size = ~0UL;
22358 addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
22359 len, pgoff, flags);
22360 @@ -386,6 +392,7 @@ fail:
22361 /*
22362 * Restore the topdown base:
22363 */
22364 + mm->mmap_base = base;
22365 mm->free_area_cache = base;
22366 mm->cached_hole_size = ~0UL;
22367
22368 @@ -399,10 +406,19 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
22369 struct hstate *h = hstate_file(file);
22370 struct mm_struct *mm = current->mm;
22371 struct vm_area_struct *vma;
22372 + unsigned long pax_task_size = TASK_SIZE;
22373
22374 if (len & ~huge_page_mask(h))
22375 return -EINVAL;
22376 - if (len > TASK_SIZE)
22377 +
22378 +#ifdef CONFIG_PAX_SEGMEXEC
22379 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
22380 + pax_task_size = SEGMEXEC_TASK_SIZE;
22381 +#endif
22382 +
22383 + pax_task_size -= PAGE_SIZE;
22384 +
22385 + if (len > pax_task_size)
22386 return -ENOMEM;
22387
22388 if (flags & MAP_FIXED) {
22389 @@ -414,8 +430,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
22390 if (addr) {
22391 addr = ALIGN(addr, huge_page_size(h));
22392 vma = find_vma(mm, addr);
22393 - if (TASK_SIZE - len >= addr &&
22394 - (!vma || addr + len <= vma->vm_start))
22395 + if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
22396 return addr;
22397 }
22398 if (mm->get_unmapped_area == arch_get_unmapped_area)
22399 diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
22400 index 87488b9..a55509f 100644
22401 --- a/arch/x86/mm/init.c
22402 +++ b/arch/x86/mm/init.c
22403 @@ -15,6 +15,7 @@
22404 #include <asm/tlbflush.h>
22405 #include <asm/tlb.h>
22406 #include <asm/proto.h>
22407 +#include <asm/desc.h>
22408
22409 unsigned long __initdata pgt_buf_start;
22410 unsigned long __meminitdata pgt_buf_end;
22411 @@ -31,7 +32,7 @@ int direct_gbpages
22412 static void __init find_early_table_space(unsigned long end, int use_pse,
22413 int use_gbpages)
22414 {
22415 - unsigned long puds, pmds, ptes, tables, start = 0, good_end = end;
22416 + unsigned long puds, pmds, ptes, tables, start = 0x100000, good_end = end;
22417 phys_addr_t base;
22418
22419 puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
22420 @@ -312,8 +313,29 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
22421 */
22422 int devmem_is_allowed(unsigned long pagenr)
22423 {
22424 +#ifdef CONFIG_GRKERNSEC_KMEM
22425 + /* allow BDA */
22426 + if (!pagenr)
22427 + return 1;
22428 + /* allow EBDA */
22429 + if ((0x9f000 >> PAGE_SHIFT) == pagenr)
22430 + return 1;
22431 +#else
22432 + if (!pagenr)
22433 + return 1;
22434 +#ifdef CONFIG_VM86
22435 + if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
22436 + return 1;
22437 +#endif
22438 +#endif
22439 +
22440 + if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
22441 + return 1;
22442 +#ifdef CONFIG_GRKERNSEC_KMEM
22443 + /* throw out everything else below 1MB */
22444 if (pagenr <= 256)
22445 - return 1;
22446 + return 0;
22447 +#endif
22448 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
22449 return 0;
22450 if (!page_is_ram(pagenr))
22451 @@ -372,6 +394,86 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
22452
22453 void free_initmem(void)
22454 {
22455 +
22456 +#ifdef CONFIG_PAX_KERNEXEC
22457 +#ifdef CONFIG_X86_32
22458 + /* PaX: limit KERNEL_CS to actual size */
22459 + unsigned long addr, limit;
22460 + struct desc_struct d;
22461 + int cpu;
22462 +
22463 + limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
22464 + limit = (limit - 1UL) >> PAGE_SHIFT;
22465 +
22466 + memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
22467 + for (cpu = 0; cpu < NR_CPUS; cpu++) {
22468 + pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
22469 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
22470 + }
22471 +
22472 + /* PaX: make KERNEL_CS read-only */
22473 + addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
22474 + if (!paravirt_enabled())
22475 + set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
22476 +/*
22477 + for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
22478 + pgd = pgd_offset_k(addr);
22479 + pud = pud_offset(pgd, addr);
22480 + pmd = pmd_offset(pud, addr);
22481 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
22482 + }
22483 +*/
22484 +#ifdef CONFIG_X86_PAE
22485 + set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
22486 +/*
22487 + for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
22488 + pgd = pgd_offset_k(addr);
22489 + pud = pud_offset(pgd, addr);
22490 + pmd = pmd_offset(pud, addr);
22491 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
22492 + }
22493 +*/
22494 +#endif
22495 +
22496 +#ifdef CONFIG_MODULES
22497 + set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
22498 +#endif
22499 +
22500 +#else
22501 + pgd_t *pgd;
22502 + pud_t *pud;
22503 + pmd_t *pmd;
22504 + unsigned long addr, end;
22505 +
22506 + /* PaX: make kernel code/rodata read-only, rest non-executable */
22507 + for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
22508 + pgd = pgd_offset_k(addr);
22509 + pud = pud_offset(pgd, addr);
22510 + pmd = pmd_offset(pud, addr);
22511 + if (!pmd_present(*pmd))
22512 + continue;
22513 + if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
22514 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
22515 + else
22516 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
22517 + }
22518 +
22519 + addr = (unsigned long)__va(__pa(__START_KERNEL_map));
22520 + end = addr + KERNEL_IMAGE_SIZE;
22521 + for (; addr < end; addr += PMD_SIZE) {
22522 + pgd = pgd_offset_k(addr);
22523 + pud = pud_offset(pgd, addr);
22524 + pmd = pmd_offset(pud, addr);
22525 + if (!pmd_present(*pmd))
22526 + continue;
22527 + if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
22528 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
22529 + }
22530 +#endif
22531 +
22532 + flush_tlb_all();
22533 +#endif
22534 +
22535 free_init_pages("unused kernel memory",
22536 (unsigned long)(&__init_begin),
22537 (unsigned long)(&__init_end));
22538 diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
22539 index 29f7c6d..b46b35b 100644
22540 --- a/arch/x86/mm/init_32.c
22541 +++ b/arch/x86/mm/init_32.c
22542 @@ -74,36 +74,6 @@ static __init void *alloc_low_page(void)
22543 }
22544
22545 /*
22546 - * Creates a middle page table and puts a pointer to it in the
22547 - * given global directory entry. This only returns the gd entry
22548 - * in non-PAE compilation mode, since the middle layer is folded.
22549 - */
22550 -static pmd_t * __init one_md_table_init(pgd_t *pgd)
22551 -{
22552 - pud_t *pud;
22553 - pmd_t *pmd_table;
22554 -
22555 -#ifdef CONFIG_X86_PAE
22556 - if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
22557 - if (after_bootmem)
22558 - pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
22559 - else
22560 - pmd_table = (pmd_t *)alloc_low_page();
22561 - paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
22562 - set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
22563 - pud = pud_offset(pgd, 0);
22564 - BUG_ON(pmd_table != pmd_offset(pud, 0));
22565 -
22566 - return pmd_table;
22567 - }
22568 -#endif
22569 - pud = pud_offset(pgd, 0);
22570 - pmd_table = pmd_offset(pud, 0);
22571 -
22572 - return pmd_table;
22573 -}
22574 -
22575 -/*
22576 * Create a page table and place a pointer to it in a middle page
22577 * directory entry:
22578 */
22579 @@ -123,13 +93,28 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
22580 page_table = (pte_t *)alloc_low_page();
22581
22582 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
22583 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
22584 + set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
22585 +#else
22586 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
22587 +#endif
22588 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
22589 }
22590
22591 return pte_offset_kernel(pmd, 0);
22592 }
22593
22594 +static pmd_t * __init one_md_table_init(pgd_t *pgd)
22595 +{
22596 + pud_t *pud;
22597 + pmd_t *pmd_table;
22598 +
22599 + pud = pud_offset(pgd, 0);
22600 + pmd_table = pmd_offset(pud, 0);
22601 +
22602 + return pmd_table;
22603 +}
22604 +
22605 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
22606 {
22607 int pgd_idx = pgd_index(vaddr);
22608 @@ -203,6 +188,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
22609 int pgd_idx, pmd_idx;
22610 unsigned long vaddr;
22611 pgd_t *pgd;
22612 + pud_t *pud;
22613 pmd_t *pmd;
22614 pte_t *pte = NULL;
22615
22616 @@ -212,8 +198,13 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
22617 pgd = pgd_base + pgd_idx;
22618
22619 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
22620 - pmd = one_md_table_init(pgd);
22621 - pmd = pmd + pmd_index(vaddr);
22622 + pud = pud_offset(pgd, vaddr);
22623 + pmd = pmd_offset(pud, vaddr);
22624 +
22625 +#ifdef CONFIG_X86_PAE
22626 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
22627 +#endif
22628 +
22629 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
22630 pmd++, pmd_idx++) {
22631 pte = page_table_kmap_check(one_page_table_init(pmd),
22632 @@ -225,11 +216,20 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
22633 }
22634 }
22635
22636 -static inline int is_kernel_text(unsigned long addr)
22637 +static inline int is_kernel_text(unsigned long start, unsigned long end)
22638 {
22639 - if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
22640 - return 1;
22641 - return 0;
22642 + if ((start > ktla_ktva((unsigned long)_etext) ||
22643 + end <= ktla_ktva((unsigned long)_stext)) &&
22644 + (start > ktla_ktva((unsigned long)_einittext) ||
22645 + end <= ktla_ktva((unsigned long)_sinittext)) &&
22646 +
22647 +#ifdef CONFIG_ACPI_SLEEP
22648 + (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
22649 +#endif
22650 +
22651 + (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
22652 + return 0;
22653 + return 1;
22654 }
22655
22656 /*
22657 @@ -246,9 +246,10 @@ kernel_physical_mapping_init(unsigned long start,
22658 unsigned long last_map_addr = end;
22659 unsigned long start_pfn, end_pfn;
22660 pgd_t *pgd_base = swapper_pg_dir;
22661 - int pgd_idx, pmd_idx, pte_ofs;
22662 + unsigned int pgd_idx, pmd_idx, pte_ofs;
22663 unsigned long pfn;
22664 pgd_t *pgd;
22665 + pud_t *pud;
22666 pmd_t *pmd;
22667 pte_t *pte;
22668 unsigned pages_2m, pages_4k;
22669 @@ -281,8 +282,13 @@ repeat:
22670 pfn = start_pfn;
22671 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
22672 pgd = pgd_base + pgd_idx;
22673 - for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
22674 - pmd = one_md_table_init(pgd);
22675 + for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
22676 + pud = pud_offset(pgd, 0);
22677 + pmd = pmd_offset(pud, 0);
22678 +
22679 +#ifdef CONFIG_X86_PAE
22680 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
22681 +#endif
22682
22683 if (pfn >= end_pfn)
22684 continue;
22685 @@ -294,14 +300,13 @@ repeat:
22686 #endif
22687 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
22688 pmd++, pmd_idx++) {
22689 - unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
22690 + unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
22691
22692 /*
22693 * Map with big pages if possible, otherwise
22694 * create normal page tables:
22695 */
22696 if (use_pse) {
22697 - unsigned int addr2;
22698 pgprot_t prot = PAGE_KERNEL_LARGE;
22699 /*
22700 * first pass will use the same initial
22701 @@ -311,11 +316,7 @@ repeat:
22702 __pgprot(PTE_IDENT_ATTR |
22703 _PAGE_PSE);
22704
22705 - addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
22706 - PAGE_OFFSET + PAGE_SIZE-1;
22707 -
22708 - if (is_kernel_text(addr) ||
22709 - is_kernel_text(addr2))
22710 + if (is_kernel_text(address, address + PMD_SIZE))
22711 prot = PAGE_KERNEL_LARGE_EXEC;
22712
22713 pages_2m++;
22714 @@ -332,7 +333,7 @@ repeat:
22715 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
22716 pte += pte_ofs;
22717 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
22718 - pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
22719 + pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
22720 pgprot_t prot = PAGE_KERNEL;
22721 /*
22722 * first pass will use the same initial
22723 @@ -340,7 +341,7 @@ repeat:
22724 */
22725 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
22726
22727 - if (is_kernel_text(addr))
22728 + if (is_kernel_text(address, address + PAGE_SIZE))
22729 prot = PAGE_KERNEL_EXEC;
22730
22731 pages_4k++;
22732 @@ -472,7 +473,7 @@ void __init native_pagetable_setup_start(pgd_t *base)
22733
22734 pud = pud_offset(pgd, va);
22735 pmd = pmd_offset(pud, va);
22736 - if (!pmd_present(*pmd))
22737 + if (!pmd_present(*pmd) || pmd_huge(*pmd))
22738 break;
22739
22740 pte = pte_offset_kernel(pmd, va);
22741 @@ -524,12 +525,10 @@ void __init early_ioremap_page_table_range_init(void)
22742
22743 static void __init pagetable_init(void)
22744 {
22745 - pgd_t *pgd_base = swapper_pg_dir;
22746 -
22747 - permanent_kmaps_init(pgd_base);
22748 + permanent_kmaps_init(swapper_pg_dir);
22749 }
22750
22751 -pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
22752 +pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
22753 EXPORT_SYMBOL_GPL(__supported_pte_mask);
22754
22755 /* user-defined highmem size */
22756 @@ -757,6 +756,12 @@ void __init mem_init(void)
22757
22758 pci_iommu_alloc();
22759
22760 +#ifdef CONFIG_PAX_PER_CPU_PGD
22761 + clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
22762 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
22763 + KERNEL_PGD_PTRS);
22764 +#endif
22765 +
22766 #ifdef CONFIG_FLATMEM
22767 BUG_ON(!mem_map);
22768 #endif
22769 @@ -774,7 +779,7 @@ void __init mem_init(void)
22770 set_highmem_pages_init();
22771
22772 codesize = (unsigned long) &_etext - (unsigned long) &_text;
22773 - datasize = (unsigned long) &_edata - (unsigned long) &_etext;
22774 + datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
22775 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
22776
22777 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
22778 @@ -815,10 +820,10 @@ void __init mem_init(void)
22779 ((unsigned long)&__init_end -
22780 (unsigned long)&__init_begin) >> 10,
22781
22782 - (unsigned long)&_etext, (unsigned long)&_edata,
22783 - ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
22784 + (unsigned long)&_sdata, (unsigned long)&_edata,
22785 + ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
22786
22787 - (unsigned long)&_text, (unsigned long)&_etext,
22788 + ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
22789 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
22790
22791 /*
22792 @@ -896,6 +901,7 @@ void set_kernel_text_rw(void)
22793 if (!kernel_set_to_readonly)
22794 return;
22795
22796 + start = ktla_ktva(start);
22797 pr_debug("Set kernel text: %lx - %lx for read write\n",
22798 start, start+size);
22799
22800 @@ -910,6 +916,7 @@ void set_kernel_text_ro(void)
22801 if (!kernel_set_to_readonly)
22802 return;
22803
22804 + start = ktla_ktva(start);
22805 pr_debug("Set kernel text: %lx - %lx for read only\n",
22806 start, start+size);
22807
22808 @@ -938,6 +945,7 @@ void mark_rodata_ro(void)
22809 unsigned long start = PFN_ALIGN(_text);
22810 unsigned long size = PFN_ALIGN(_etext) - start;
22811
22812 + start = ktla_ktva(start);
22813 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
22814 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
22815 size >> 10);
22816 diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
22817 index bbaaa00..16dffad 100644
22818 --- a/arch/x86/mm/init_64.c
22819 +++ b/arch/x86/mm/init_64.c
22820 @@ -75,7 +75,7 @@ early_param("gbpages", parse_direct_gbpages_on);
22821 * around without checking the pgd every time.
22822 */
22823
22824 -pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
22825 +pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_IOMAP);
22826 EXPORT_SYMBOL_GPL(__supported_pte_mask);
22827
22828 int force_personality32;
22829 @@ -108,12 +108,22 @@ void sync_global_pgds(unsigned long start, unsigned long end)
22830
22831 for (address = start; address <= end; address += PGDIR_SIZE) {
22832 const pgd_t *pgd_ref = pgd_offset_k(address);
22833 +
22834 +#ifdef CONFIG_PAX_PER_CPU_PGD
22835 + unsigned long cpu;
22836 +#else
22837 struct page *page;
22838 +#endif
22839
22840 if (pgd_none(*pgd_ref))
22841 continue;
22842
22843 spin_lock(&pgd_lock);
22844 +
22845 +#ifdef CONFIG_PAX_PER_CPU_PGD
22846 + for (cpu = 0; cpu < NR_CPUS; ++cpu) {
22847 + pgd_t *pgd = pgd_offset_cpu(cpu, address);
22848 +#else
22849 list_for_each_entry(page, &pgd_list, lru) {
22850 pgd_t *pgd;
22851 spinlock_t *pgt_lock;
22852 @@ -122,6 +132,7 @@ void sync_global_pgds(unsigned long start, unsigned long end)
22853 /* the pgt_lock only for Xen */
22854 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
22855 spin_lock(pgt_lock);
22856 +#endif
22857
22858 if (pgd_none(*pgd))
22859 set_pgd(pgd, *pgd_ref);
22860 @@ -129,7 +140,10 @@ void sync_global_pgds(unsigned long start, unsigned long end)
22861 BUG_ON(pgd_page_vaddr(*pgd)
22862 != pgd_page_vaddr(*pgd_ref));
22863
22864 +#ifndef CONFIG_PAX_PER_CPU_PGD
22865 spin_unlock(pgt_lock);
22866 +#endif
22867 +
22868 }
22869 spin_unlock(&pgd_lock);
22870 }
22871 @@ -203,7 +217,9 @@ void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
22872 pmd = fill_pmd(pud, vaddr);
22873 pte = fill_pte(pmd, vaddr);
22874
22875 + pax_open_kernel();
22876 set_pte(pte, new_pte);
22877 + pax_close_kernel();
22878
22879 /*
22880 * It's enough to flush this one mapping.
22881 @@ -262,14 +278,12 @@ static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
22882 pgd = pgd_offset_k((unsigned long)__va(phys));
22883 if (pgd_none(*pgd)) {
22884 pud = (pud_t *) spp_getpage();
22885 - set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
22886 - _PAGE_USER));
22887 + set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
22888 }
22889 pud = pud_offset(pgd, (unsigned long)__va(phys));
22890 if (pud_none(*pud)) {
22891 pmd = (pmd_t *) spp_getpage();
22892 - set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
22893 - _PAGE_USER));
22894 + set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
22895 }
22896 pmd = pmd_offset(pud, phys);
22897 BUG_ON(!pmd_none(*pmd));
22898 @@ -330,7 +344,7 @@ static __ref void *alloc_low_page(unsigned long *phys)
22899 if (pfn >= pgt_buf_top)
22900 panic("alloc_low_page: ran out of memory");
22901
22902 - adr = early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
22903 + adr = (void __force_kernel *)early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
22904 clear_page(adr);
22905 *phys = pfn * PAGE_SIZE;
22906 return adr;
22907 @@ -346,7 +360,7 @@ static __ref void *map_low_page(void *virt)
22908
22909 phys = __pa(virt);
22910 left = phys & (PAGE_SIZE - 1);
22911 - adr = early_memremap(phys & PAGE_MASK, PAGE_SIZE);
22912 + adr = (void __force_kernel *)early_memremap(phys & PAGE_MASK, PAGE_SIZE);
22913 adr = (void *)(((unsigned long)adr) | left);
22914
22915 return adr;
22916 @@ -693,6 +707,12 @@ void __init mem_init(void)
22917
22918 pci_iommu_alloc();
22919
22920 +#ifdef CONFIG_PAX_PER_CPU_PGD
22921 + clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
22922 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
22923 + KERNEL_PGD_PTRS);
22924 +#endif
22925 +
22926 /* clear_bss() already clear the empty_zero_page */
22927
22928 reservedpages = 0;
22929 @@ -853,8 +873,8 @@ int kern_addr_valid(unsigned long addr)
22930 static struct vm_area_struct gate_vma = {
22931 .vm_start = VSYSCALL_START,
22932 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
22933 - .vm_page_prot = PAGE_READONLY_EXEC,
22934 - .vm_flags = VM_READ | VM_EXEC
22935 + .vm_page_prot = PAGE_READONLY,
22936 + .vm_flags = VM_READ
22937 };
22938
22939 struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
22940 @@ -888,7 +908,7 @@ int in_gate_area_no_mm(unsigned long addr)
22941
22942 const char *arch_vma_name(struct vm_area_struct *vma)
22943 {
22944 - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
22945 + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
22946 return "[vdso]";
22947 if (vma == &gate_vma)
22948 return "[vsyscall]";
22949 diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
22950 index 7b179b4..6bd1777 100644
22951 --- a/arch/x86/mm/iomap_32.c
22952 +++ b/arch/x86/mm/iomap_32.c
22953 @@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
22954 type = kmap_atomic_idx_push();
22955 idx = type + KM_TYPE_NR * smp_processor_id();
22956 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
22957 +
22958 + pax_open_kernel();
22959 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
22960 + pax_close_kernel();
22961 +
22962 arch_flush_lazy_mmu_mode();
22963
22964 return (void *)vaddr;
22965 diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
22966 index be1ef57..9680edc 100644
22967 --- a/arch/x86/mm/ioremap.c
22968 +++ b/arch/x86/mm/ioremap.c
22969 @@ -97,7 +97,7 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
22970 for (pfn = phys_addr >> PAGE_SHIFT; pfn <= last_pfn; pfn++) {
22971 int is_ram = page_is_ram(pfn);
22972
22973 - if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
22974 + if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
22975 return NULL;
22976 WARN_ON_ONCE(is_ram);
22977 }
22978 @@ -344,7 +344,7 @@ static int __init early_ioremap_debug_setup(char *str)
22979 early_param("early_ioremap_debug", early_ioremap_debug_setup);
22980
22981 static __initdata int after_paging_init;
22982 -static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
22983 +static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
22984
22985 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
22986 {
22987 @@ -381,8 +381,7 @@ void __init early_ioremap_init(void)
22988 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
22989
22990 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
22991 - memset(bm_pte, 0, sizeof(bm_pte));
22992 - pmd_populate_kernel(&init_mm, pmd, bm_pte);
22993 + pmd_populate_user(&init_mm, pmd, bm_pte);
22994
22995 /*
22996 * The boot-ioremap range spans multiple pmds, for which
22997 diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
22998 index d87dd6d..bf3fa66 100644
22999 --- a/arch/x86/mm/kmemcheck/kmemcheck.c
23000 +++ b/arch/x86/mm/kmemcheck/kmemcheck.c
23001 @@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *regs, unsigned long address,
23002 * memory (e.g. tracked pages)? For now, we need this to avoid
23003 * invoking kmemcheck for PnP BIOS calls.
23004 */
23005 - if (regs->flags & X86_VM_MASK)
23006 + if (v8086_mode(regs))
23007 return false;
23008 - if (regs->cs != __KERNEL_CS)
23009 + if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
23010 return false;
23011
23012 pte = kmemcheck_pte_lookup(address);
23013 diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
23014 index 845df68..1d8d29f 100644
23015 --- a/arch/x86/mm/mmap.c
23016 +++ b/arch/x86/mm/mmap.c
23017 @@ -52,7 +52,7 @@ static unsigned int stack_maxrandom_size(void)
23018 * Leave an at least ~128 MB hole with possible stack randomization.
23019 */
23020 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
23021 -#define MAX_GAP (TASK_SIZE/6*5)
23022 +#define MAX_GAP (pax_task_size/6*5)
23023
23024 static int mmap_is_legacy(void)
23025 {
23026 @@ -82,27 +82,40 @@ static unsigned long mmap_rnd(void)
23027 return rnd << PAGE_SHIFT;
23028 }
23029
23030 -static unsigned long mmap_base(void)
23031 +static unsigned long mmap_base(struct mm_struct *mm)
23032 {
23033 unsigned long gap = rlimit(RLIMIT_STACK);
23034 + unsigned long pax_task_size = TASK_SIZE;
23035 +
23036 +#ifdef CONFIG_PAX_SEGMEXEC
23037 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
23038 + pax_task_size = SEGMEXEC_TASK_SIZE;
23039 +#endif
23040
23041 if (gap < MIN_GAP)
23042 gap = MIN_GAP;
23043 else if (gap > MAX_GAP)
23044 gap = MAX_GAP;
23045
23046 - return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
23047 + return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
23048 }
23049
23050 /*
23051 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
23052 * does, but not when emulating X86_32
23053 */
23054 -static unsigned long mmap_legacy_base(void)
23055 +static unsigned long mmap_legacy_base(struct mm_struct *mm)
23056 {
23057 - if (mmap_is_ia32())
23058 + if (mmap_is_ia32()) {
23059 +
23060 +#ifdef CONFIG_PAX_SEGMEXEC
23061 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
23062 + return SEGMEXEC_TASK_UNMAPPED_BASE;
23063 + else
23064 +#endif
23065 +
23066 return TASK_UNMAPPED_BASE;
23067 - else
23068 + } else
23069 return TASK_UNMAPPED_BASE + mmap_rnd();
23070 }
23071
23072 @@ -113,11 +126,23 @@ static unsigned long mmap_legacy_base(void)
23073 void arch_pick_mmap_layout(struct mm_struct *mm)
23074 {
23075 if (mmap_is_legacy()) {
23076 - mm->mmap_base = mmap_legacy_base();
23077 + mm->mmap_base = mmap_legacy_base(mm);
23078 +
23079 +#ifdef CONFIG_PAX_RANDMMAP
23080 + if (mm->pax_flags & MF_PAX_RANDMMAP)
23081 + mm->mmap_base += mm->delta_mmap;
23082 +#endif
23083 +
23084 mm->get_unmapped_area = arch_get_unmapped_area;
23085 mm->unmap_area = arch_unmap_area;
23086 } else {
23087 - mm->mmap_base = mmap_base();
23088 + mm->mmap_base = mmap_base(mm);
23089 +
23090 +#ifdef CONFIG_PAX_RANDMMAP
23091 + if (mm->pax_flags & MF_PAX_RANDMMAP)
23092 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
23093 +#endif
23094 +
23095 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
23096 mm->unmap_area = arch_unmap_area_topdown;
23097 }
23098 diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
23099 index de54b9b..799051e 100644
23100 --- a/arch/x86/mm/mmio-mod.c
23101 +++ b/arch/x86/mm/mmio-mod.c
23102 @@ -194,7 +194,7 @@ static void pre(struct kmmio_probe *p, struct pt_regs *regs,
23103 break;
23104 default:
23105 {
23106 - unsigned char *ip = (unsigned char *)instptr;
23107 + unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
23108 my_trace->opcode = MMIO_UNKNOWN_OP;
23109 my_trace->width = 0;
23110 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
23111 @@ -234,7 +234,7 @@ static void post(struct kmmio_probe *p, unsigned long condition,
23112 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
23113 void __iomem *addr)
23114 {
23115 - static atomic_t next_id;
23116 + static atomic_unchecked_t next_id;
23117 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
23118 /* These are page-unaligned. */
23119 struct mmiotrace_map map = {
23120 @@ -258,7 +258,7 @@ static void ioremap_trace_core(resource_size_t offset, unsigned long size,
23121 .private = trace
23122 },
23123 .phys = offset,
23124 - .id = atomic_inc_return(&next_id)
23125 + .id = atomic_inc_return_unchecked(&next_id)
23126 };
23127 map.map_id = trace->id;
23128
23129 diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c
23130 index b008656..773eac2 100644
23131 --- a/arch/x86/mm/pageattr-test.c
23132 +++ b/arch/x86/mm/pageattr-test.c
23133 @@ -36,7 +36,7 @@ enum {
23134
23135 static int pte_testbit(pte_t pte)
23136 {
23137 - return pte_flags(pte) & _PAGE_UNUSED1;
23138 + return pte_flags(pte) & _PAGE_CPA_TEST;
23139 }
23140
23141 struct split_state {
23142 diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
23143 index f9e5267..6f6e27f 100644
23144 --- a/arch/x86/mm/pageattr.c
23145 +++ b/arch/x86/mm/pageattr.c
23146 @@ -261,7 +261,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
23147 */
23148 #ifdef CONFIG_PCI_BIOS
23149 if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
23150 - pgprot_val(forbidden) |= _PAGE_NX;
23151 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
23152 #endif
23153
23154 /*
23155 @@ -269,9 +269,10 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
23156 * Does not cover __inittext since that is gone later on. On
23157 * 64bit we do not enforce !NX on the low mapping
23158 */
23159 - if (within(address, (unsigned long)_text, (unsigned long)_etext))
23160 - pgprot_val(forbidden) |= _PAGE_NX;
23161 + if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
23162 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
23163
23164 +#ifdef CONFIG_DEBUG_RODATA
23165 /*
23166 * The .rodata section needs to be read-only. Using the pfn
23167 * catches all aliases.
23168 @@ -279,6 +280,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
23169 if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
23170 __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
23171 pgprot_val(forbidden) |= _PAGE_RW;
23172 +#endif
23173
23174 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
23175 /*
23176 @@ -317,6 +319,13 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
23177 }
23178 #endif
23179
23180 +#ifdef CONFIG_PAX_KERNEXEC
23181 + if (within(pfn, __pa((unsigned long)&_text), __pa((unsigned long)&_sdata))) {
23182 + pgprot_val(forbidden) |= _PAGE_RW;
23183 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
23184 + }
23185 +#endif
23186 +
23187 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
23188
23189 return prot;
23190 @@ -369,23 +378,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
23191 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
23192 {
23193 /* change init_mm */
23194 + pax_open_kernel();
23195 set_pte_atomic(kpte, pte);
23196 +
23197 #ifdef CONFIG_X86_32
23198 if (!SHARED_KERNEL_PMD) {
23199 +
23200 +#ifdef CONFIG_PAX_PER_CPU_PGD
23201 + unsigned long cpu;
23202 +#else
23203 struct page *page;
23204 +#endif
23205
23206 +#ifdef CONFIG_PAX_PER_CPU_PGD
23207 + for (cpu = 0; cpu < NR_CPUS; ++cpu) {
23208 + pgd_t *pgd = get_cpu_pgd(cpu);
23209 +#else
23210 list_for_each_entry(page, &pgd_list, lru) {
23211 - pgd_t *pgd;
23212 + pgd_t *pgd = (pgd_t *)page_address(page);
23213 +#endif
23214 +
23215 pud_t *pud;
23216 pmd_t *pmd;
23217
23218 - pgd = (pgd_t *)page_address(page) + pgd_index(address);
23219 + pgd += pgd_index(address);
23220 pud = pud_offset(pgd, address);
23221 pmd = pmd_offset(pud, address);
23222 set_pte_atomic((pte_t *)pmd, pte);
23223 }
23224 }
23225 #endif
23226 + pax_close_kernel();
23227 }
23228
23229 static int
23230 diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
23231 index f6ff57b..481690f 100644
23232 --- a/arch/x86/mm/pat.c
23233 +++ b/arch/x86/mm/pat.c
23234 @@ -361,7 +361,7 @@ int free_memtype(u64 start, u64 end)
23235
23236 if (!entry) {
23237 printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
23238 - current->comm, current->pid, start, end);
23239 + current->comm, task_pid_nr(current), start, end);
23240 return -EINVAL;
23241 }
23242
23243 @@ -492,8 +492,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
23244 while (cursor < to) {
23245 if (!devmem_is_allowed(pfn)) {
23246 printk(KERN_INFO
23247 - "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
23248 - current->comm, from, to);
23249 + "Program %s tried to access /dev/mem between %Lx->%Lx (%Lx).\n",
23250 + current->comm, from, to, cursor);
23251 return 0;
23252 }
23253 cursor += PAGE_SIZE;
23254 @@ -557,7 +557,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
23255 printk(KERN_INFO
23256 "%s:%d ioremap_change_attr failed %s "
23257 "for %Lx-%Lx\n",
23258 - current->comm, current->pid,
23259 + current->comm, task_pid_nr(current),
23260 cattr_name(flags),
23261 base, (unsigned long long)(base + size));
23262 return -EINVAL;
23263 @@ -593,7 +593,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
23264 if (want_flags != flags) {
23265 printk(KERN_WARNING
23266 "%s:%d map pfn RAM range req %s for %Lx-%Lx, got %s\n",
23267 - current->comm, current->pid,
23268 + current->comm, task_pid_nr(current),
23269 cattr_name(want_flags),
23270 (unsigned long long)paddr,
23271 (unsigned long long)(paddr + size),
23272 @@ -615,7 +615,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
23273 free_memtype(paddr, paddr + size);
23274 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
23275 " for %Lx-%Lx, got %s\n",
23276 - current->comm, current->pid,
23277 + current->comm, task_pid_nr(current),
23278 cattr_name(want_flags),
23279 (unsigned long long)paddr,
23280 (unsigned long long)(paddr + size),
23281 diff --git a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c
23282 index 9f0614d..92ae64a 100644
23283 --- a/arch/x86/mm/pf_in.c
23284 +++ b/arch/x86/mm/pf_in.c
23285 @@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned long ins_addr)
23286 int i;
23287 enum reason_type rv = OTHERS;
23288
23289 - p = (unsigned char *)ins_addr;
23290 + p = (unsigned char *)ktla_ktva(ins_addr);
23291 p += skip_prefix(p, &prf);
23292 p += get_opcode(p, &opcode);
23293
23294 @@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(unsigned long ins_addr)
23295 struct prefix_bits prf;
23296 int i;
23297
23298 - p = (unsigned char *)ins_addr;
23299 + p = (unsigned char *)ktla_ktva(ins_addr);
23300 p += skip_prefix(p, &prf);
23301 p += get_opcode(p, &opcode);
23302
23303 @@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned long ins_addr)
23304 struct prefix_bits prf;
23305 int i;
23306
23307 - p = (unsigned char *)ins_addr;
23308 + p = (unsigned char *)ktla_ktva(ins_addr);
23309 p += skip_prefix(p, &prf);
23310 p += get_opcode(p, &opcode);
23311
23312 @@ -415,7 +415,7 @@ unsigned long get_ins_reg_val(unsigned long ins_addr, struct pt_regs *regs)
23313 struct prefix_bits prf;
23314 int i;
23315
23316 - p = (unsigned char *)ins_addr;
23317 + p = (unsigned char *)ktla_ktva(ins_addr);
23318 p += skip_prefix(p, &prf);
23319 p += get_opcode(p, &opcode);
23320 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
23321 @@ -470,7 +470,7 @@ unsigned long get_ins_imm_val(unsigned long ins_addr)
23322 struct prefix_bits prf;
23323 int i;
23324
23325 - p = (unsigned char *)ins_addr;
23326 + p = (unsigned char *)ktla_ktva(ins_addr);
23327 p += skip_prefix(p, &prf);
23328 p += get_opcode(p, &opcode);
23329 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
23330 diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
23331 index 8573b83..6372501 100644
23332 --- a/arch/x86/mm/pgtable.c
23333 +++ b/arch/x86/mm/pgtable.c
23334 @@ -84,10 +84,52 @@ static inline void pgd_list_del(pgd_t *pgd)
23335 list_del(&page->lru);
23336 }
23337
23338 -#define UNSHARED_PTRS_PER_PGD \
23339 - (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
23340 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23341 +pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
23342
23343 +void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count)
23344 +{
23345 + while (count--)
23346 + *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
23347 +}
23348 +#endif
23349
23350 +#ifdef CONFIG_PAX_PER_CPU_PGD
23351 +void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count)
23352 +{
23353 + while (count--)
23354 +
23355 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23356 + *dst++ = __pgd(pgd_val(*src++) & clone_pgd_mask);
23357 +#else
23358 + *dst++ = *src++;
23359 +#endif
23360 +
23361 +}
23362 +#endif
23363 +
23364 +#ifdef CONFIG_X86_64
23365 +#define pxd_t pud_t
23366 +#define pyd_t pgd_t
23367 +#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
23368 +#define pxd_free(mm, pud) pud_free((mm), (pud))
23369 +#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
23370 +#define pyd_offset(mm ,address) pgd_offset((mm), (address))
23371 +#define PYD_SIZE PGDIR_SIZE
23372 +#else
23373 +#define pxd_t pmd_t
23374 +#define pyd_t pud_t
23375 +#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
23376 +#define pxd_free(mm, pud) pmd_free((mm), (pud))
23377 +#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
23378 +#define pyd_offset(mm ,address) pud_offset((mm), (address))
23379 +#define PYD_SIZE PUD_SIZE
23380 +#endif
23381 +
23382 +#ifdef CONFIG_PAX_PER_CPU_PGD
23383 +static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {}
23384 +static inline void pgd_dtor(pgd_t *pgd) {}
23385 +#else
23386 static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
23387 {
23388 BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
23389 @@ -128,6 +170,7 @@ static void pgd_dtor(pgd_t *pgd)
23390 pgd_list_del(pgd);
23391 spin_unlock(&pgd_lock);
23392 }
23393 +#endif
23394
23395 /*
23396 * List of all pgd's needed for non-PAE so it can invalidate entries
23397 @@ -140,7 +183,7 @@ static void pgd_dtor(pgd_t *pgd)
23398 * -- wli
23399 */
23400
23401 -#ifdef CONFIG_X86_PAE
23402 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
23403 /*
23404 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
23405 * updating the top-level pagetable entries to guarantee the
23406 @@ -152,7 +195,7 @@ static void pgd_dtor(pgd_t *pgd)
23407 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
23408 * and initialize the kernel pmds here.
23409 */
23410 -#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
23411 +#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
23412
23413 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
23414 {
23415 @@ -170,36 +213,38 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
23416 */
23417 flush_tlb_mm(mm);
23418 }
23419 +#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
23420 +#define PREALLOCATED_PXDS USER_PGD_PTRS
23421 #else /* !CONFIG_X86_PAE */
23422
23423 /* No need to prepopulate any pagetable entries in non-PAE modes. */
23424 -#define PREALLOCATED_PMDS 0
23425 +#define PREALLOCATED_PXDS 0
23426
23427 #endif /* CONFIG_X86_PAE */
23428
23429 -static void free_pmds(pmd_t *pmds[])
23430 +static void free_pxds(pxd_t *pxds[])
23431 {
23432 int i;
23433
23434 - for(i = 0; i < PREALLOCATED_PMDS; i++)
23435 - if (pmds[i])
23436 - free_page((unsigned long)pmds[i]);
23437 + for(i = 0; i < PREALLOCATED_PXDS; i++)
23438 + if (pxds[i])
23439 + free_page((unsigned long)pxds[i]);
23440 }
23441
23442 -static int preallocate_pmds(pmd_t *pmds[])
23443 +static int preallocate_pxds(pxd_t *pxds[])
23444 {
23445 int i;
23446 bool failed = false;
23447
23448 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
23449 - pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
23450 - if (pmd == NULL)
23451 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
23452 + pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
23453 + if (pxd == NULL)
23454 failed = true;
23455 - pmds[i] = pmd;
23456 + pxds[i] = pxd;
23457 }
23458
23459 if (failed) {
23460 - free_pmds(pmds);
23461 + free_pxds(pxds);
23462 return -ENOMEM;
23463 }
23464
23465 @@ -212,51 +257,55 @@ static int preallocate_pmds(pmd_t *pmds[])
23466 * preallocate which never got a corresponding vma will need to be
23467 * freed manually.
23468 */
23469 -static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
23470 +static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
23471 {
23472 int i;
23473
23474 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
23475 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
23476 pgd_t pgd = pgdp[i];
23477
23478 if (pgd_val(pgd) != 0) {
23479 - pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
23480 + pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
23481
23482 - pgdp[i] = native_make_pgd(0);
23483 + set_pgd(pgdp + i, native_make_pgd(0));
23484
23485 - paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
23486 - pmd_free(mm, pmd);
23487 + paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
23488 + pxd_free(mm, pxd);
23489 }
23490 }
23491 }
23492
23493 -static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
23494 +static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
23495 {
23496 - pud_t *pud;
23497 + pyd_t *pyd;
23498 unsigned long addr;
23499 int i;
23500
23501 - if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
23502 + if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
23503 return;
23504
23505 - pud = pud_offset(pgd, 0);
23506 +#ifdef CONFIG_X86_64
23507 + pyd = pyd_offset(mm, 0L);
23508 +#else
23509 + pyd = pyd_offset(pgd, 0L);
23510 +#endif
23511
23512 - for (addr = i = 0; i < PREALLOCATED_PMDS;
23513 - i++, pud++, addr += PUD_SIZE) {
23514 - pmd_t *pmd = pmds[i];
23515 + for (addr = i = 0; i < PREALLOCATED_PXDS;
23516 + i++, pyd++, addr += PYD_SIZE) {
23517 + pxd_t *pxd = pxds[i];
23518
23519 if (i >= KERNEL_PGD_BOUNDARY)
23520 - memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
23521 - sizeof(pmd_t) * PTRS_PER_PMD);
23522 + memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
23523 + sizeof(pxd_t) * PTRS_PER_PMD);
23524
23525 - pud_populate(mm, pud, pmd);
23526 + pyd_populate(mm, pyd, pxd);
23527 }
23528 }
23529
23530 pgd_t *pgd_alloc(struct mm_struct *mm)
23531 {
23532 pgd_t *pgd;
23533 - pmd_t *pmds[PREALLOCATED_PMDS];
23534 + pxd_t *pxds[PREALLOCATED_PXDS];
23535
23536 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
23537
23538 @@ -265,11 +314,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
23539
23540 mm->pgd = pgd;
23541
23542 - if (preallocate_pmds(pmds) != 0)
23543 + if (preallocate_pxds(pxds) != 0)
23544 goto out_free_pgd;
23545
23546 if (paravirt_pgd_alloc(mm) != 0)
23547 - goto out_free_pmds;
23548 + goto out_free_pxds;
23549
23550 /*
23551 * Make sure that pre-populating the pmds is atomic with
23552 @@ -279,14 +328,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
23553 spin_lock(&pgd_lock);
23554
23555 pgd_ctor(mm, pgd);
23556 - pgd_prepopulate_pmd(mm, pgd, pmds);
23557 + pgd_prepopulate_pxd(mm, pgd, pxds);
23558
23559 spin_unlock(&pgd_lock);
23560
23561 return pgd;
23562
23563 -out_free_pmds:
23564 - free_pmds(pmds);
23565 +out_free_pxds:
23566 + free_pxds(pxds);
23567 out_free_pgd:
23568 free_page((unsigned long)pgd);
23569 out:
23570 @@ -295,7 +344,7 @@ out:
23571
23572 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
23573 {
23574 - pgd_mop_up_pmds(mm, pgd);
23575 + pgd_mop_up_pxds(mm, pgd);
23576 pgd_dtor(pgd);
23577 paravirt_pgd_free(mm, pgd);
23578 free_page((unsigned long)pgd);
23579 diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
23580 index cac7184..09a39fa 100644
23581 --- a/arch/x86/mm/pgtable_32.c
23582 +++ b/arch/x86/mm/pgtable_32.c
23583 @@ -48,10 +48,13 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
23584 return;
23585 }
23586 pte = pte_offset_kernel(pmd, vaddr);
23587 +
23588 + pax_open_kernel();
23589 if (pte_val(pteval))
23590 set_pte_at(&init_mm, vaddr, pte, pteval);
23591 else
23592 pte_clear(&init_mm, vaddr, pte);
23593 + pax_close_kernel();
23594
23595 /*
23596 * It's enough to flush this one mapping.
23597 diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
23598 index 410531d..0f16030 100644
23599 --- a/arch/x86/mm/setup_nx.c
23600 +++ b/arch/x86/mm/setup_nx.c
23601 @@ -5,8 +5,10 @@
23602 #include <asm/pgtable.h>
23603 #include <asm/proto.h>
23604
23605 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
23606 static int disable_nx __cpuinitdata;
23607
23608 +#ifndef CONFIG_PAX_PAGEEXEC
23609 /*
23610 * noexec = on|off
23611 *
23612 @@ -28,12 +30,17 @@ static int __init noexec_setup(char *str)
23613 return 0;
23614 }
23615 early_param("noexec", noexec_setup);
23616 +#endif
23617 +
23618 +#endif
23619
23620 void __cpuinit x86_configure_nx(void)
23621 {
23622 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
23623 if (cpu_has_nx && !disable_nx)
23624 __supported_pte_mask |= _PAGE_NX;
23625 else
23626 +#endif
23627 __supported_pte_mask &= ~_PAGE_NX;
23628 }
23629
23630 diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
23631 index d6c0418..06a0ad5 100644
23632 --- a/arch/x86/mm/tlb.c
23633 +++ b/arch/x86/mm/tlb.c
23634 @@ -65,7 +65,11 @@ void leave_mm(int cpu)
23635 BUG();
23636 cpumask_clear_cpu(cpu,
23637 mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
23638 +
23639 +#ifndef CONFIG_PAX_PER_CPU_PGD
23640 load_cr3(swapper_pg_dir);
23641 +#endif
23642 +
23643 }
23644 EXPORT_SYMBOL_GPL(leave_mm);
23645
23646 diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S
23647 index 6687022..ceabcfa 100644
23648 --- a/arch/x86/net/bpf_jit.S
23649 +++ b/arch/x86/net/bpf_jit.S
23650 @@ -9,6 +9,7 @@
23651 */
23652 #include <linux/linkage.h>
23653 #include <asm/dwarf2.h>
23654 +#include <asm/alternative-asm.h>
23655
23656 /*
23657 * Calling convention :
23658 @@ -35,6 +36,7 @@ sk_load_word:
23659 jle bpf_slow_path_word
23660 mov (SKBDATA,%rsi),%eax
23661 bswap %eax /* ntohl() */
23662 + pax_force_retaddr
23663 ret
23664
23665
23666 @@ -53,6 +55,7 @@ sk_load_half:
23667 jle bpf_slow_path_half
23668 movzwl (SKBDATA,%rsi),%eax
23669 rol $8,%ax # ntohs()
23670 + pax_force_retaddr
23671 ret
23672
23673 sk_load_byte_ind:
23674 @@ -66,6 +69,7 @@ sk_load_byte:
23675 cmp %esi,%r9d /* if (offset >= hlen) goto bpf_slow_path_byte */
23676 jle bpf_slow_path_byte
23677 movzbl (SKBDATA,%rsi),%eax
23678 + pax_force_retaddr
23679 ret
23680
23681 /**
23682 @@ -82,6 +86,7 @@ ENTRY(sk_load_byte_msh)
23683 movzbl (SKBDATA,%rsi),%ebx
23684 and $15,%bl
23685 shl $2,%bl
23686 + pax_force_retaddr
23687 ret
23688 CFI_ENDPROC
23689 ENDPROC(sk_load_byte_msh)
23690 @@ -91,6 +96,7 @@ bpf_error:
23691 xor %eax,%eax
23692 mov -8(%rbp),%rbx
23693 leaveq
23694 + pax_force_retaddr
23695 ret
23696
23697 /* rsi contains offset and can be scratched */
23698 @@ -113,6 +119,7 @@ bpf_slow_path_word:
23699 js bpf_error
23700 mov -12(%rbp),%eax
23701 bswap %eax
23702 + pax_force_retaddr
23703 ret
23704
23705 bpf_slow_path_half:
23706 @@ -121,12 +128,14 @@ bpf_slow_path_half:
23707 mov -12(%rbp),%ax
23708 rol $8,%ax
23709 movzwl %ax,%eax
23710 + pax_force_retaddr
23711 ret
23712
23713 bpf_slow_path_byte:
23714 bpf_slow_path_common(1)
23715 js bpf_error
23716 movzbl -12(%rbp),%eax
23717 + pax_force_retaddr
23718 ret
23719
23720 bpf_slow_path_byte_msh:
23721 @@ -137,4 +146,5 @@ bpf_slow_path_byte_msh:
23722 and $15,%al
23723 shl $2,%al
23724 xchg %eax,%ebx
23725 + pax_force_retaddr
23726 ret
23727 diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
23728 index 7b65f75..63097f6 100644
23729 --- a/arch/x86/net/bpf_jit_comp.c
23730 +++ b/arch/x86/net/bpf_jit_comp.c
23731 @@ -117,6 +117,10 @@ static inline void bpf_flush_icache(void *start, void *end)
23732 set_fs(old_fs);
23733 }
23734
23735 +struct bpf_jit_work {
23736 + struct work_struct work;
23737 + void *image;
23738 +};
23739
23740 void bpf_jit_compile(struct sk_filter *fp)
23741 {
23742 @@ -141,6 +145,10 @@ void bpf_jit_compile(struct sk_filter *fp)
23743 if (addrs == NULL)
23744 return;
23745
23746 + fp->work = kmalloc(sizeof(*fp->work), GFP_KERNEL);
23747 + if (!fp->work)
23748 + goto out;
23749 +
23750 /* Before first pass, make a rough estimation of addrs[]
23751 * each bpf instruction is translated to less than 64 bytes
23752 */
23753 @@ -585,11 +593,12 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
23754 if (image) {
23755 if (unlikely(proglen + ilen > oldproglen)) {
23756 pr_err("bpb_jit_compile fatal error\n");
23757 - kfree(addrs);
23758 - module_free(NULL, image);
23759 - return;
23760 + module_free_exec(NULL, image);
23761 + goto out;
23762 }
23763 + pax_open_kernel();
23764 memcpy(image + proglen, temp, ilen);
23765 + pax_close_kernel();
23766 }
23767 proglen += ilen;
23768 addrs[i] = proglen;
23769 @@ -609,7 +618,7 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
23770 break;
23771 }
23772 if (proglen == oldproglen) {
23773 - image = module_alloc(max_t(unsigned int,
23774 + image = module_alloc_exec(max_t(unsigned int,
23775 proglen,
23776 sizeof(struct work_struct)));
23777 if (!image)
23778 @@ -631,24 +640,27 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
23779 fp->bpf_func = (void *)image;
23780 }
23781 out:
23782 + kfree(fp->work);
23783 kfree(addrs);
23784 return;
23785 }
23786
23787 static void jit_free_defer(struct work_struct *arg)
23788 {
23789 - module_free(NULL, arg);
23790 + module_free_exec(NULL, ((struct bpf_jit_work*)arg)->image);
23791 + kfree(arg);
23792 }
23793
23794 /* run from softirq, we must use a work_struct to call
23795 - * module_free() from process context
23796 + * module_free_exec() from process context
23797 */
23798 void bpf_jit_free(struct sk_filter *fp)
23799 {
23800 if (fp->bpf_func != sk_run_filter) {
23801 - struct work_struct *work = (struct work_struct *)fp->bpf_func;
23802 + struct work_struct *work = &fp->work->work;
23803
23804 INIT_WORK(work, jit_free_defer);
23805 + fp->work->image = fp->bpf_func;
23806 schedule_work(work);
23807 }
23808 }
23809 diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
23810 index bff89df..377758a 100644
23811 --- a/arch/x86/oprofile/backtrace.c
23812 +++ b/arch/x86/oprofile/backtrace.c
23813 @@ -46,11 +46,11 @@ dump_user_backtrace_32(struct stack_frame_ia32 *head)
23814 struct stack_frame_ia32 *fp;
23815 unsigned long bytes;
23816
23817 - bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
23818 + bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
23819 if (bytes != sizeof(bufhead))
23820 return NULL;
23821
23822 - fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame);
23823 + fp = (struct stack_frame_ia32 __force_kernel *) compat_ptr(bufhead[0].next_frame);
23824
23825 oprofile_add_trace(bufhead[0].return_address);
23826
23827 @@ -92,7 +92,7 @@ static struct stack_frame *dump_user_backtrace(struct stack_frame *head)
23828 struct stack_frame bufhead[2];
23829 unsigned long bytes;
23830
23831 - bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
23832 + bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
23833 if (bytes != sizeof(bufhead))
23834 return NULL;
23835
23836 @@ -111,7 +111,7 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth)
23837 {
23838 struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
23839
23840 - if (!user_mode_vm(regs)) {
23841 + if (!user_mode(regs)) {
23842 unsigned long stack = kernel_stack_pointer(regs);
23843 if (depth)
23844 dump_trace(NULL, regs, (unsigned long *)stack, 0,
23845 diff --git a/arch/x86/pci/mrst.c b/arch/x86/pci/mrst.c
23846 index cb29191..036766d 100644
23847 --- a/arch/x86/pci/mrst.c
23848 +++ b/arch/x86/pci/mrst.c
23849 @@ -234,7 +234,9 @@ int __init pci_mrst_init(void)
23850 printk(KERN_INFO "Moorestown platform detected, using MRST PCI ops\n");
23851 pci_mmcfg_late_init();
23852 pcibios_enable_irq = mrst_pci_irq_enable;
23853 - pci_root_ops = pci_mrst_ops;
23854 + pax_open_kernel();
23855 + memcpy((void *)&pci_root_ops, &pci_mrst_ops, sizeof(pci_mrst_ops));
23856 + pax_close_kernel();
23857 /* Continue with standard init */
23858 return 1;
23859 }
23860 diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
23861 index db0e9a5..8844dea 100644
23862 --- a/arch/x86/pci/pcbios.c
23863 +++ b/arch/x86/pci/pcbios.c
23864 @@ -79,50 +79,93 @@ union bios32 {
23865 static struct {
23866 unsigned long address;
23867 unsigned short segment;
23868 -} bios32_indirect = { 0, __KERNEL_CS };
23869 +} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
23870
23871 /*
23872 * Returns the entry point for the given service, NULL on error
23873 */
23874
23875 -static unsigned long bios32_service(unsigned long service)
23876 +static unsigned long __devinit bios32_service(unsigned long service)
23877 {
23878 unsigned char return_code; /* %al */
23879 unsigned long address; /* %ebx */
23880 unsigned long length; /* %ecx */
23881 unsigned long entry; /* %edx */
23882 unsigned long flags;
23883 + struct desc_struct d, *gdt;
23884
23885 local_irq_save(flags);
23886 - __asm__("lcall *(%%edi); cld"
23887 +
23888 + gdt = get_cpu_gdt_table(smp_processor_id());
23889 +
23890 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
23891 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
23892 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
23893 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
23894 +
23895 + __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
23896 : "=a" (return_code),
23897 "=b" (address),
23898 "=c" (length),
23899 "=d" (entry)
23900 : "0" (service),
23901 "1" (0),
23902 - "D" (&bios32_indirect));
23903 + "D" (&bios32_indirect),
23904 + "r"(__PCIBIOS_DS)
23905 + : "memory");
23906 +
23907 + pax_open_kernel();
23908 + gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
23909 + gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
23910 + gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
23911 + gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
23912 + pax_close_kernel();
23913 +
23914 local_irq_restore(flags);
23915
23916 switch (return_code) {
23917 - case 0:
23918 - return address + entry;
23919 - case 0x80: /* Not present */
23920 - printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
23921 - return 0;
23922 - default: /* Shouldn't happen */
23923 - printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
23924 - service, return_code);
23925 + case 0: {
23926 + int cpu;
23927 + unsigned char flags;
23928 +
23929 + printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
23930 + if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
23931 + printk(KERN_WARNING "bios32_service: not valid\n");
23932 return 0;
23933 + }
23934 + address = address + PAGE_OFFSET;
23935 + length += 16UL; /* some BIOSs underreport this... */
23936 + flags = 4;
23937 + if (length >= 64*1024*1024) {
23938 + length >>= PAGE_SHIFT;
23939 + flags |= 8;
23940 + }
23941 +
23942 + for (cpu = 0; cpu < NR_CPUS; cpu++) {
23943 + gdt = get_cpu_gdt_table(cpu);
23944 + pack_descriptor(&d, address, length, 0x9b, flags);
23945 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
23946 + pack_descriptor(&d, address, length, 0x93, flags);
23947 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
23948 + }
23949 + return entry;
23950 + }
23951 + case 0x80: /* Not present */
23952 + printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
23953 + return 0;
23954 + default: /* Shouldn't happen */
23955 + printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
23956 + service, return_code);
23957 + return 0;
23958 }
23959 }
23960
23961 static struct {
23962 unsigned long address;
23963 unsigned short segment;
23964 -} pci_indirect = { 0, __KERNEL_CS };
23965 +} pci_indirect __read_only = { 0, __PCIBIOS_CS };
23966
23967 -static int pci_bios_present;
23968 +static int pci_bios_present __read_only;
23969
23970 static int __devinit check_pcibios(void)
23971 {
23972 @@ -131,11 +174,13 @@ static int __devinit check_pcibios(void)
23973 unsigned long flags, pcibios_entry;
23974
23975 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
23976 - pci_indirect.address = pcibios_entry + PAGE_OFFSET;
23977 + pci_indirect.address = pcibios_entry;
23978
23979 local_irq_save(flags);
23980 - __asm__(
23981 - "lcall *(%%edi); cld\n\t"
23982 + __asm__("movw %w6, %%ds\n\t"
23983 + "lcall *%%ss:(%%edi); cld\n\t"
23984 + "push %%ss\n\t"
23985 + "pop %%ds\n\t"
23986 "jc 1f\n\t"
23987 "xor %%ah, %%ah\n"
23988 "1:"
23989 @@ -144,7 +189,8 @@ static int __devinit check_pcibios(void)
23990 "=b" (ebx),
23991 "=c" (ecx)
23992 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
23993 - "D" (&pci_indirect)
23994 + "D" (&pci_indirect),
23995 + "r" (__PCIBIOS_DS)
23996 : "memory");
23997 local_irq_restore(flags);
23998
23999 @@ -189,7 +235,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
24000
24001 switch (len) {
24002 case 1:
24003 - __asm__("lcall *(%%esi); cld\n\t"
24004 + __asm__("movw %w6, %%ds\n\t"
24005 + "lcall *%%ss:(%%esi); cld\n\t"
24006 + "push %%ss\n\t"
24007 + "pop %%ds\n\t"
24008 "jc 1f\n\t"
24009 "xor %%ah, %%ah\n"
24010 "1:"
24011 @@ -198,7 +247,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
24012 : "1" (PCIBIOS_READ_CONFIG_BYTE),
24013 "b" (bx),
24014 "D" ((long)reg),
24015 - "S" (&pci_indirect));
24016 + "S" (&pci_indirect),
24017 + "r" (__PCIBIOS_DS));
24018 /*
24019 * Zero-extend the result beyond 8 bits, do not trust the
24020 * BIOS having done it:
24021 @@ -206,7 +256,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
24022 *value &= 0xff;
24023 break;
24024 case 2:
24025 - __asm__("lcall *(%%esi); cld\n\t"
24026 + __asm__("movw %w6, %%ds\n\t"
24027 + "lcall *%%ss:(%%esi); cld\n\t"
24028 + "push %%ss\n\t"
24029 + "pop %%ds\n\t"
24030 "jc 1f\n\t"
24031 "xor %%ah, %%ah\n"
24032 "1:"
24033 @@ -215,7 +268,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
24034 : "1" (PCIBIOS_READ_CONFIG_WORD),
24035 "b" (bx),
24036 "D" ((long)reg),
24037 - "S" (&pci_indirect));
24038 + "S" (&pci_indirect),
24039 + "r" (__PCIBIOS_DS));
24040 /*
24041 * Zero-extend the result beyond 16 bits, do not trust the
24042 * BIOS having done it:
24043 @@ -223,7 +277,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
24044 *value &= 0xffff;
24045 break;
24046 case 4:
24047 - __asm__("lcall *(%%esi); cld\n\t"
24048 + __asm__("movw %w6, %%ds\n\t"
24049 + "lcall *%%ss:(%%esi); cld\n\t"
24050 + "push %%ss\n\t"
24051 + "pop %%ds\n\t"
24052 "jc 1f\n\t"
24053 "xor %%ah, %%ah\n"
24054 "1:"
24055 @@ -232,7 +289,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
24056 : "1" (PCIBIOS_READ_CONFIG_DWORD),
24057 "b" (bx),
24058 "D" ((long)reg),
24059 - "S" (&pci_indirect));
24060 + "S" (&pci_indirect),
24061 + "r" (__PCIBIOS_DS));
24062 break;
24063 }
24064
24065 @@ -256,7 +314,10 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
24066
24067 switch (len) {
24068 case 1:
24069 - __asm__("lcall *(%%esi); cld\n\t"
24070 + __asm__("movw %w6, %%ds\n\t"
24071 + "lcall *%%ss:(%%esi); cld\n\t"
24072 + "push %%ss\n\t"
24073 + "pop %%ds\n\t"
24074 "jc 1f\n\t"
24075 "xor %%ah, %%ah\n"
24076 "1:"
24077 @@ -265,10 +326,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
24078 "c" (value),
24079 "b" (bx),
24080 "D" ((long)reg),
24081 - "S" (&pci_indirect));
24082 + "S" (&pci_indirect),
24083 + "r" (__PCIBIOS_DS));
24084 break;
24085 case 2:
24086 - __asm__("lcall *(%%esi); cld\n\t"
24087 + __asm__("movw %w6, %%ds\n\t"
24088 + "lcall *%%ss:(%%esi); cld\n\t"
24089 + "push %%ss\n\t"
24090 + "pop %%ds\n\t"
24091 "jc 1f\n\t"
24092 "xor %%ah, %%ah\n"
24093 "1:"
24094 @@ -277,10 +342,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
24095 "c" (value),
24096 "b" (bx),
24097 "D" ((long)reg),
24098 - "S" (&pci_indirect));
24099 + "S" (&pci_indirect),
24100 + "r" (__PCIBIOS_DS));
24101 break;
24102 case 4:
24103 - __asm__("lcall *(%%esi); cld\n\t"
24104 + __asm__("movw %w6, %%ds\n\t"
24105 + "lcall *%%ss:(%%esi); cld\n\t"
24106 + "push %%ss\n\t"
24107 + "pop %%ds\n\t"
24108 "jc 1f\n\t"
24109 "xor %%ah, %%ah\n"
24110 "1:"
24111 @@ -289,7 +358,8 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
24112 "c" (value),
24113 "b" (bx),
24114 "D" ((long)reg),
24115 - "S" (&pci_indirect));
24116 + "S" (&pci_indirect),
24117 + "r" (__PCIBIOS_DS));
24118 break;
24119 }
24120
24121 @@ -394,10 +464,13 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
24122
24123 DBG("PCI: Fetching IRQ routing table... ");
24124 __asm__("push %%es\n\t"
24125 + "movw %w8, %%ds\n\t"
24126 "push %%ds\n\t"
24127 "pop %%es\n\t"
24128 - "lcall *(%%esi); cld\n\t"
24129 + "lcall *%%ss:(%%esi); cld\n\t"
24130 "pop %%es\n\t"
24131 + "push %%ss\n\t"
24132 + "pop %%ds\n"
24133 "jc 1f\n\t"
24134 "xor %%ah, %%ah\n"
24135 "1:"
24136 @@ -408,7 +481,8 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
24137 "1" (0),
24138 "D" ((long) &opt),
24139 "S" (&pci_indirect),
24140 - "m" (opt)
24141 + "m" (opt),
24142 + "r" (__PCIBIOS_DS)
24143 : "memory");
24144 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
24145 if (ret & 0xff00)
24146 @@ -432,7 +506,10 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
24147 {
24148 int ret;
24149
24150 - __asm__("lcall *(%%esi); cld\n\t"
24151 + __asm__("movw %w5, %%ds\n\t"
24152 + "lcall *%%ss:(%%esi); cld\n\t"
24153 + "push %%ss\n\t"
24154 + "pop %%ds\n"
24155 "jc 1f\n\t"
24156 "xor %%ah, %%ah\n"
24157 "1:"
24158 @@ -440,7 +517,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
24159 : "0" (PCIBIOS_SET_PCI_HW_INT),
24160 "b" ((dev->bus->number << 8) | dev->devfn),
24161 "c" ((irq << 8) | (pin + 10)),
24162 - "S" (&pci_indirect));
24163 + "S" (&pci_indirect),
24164 + "r" (__PCIBIOS_DS));
24165 return !(ret & 0xff00);
24166 }
24167 EXPORT_SYMBOL(pcibios_set_irq_routing);
24168 diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c
24169 index 40e4469..1ab536e 100644
24170 --- a/arch/x86/platform/efi/efi_32.c
24171 +++ b/arch/x86/platform/efi/efi_32.c
24172 @@ -44,11 +44,22 @@ void efi_call_phys_prelog(void)
24173 {
24174 struct desc_ptr gdt_descr;
24175
24176 +#ifdef CONFIG_PAX_KERNEXEC
24177 + struct desc_struct d;
24178 +#endif
24179 +
24180 local_irq_save(efi_rt_eflags);
24181
24182 load_cr3(initial_page_table);
24183 __flush_tlb_all();
24184
24185 +#ifdef CONFIG_PAX_KERNEXEC
24186 + pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
24187 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
24188 + pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
24189 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
24190 +#endif
24191 +
24192 gdt_descr.address = __pa(get_cpu_gdt_table(0));
24193 gdt_descr.size = GDT_SIZE - 1;
24194 load_gdt(&gdt_descr);
24195 @@ -58,6 +69,14 @@ void efi_call_phys_epilog(void)
24196 {
24197 struct desc_ptr gdt_descr;
24198
24199 +#ifdef CONFIG_PAX_KERNEXEC
24200 + struct desc_struct d;
24201 +
24202 + memset(&d, 0, sizeof d);
24203 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
24204 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
24205 +#endif
24206 +
24207 gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
24208 gdt_descr.size = GDT_SIZE - 1;
24209 load_gdt(&gdt_descr);
24210 diff --git a/arch/x86/platform/efi/efi_stub_32.S b/arch/x86/platform/efi/efi_stub_32.S
24211 index fbe66e6..c5c0dd2 100644
24212 --- a/arch/x86/platform/efi/efi_stub_32.S
24213 +++ b/arch/x86/platform/efi/efi_stub_32.S
24214 @@ -6,7 +6,9 @@
24215 */
24216
24217 #include <linux/linkage.h>
24218 +#include <linux/init.h>
24219 #include <asm/page_types.h>
24220 +#include <asm/segment.h>
24221
24222 /*
24223 * efi_call_phys(void *, ...) is a function with variable parameters.
24224 @@ -20,7 +22,7 @@
24225 * service functions will comply with gcc calling convention, too.
24226 */
24227
24228 -.text
24229 +__INIT
24230 ENTRY(efi_call_phys)
24231 /*
24232 * 0. The function can only be called in Linux kernel. So CS has been
24233 @@ -36,9 +38,11 @@ ENTRY(efi_call_phys)
24234 * The mapping of lower virtual memory has been created in prelog and
24235 * epilog.
24236 */
24237 - movl $1f, %edx
24238 - subl $__PAGE_OFFSET, %edx
24239 - jmp *%edx
24240 + movl $(__KERNEXEC_EFI_DS), %edx
24241 + mov %edx, %ds
24242 + mov %edx, %es
24243 + mov %edx, %ss
24244 + ljmp $(__KERNEXEC_EFI_CS),$1f-__PAGE_OFFSET
24245 1:
24246
24247 /*
24248 @@ -47,14 +51,8 @@ ENTRY(efi_call_phys)
24249 * parameter 2, ..., param n. To make things easy, we save the return
24250 * address of efi_call_phys in a global variable.
24251 */
24252 - popl %edx
24253 - movl %edx, saved_return_addr
24254 - /* get the function pointer into ECX*/
24255 - popl %ecx
24256 - movl %ecx, efi_rt_function_ptr
24257 - movl $2f, %edx
24258 - subl $__PAGE_OFFSET, %edx
24259 - pushl %edx
24260 + popl (saved_return_addr)
24261 + popl (efi_rt_function_ptr)
24262
24263 /*
24264 * 3. Clear PG bit in %CR0.
24265 @@ -73,9 +71,8 @@ ENTRY(efi_call_phys)
24266 /*
24267 * 5. Call the physical function.
24268 */
24269 - jmp *%ecx
24270 + call *(efi_rt_function_ptr-__PAGE_OFFSET)
24271
24272 -2:
24273 /*
24274 * 6. After EFI runtime service returns, control will return to
24275 * following instruction. We'd better readjust stack pointer first.
24276 @@ -88,35 +85,32 @@ ENTRY(efi_call_phys)
24277 movl %cr0, %edx
24278 orl $0x80000000, %edx
24279 movl %edx, %cr0
24280 - jmp 1f
24281 -1:
24282 +
24283 /*
24284 * 8. Now restore the virtual mode from flat mode by
24285 * adding EIP with PAGE_OFFSET.
24286 */
24287 - movl $1f, %edx
24288 - jmp *%edx
24289 + ljmp $(__KERNEL_CS),$1f+__PAGE_OFFSET
24290 1:
24291 + movl $(__KERNEL_DS), %edx
24292 + mov %edx, %ds
24293 + mov %edx, %es
24294 + mov %edx, %ss
24295
24296 /*
24297 * 9. Balance the stack. And because EAX contain the return value,
24298 * we'd better not clobber it.
24299 */
24300 - leal efi_rt_function_ptr, %edx
24301 - movl (%edx), %ecx
24302 - pushl %ecx
24303 + pushl (efi_rt_function_ptr)
24304
24305 /*
24306 - * 10. Push the saved return address onto the stack and return.
24307 + * 10. Return to the saved return address.
24308 */
24309 - leal saved_return_addr, %edx
24310 - movl (%edx), %ecx
24311 - pushl %ecx
24312 - ret
24313 + jmpl *(saved_return_addr)
24314 ENDPROC(efi_call_phys)
24315 .previous
24316
24317 -.data
24318 +__INITDATA
24319 saved_return_addr:
24320 .long 0
24321 efi_rt_function_ptr:
24322 diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S
24323 index 4c07cca..2c8427d 100644
24324 --- a/arch/x86/platform/efi/efi_stub_64.S
24325 +++ b/arch/x86/platform/efi/efi_stub_64.S
24326 @@ -7,6 +7,7 @@
24327 */
24328
24329 #include <linux/linkage.h>
24330 +#include <asm/alternative-asm.h>
24331
24332 #define SAVE_XMM \
24333 mov %rsp, %rax; \
24334 @@ -40,6 +41,7 @@ ENTRY(efi_call0)
24335 call *%rdi
24336 addq $32, %rsp
24337 RESTORE_XMM
24338 + pax_force_retaddr 0, 1
24339 ret
24340 ENDPROC(efi_call0)
24341
24342 @@ -50,6 +52,7 @@ ENTRY(efi_call1)
24343 call *%rdi
24344 addq $32, %rsp
24345 RESTORE_XMM
24346 + pax_force_retaddr 0, 1
24347 ret
24348 ENDPROC(efi_call1)
24349
24350 @@ -60,6 +63,7 @@ ENTRY(efi_call2)
24351 call *%rdi
24352 addq $32, %rsp
24353 RESTORE_XMM
24354 + pax_force_retaddr 0, 1
24355 ret
24356 ENDPROC(efi_call2)
24357
24358 @@ -71,6 +75,7 @@ ENTRY(efi_call3)
24359 call *%rdi
24360 addq $32, %rsp
24361 RESTORE_XMM
24362 + pax_force_retaddr 0, 1
24363 ret
24364 ENDPROC(efi_call3)
24365
24366 @@ -83,6 +88,7 @@ ENTRY(efi_call4)
24367 call *%rdi
24368 addq $32, %rsp
24369 RESTORE_XMM
24370 + pax_force_retaddr 0, 1
24371 ret
24372 ENDPROC(efi_call4)
24373
24374 @@ -96,6 +102,7 @@ ENTRY(efi_call5)
24375 call *%rdi
24376 addq $48, %rsp
24377 RESTORE_XMM
24378 + pax_force_retaddr 0, 1
24379 ret
24380 ENDPROC(efi_call5)
24381
24382 @@ -112,5 +119,6 @@ ENTRY(efi_call6)
24383 call *%rdi
24384 addq $48, %rsp
24385 RESTORE_XMM
24386 + pax_force_retaddr 0, 1
24387 ret
24388 ENDPROC(efi_call6)
24389 diff --git a/arch/x86/platform/mrst/mrst.c b/arch/x86/platform/mrst/mrst.c
24390 index ad4ec1c..686479e 100644
24391 --- a/arch/x86/platform/mrst/mrst.c
24392 +++ b/arch/x86/platform/mrst/mrst.c
24393 @@ -76,18 +76,20 @@ struct sfi_rtc_table_entry sfi_mrtc_array[SFI_MRTC_MAX];
24394 EXPORT_SYMBOL_GPL(sfi_mrtc_array);
24395 int sfi_mrtc_num;
24396
24397 -static void mrst_power_off(void)
24398 +static __noreturn void mrst_power_off(void)
24399 {
24400 if (__mrst_cpu_chip == MRST_CPU_CHIP_LINCROFT)
24401 intel_scu_ipc_simple_command(IPCMSG_COLD_RESET, 1);
24402 + BUG();
24403 }
24404
24405 -static void mrst_reboot(void)
24406 +static __noreturn void mrst_reboot(void)
24407 {
24408 if (__mrst_cpu_chip == MRST_CPU_CHIP_LINCROFT)
24409 intel_scu_ipc_simple_command(IPCMSG_COLD_RESET, 0);
24410 else
24411 intel_scu_ipc_simple_command(IPCMSG_COLD_BOOT, 0);
24412 + BUG();
24413 }
24414
24415 /* parse all the mtimer info to a static mtimer array */
24416 diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
24417 index f10c0af..3ec1f95 100644
24418 --- a/arch/x86/power/cpu.c
24419 +++ b/arch/x86/power/cpu.c
24420 @@ -131,7 +131,7 @@ static void do_fpu_end(void)
24421 static void fix_processor_context(void)
24422 {
24423 int cpu = smp_processor_id();
24424 - struct tss_struct *t = &per_cpu(init_tss, cpu);
24425 + struct tss_struct *t = init_tss + cpu;
24426
24427 set_tss_desc(cpu, t); /*
24428 * This just modifies memory; should not be
24429 @@ -141,7 +141,9 @@ static void fix_processor_context(void)
24430 */
24431
24432 #ifdef CONFIG_X86_64
24433 + pax_open_kernel();
24434 get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
24435 + pax_close_kernel();
24436
24437 syscall_init(); /* This sets MSR_*STAR and related */
24438 #endif
24439 diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
24440 index 5d17950..2253fc9 100644
24441 --- a/arch/x86/vdso/Makefile
24442 +++ b/arch/x86/vdso/Makefile
24443 @@ -137,7 +137,7 @@ quiet_cmd_vdso = VDSO $@
24444 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
24445 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
24446
24447 -VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
24448 +VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
24449 GCOV_PROFILE := n
24450
24451 #
24452 diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
24453 index 468d591..8e80a0a 100644
24454 --- a/arch/x86/vdso/vdso32-setup.c
24455 +++ b/arch/x86/vdso/vdso32-setup.c
24456 @@ -25,6 +25,7 @@
24457 #include <asm/tlbflush.h>
24458 #include <asm/vdso.h>
24459 #include <asm/proto.h>
24460 +#include <asm/mman.h>
24461
24462 enum {
24463 VDSO_DISABLED = 0,
24464 @@ -226,7 +227,7 @@ static inline void map_compat_vdso(int map)
24465 void enable_sep_cpu(void)
24466 {
24467 int cpu = get_cpu();
24468 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
24469 + struct tss_struct *tss = init_tss + cpu;
24470
24471 if (!boot_cpu_has(X86_FEATURE_SEP)) {
24472 put_cpu();
24473 @@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
24474 gate_vma.vm_start = FIXADDR_USER_START;
24475 gate_vma.vm_end = FIXADDR_USER_END;
24476 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
24477 - gate_vma.vm_page_prot = __P101;
24478 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
24479 /*
24480 * Make sure the vDSO gets into every core dump.
24481 * Dumping its contents makes post-mortem fully interpretable later
24482 @@ -331,14 +332,14 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
24483 if (compat)
24484 addr = VDSO_HIGH_BASE;
24485 else {
24486 - addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
24487 + addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
24488 if (IS_ERR_VALUE(addr)) {
24489 ret = addr;
24490 goto up_fail;
24491 }
24492 }
24493
24494 - current->mm->context.vdso = (void *)addr;
24495 + current->mm->context.vdso = addr;
24496
24497 if (compat_uses_vma || !compat) {
24498 /*
24499 @@ -361,11 +362,11 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
24500 }
24501
24502 current_thread_info()->sysenter_return =
24503 - VDSO32_SYMBOL(addr, SYSENTER_RETURN);
24504 + (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
24505
24506 up_fail:
24507 if (ret)
24508 - current->mm->context.vdso = NULL;
24509 + current->mm->context.vdso = 0;
24510
24511 up_write(&mm->mmap_sem);
24512
24513 @@ -412,8 +413,14 @@ __initcall(ia32_binfmt_init);
24514
24515 const char *arch_vma_name(struct vm_area_struct *vma)
24516 {
24517 - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
24518 + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
24519 return "[vdso]";
24520 +
24521 +#ifdef CONFIG_PAX_SEGMEXEC
24522 + if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
24523 + return "[vdso]";
24524 +#endif
24525 +
24526 return NULL;
24527 }
24528
24529 @@ -423,7 +430,7 @@ struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
24530 * Check to see if the corresponding task was created in compat vdso
24531 * mode.
24532 */
24533 - if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
24534 + if (mm && mm->context.vdso == VDSO_HIGH_BASE)
24535 return &gate_vma;
24536 return NULL;
24537 }
24538 diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
24539 index 153407c..611cba9 100644
24540 --- a/arch/x86/vdso/vma.c
24541 +++ b/arch/x86/vdso/vma.c
24542 @@ -16,8 +16,6 @@
24543 #include <asm/vdso.h>
24544 #include <asm/page.h>
24545
24546 -unsigned int __read_mostly vdso_enabled = 1;
24547 -
24548 extern char vdso_start[], vdso_end[];
24549 extern unsigned short vdso_sync_cpuid;
24550
24551 @@ -96,7 +94,6 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
24552 * unaligned here as a result of stack start randomization.
24553 */
24554 addr = PAGE_ALIGN(addr);
24555 - addr = align_addr(addr, NULL, ALIGN_VDSO);
24556
24557 return addr;
24558 }
24559 @@ -106,40 +103,35 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
24560 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
24561 {
24562 struct mm_struct *mm = current->mm;
24563 - unsigned long addr;
24564 + unsigned long addr = 0;
24565 int ret;
24566
24567 - if (!vdso_enabled)
24568 - return 0;
24569 -
24570 down_write(&mm->mmap_sem);
24571 +
24572 +#ifdef CONFIG_PAX_RANDMMAP
24573 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
24574 +#endif
24575 +
24576 addr = vdso_addr(mm->start_stack, vdso_size);
24577 + addr = align_addr(addr, NULL, ALIGN_VDSO);
24578 addr = get_unmapped_area(NULL, addr, vdso_size, 0, 0);
24579 if (IS_ERR_VALUE(addr)) {
24580 ret = addr;
24581 goto up_fail;
24582 }
24583
24584 - current->mm->context.vdso = (void *)addr;
24585 + mm->context.vdso = addr;
24586
24587 ret = install_special_mapping(mm, addr, vdso_size,
24588 VM_READ|VM_EXEC|
24589 VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
24590 VM_ALWAYSDUMP,
24591 vdso_pages);
24592 - if (ret) {
24593 - current->mm->context.vdso = NULL;
24594 - goto up_fail;
24595 - }
24596 +
24597 + if (ret)
24598 + mm->context.vdso = 0;
24599
24600 up_fail:
24601 up_write(&mm->mmap_sem);
24602 return ret;
24603 }
24604 -
24605 -static __init int vdso_setup(char *s)
24606 -{
24607 - vdso_enabled = simple_strtoul(s, NULL, 0);
24608 - return 0;
24609 -}
24610 -__setup("vdso=", vdso_setup);
24611 diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
24612 index 1f92865..c843b20 100644
24613 --- a/arch/x86/xen/enlighten.c
24614 +++ b/arch/x86/xen/enlighten.c
24615 @@ -85,8 +85,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
24616
24617 struct shared_info xen_dummy_shared_info;
24618
24619 -void *xen_initial_gdt;
24620 -
24621 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
24622 __read_mostly int xen_have_vector_callback;
24623 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
24624 @@ -1029,7 +1027,7 @@ static const struct pv_apic_ops xen_apic_ops __initconst = {
24625 #endif
24626 };
24627
24628 -static void xen_reboot(int reason)
24629 +static __noreturn void xen_reboot(int reason)
24630 {
24631 struct sched_shutdown r = { .reason = reason };
24632
24633 @@ -1037,17 +1035,17 @@ static void xen_reboot(int reason)
24634 BUG();
24635 }
24636
24637 -static void xen_restart(char *msg)
24638 +static __noreturn void xen_restart(char *msg)
24639 {
24640 xen_reboot(SHUTDOWN_reboot);
24641 }
24642
24643 -static void xen_emergency_restart(void)
24644 +static __noreturn void xen_emergency_restart(void)
24645 {
24646 xen_reboot(SHUTDOWN_reboot);
24647 }
24648
24649 -static void xen_machine_halt(void)
24650 +static __noreturn void xen_machine_halt(void)
24651 {
24652 xen_reboot(SHUTDOWN_poweroff);
24653 }
24654 @@ -1153,7 +1151,17 @@ asmlinkage void __init xen_start_kernel(void)
24655 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
24656
24657 /* Work out if we support NX */
24658 - x86_configure_nx();
24659 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
24660 + if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
24661 + (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
24662 + unsigned l, h;
24663 +
24664 + __supported_pte_mask |= _PAGE_NX;
24665 + rdmsr(MSR_EFER, l, h);
24666 + l |= EFER_NX;
24667 + wrmsr(MSR_EFER, l, h);
24668 + }
24669 +#endif
24670
24671 xen_setup_features();
24672
24673 @@ -1184,13 +1192,6 @@ asmlinkage void __init xen_start_kernel(void)
24674
24675 machine_ops = xen_machine_ops;
24676
24677 - /*
24678 - * The only reliable way to retain the initial address of the
24679 - * percpu gdt_page is to remember it here, so we can go and
24680 - * mark it RW later, when the initial percpu area is freed.
24681 - */
24682 - xen_initial_gdt = &per_cpu(gdt_page, 0);
24683 -
24684 xen_smp_init();
24685
24686 #ifdef CONFIG_ACPI_NUMA
24687 diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
24688 index 87f6673..e2555a6 100644
24689 --- a/arch/x86/xen/mmu.c
24690 +++ b/arch/x86/xen/mmu.c
24691 @@ -1733,6 +1733,9 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
24692 convert_pfn_mfn(init_level4_pgt);
24693 convert_pfn_mfn(level3_ident_pgt);
24694 convert_pfn_mfn(level3_kernel_pgt);
24695 + convert_pfn_mfn(level3_vmalloc_start_pgt);
24696 + convert_pfn_mfn(level3_vmalloc_end_pgt);
24697 + convert_pfn_mfn(level3_vmemmap_pgt);
24698
24699 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
24700 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
24701 @@ -1751,7 +1754,11 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
24702 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
24703 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
24704 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
24705 + set_page_prot(level3_vmalloc_start_pgt, PAGE_KERNEL_RO);
24706 + set_page_prot(level3_vmalloc_end_pgt, PAGE_KERNEL_RO);
24707 + set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
24708 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
24709 + set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
24710 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
24711 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
24712
24713 @@ -1962,6 +1969,7 @@ static void __init xen_post_allocator_init(void)
24714 pv_mmu_ops.set_pud = xen_set_pud;
24715 #if PAGETABLE_LEVELS == 4
24716 pv_mmu_ops.set_pgd = xen_set_pgd;
24717 + pv_mmu_ops.set_pgd_batched = xen_set_pgd;
24718 #endif
24719
24720 /* This will work as long as patching hasn't happened yet
24721 @@ -2043,6 +2051,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
24722 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
24723 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
24724 .set_pgd = xen_set_pgd_hyper,
24725 + .set_pgd_batched = xen_set_pgd_hyper,
24726
24727 .alloc_pud = xen_alloc_pmd_init,
24728 .release_pud = xen_release_pmd_init,
24729 diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
24730 index 041d4fe..7666b7e 100644
24731 --- a/arch/x86/xen/smp.c
24732 +++ b/arch/x86/xen/smp.c
24733 @@ -194,11 +194,6 @@ static void __init xen_smp_prepare_boot_cpu(void)
24734 {
24735 BUG_ON(smp_processor_id() != 0);
24736 native_smp_prepare_boot_cpu();
24737 -
24738 - /* We've switched to the "real" per-cpu gdt, so make sure the
24739 - old memory can be recycled */
24740 - make_lowmem_page_readwrite(xen_initial_gdt);
24741 -
24742 xen_filter_cpu_maps();
24743 xen_setup_vcpu_info_placement();
24744 }
24745 @@ -275,12 +270,12 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
24746 gdt = get_cpu_gdt_table(cpu);
24747
24748 ctxt->flags = VGCF_IN_KERNEL;
24749 - ctxt->user_regs.ds = __USER_DS;
24750 - ctxt->user_regs.es = __USER_DS;
24751 + ctxt->user_regs.ds = __KERNEL_DS;
24752 + ctxt->user_regs.es = __KERNEL_DS;
24753 ctxt->user_regs.ss = __KERNEL_DS;
24754 #ifdef CONFIG_X86_32
24755 ctxt->user_regs.fs = __KERNEL_PERCPU;
24756 - ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
24757 + savesegment(gs, ctxt->user_regs.gs);
24758 #else
24759 ctxt->gs_base_kernel = per_cpu_offset(cpu);
24760 #endif
24761 @@ -331,13 +326,12 @@ static int __cpuinit xen_cpu_up(unsigned int cpu)
24762 int rc;
24763
24764 per_cpu(current_task, cpu) = idle;
24765 + per_cpu(current_tinfo, cpu) = &idle->tinfo;
24766 #ifdef CONFIG_X86_32
24767 irq_ctx_init(cpu);
24768 #else
24769 clear_tsk_thread_flag(idle, TIF_FORK);
24770 - per_cpu(kernel_stack, cpu) =
24771 - (unsigned long)task_stack_page(idle) -
24772 - KERNEL_STACK_OFFSET + THREAD_SIZE;
24773 + per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
24774 #endif
24775 xen_setup_runstate_info(cpu);
24776 xen_setup_timer(cpu);
24777 diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
24778 index b040b0e..8cc4fe0 100644
24779 --- a/arch/x86/xen/xen-asm_32.S
24780 +++ b/arch/x86/xen/xen-asm_32.S
24781 @@ -83,14 +83,14 @@ ENTRY(xen_iret)
24782 ESP_OFFSET=4 # bytes pushed onto stack
24783
24784 /*
24785 - * Store vcpu_info pointer for easy access. Do it this way to
24786 - * avoid having to reload %fs
24787 + * Store vcpu_info pointer for easy access.
24788 */
24789 #ifdef CONFIG_SMP
24790 - GET_THREAD_INFO(%eax)
24791 - movl TI_cpu(%eax), %eax
24792 - movl __per_cpu_offset(,%eax,4), %eax
24793 - mov xen_vcpu(%eax), %eax
24794 + push %fs
24795 + mov $(__KERNEL_PERCPU), %eax
24796 + mov %eax, %fs
24797 + mov PER_CPU_VAR(xen_vcpu), %eax
24798 + pop %fs
24799 #else
24800 movl xen_vcpu, %eax
24801 #endif
24802 diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
24803 index aaa7291..3f77960 100644
24804 --- a/arch/x86/xen/xen-head.S
24805 +++ b/arch/x86/xen/xen-head.S
24806 @@ -19,6 +19,17 @@ ENTRY(startup_xen)
24807 #ifdef CONFIG_X86_32
24808 mov %esi,xen_start_info
24809 mov $init_thread_union+THREAD_SIZE,%esp
24810 +#ifdef CONFIG_SMP
24811 + movl $cpu_gdt_table,%edi
24812 + movl $__per_cpu_load,%eax
24813 + movw %ax,__KERNEL_PERCPU + 2(%edi)
24814 + rorl $16,%eax
24815 + movb %al,__KERNEL_PERCPU + 4(%edi)
24816 + movb %ah,__KERNEL_PERCPU + 7(%edi)
24817 + movl $__per_cpu_end - 1,%eax
24818 + subl $__per_cpu_start,%eax
24819 + movw %ax,__KERNEL_PERCPU + 0(%edi)
24820 +#endif
24821 #else
24822 mov %rsi,xen_start_info
24823 mov $init_thread_union+THREAD_SIZE,%rsp
24824 diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
24825 index b095739..8c17bcd 100644
24826 --- a/arch/x86/xen/xen-ops.h
24827 +++ b/arch/x86/xen/xen-ops.h
24828 @@ -10,8 +10,6 @@
24829 extern const char xen_hypervisor_callback[];
24830 extern const char xen_failsafe_callback[];
24831
24832 -extern void *xen_initial_gdt;
24833 -
24834 struct trap_info;
24835 void xen_copy_trap_info(struct trap_info *traps);
24836
24837 diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
24838 index 58916af..9cb880b 100644
24839 --- a/block/blk-iopoll.c
24840 +++ b/block/blk-iopoll.c
24841 @@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopoll *iopoll)
24842 }
24843 EXPORT_SYMBOL(blk_iopoll_complete);
24844
24845 -static void blk_iopoll_softirq(struct softirq_action *h)
24846 +static void blk_iopoll_softirq(void)
24847 {
24848 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
24849 int rearm = 0, budget = blk_iopoll_budget;
24850 diff --git a/block/blk-map.c b/block/blk-map.c
24851 index 623e1cd..ca1e109 100644
24852 --- a/block/blk-map.c
24853 +++ b/block/blk-map.c
24854 @@ -302,7 +302,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
24855 if (!len || !kbuf)
24856 return -EINVAL;
24857
24858 - do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
24859 + do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf);
24860 if (do_copy)
24861 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
24862 else
24863 diff --git a/block/blk-softirq.c b/block/blk-softirq.c
24864 index 1366a89..e17f54b 100644
24865 --- a/block/blk-softirq.c
24866 +++ b/block/blk-softirq.c
24867 @@ -17,7 +17,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
24868 * Softirq action handler - move entries to local list and loop over them
24869 * while passing them to the queue registered handler.
24870 */
24871 -static void blk_done_softirq(struct softirq_action *h)
24872 +static void blk_done_softirq(void)
24873 {
24874 struct list_head *cpu_list, local_list;
24875
24876 diff --git a/block/bsg.c b/block/bsg.c
24877 index 702f131..37808bf 100644
24878 --- a/block/bsg.c
24879 +++ b/block/bsg.c
24880 @@ -176,16 +176,24 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
24881 struct sg_io_v4 *hdr, struct bsg_device *bd,
24882 fmode_t has_write_perm)
24883 {
24884 + unsigned char tmpcmd[sizeof(rq->__cmd)];
24885 + unsigned char *cmdptr;
24886 +
24887 if (hdr->request_len > BLK_MAX_CDB) {
24888 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
24889 if (!rq->cmd)
24890 return -ENOMEM;
24891 - }
24892 + cmdptr = rq->cmd;
24893 + } else
24894 + cmdptr = tmpcmd;
24895
24896 - if (copy_from_user(rq->cmd, (void __user *)(unsigned long)hdr->request,
24897 + if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request,
24898 hdr->request_len))
24899 return -EFAULT;
24900
24901 + if (cmdptr != rq->cmd)
24902 + memcpy(rq->cmd, cmdptr, hdr->request_len);
24903 +
24904 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
24905 if (blk_verify_command(rq->cmd, has_write_perm))
24906 return -EPERM;
24907 diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
24908 index 7b72502..646105c 100644
24909 --- a/block/compat_ioctl.c
24910 +++ b/block/compat_ioctl.c
24911 @@ -340,7 +340,7 @@ static int compat_fd_ioctl(struct block_device *bdev, fmode_t mode,
24912 err |= __get_user(f->spec1, &uf->spec1);
24913 err |= __get_user(f->fmt_gap, &uf->fmt_gap);
24914 err |= __get_user(name, &uf->name);
24915 - f->name = compat_ptr(name);
24916 + f->name = (void __force_kernel *)compat_ptr(name);
24917 if (err) {
24918 err = -EFAULT;
24919 goto out;
24920 diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
24921 index 688be8a..8a37d98 100644
24922 --- a/block/scsi_ioctl.c
24923 +++ b/block/scsi_ioctl.c
24924 @@ -223,8 +223,20 @@ EXPORT_SYMBOL(blk_verify_command);
24925 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
24926 struct sg_io_hdr *hdr, fmode_t mode)
24927 {
24928 - if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
24929 + unsigned char tmpcmd[sizeof(rq->__cmd)];
24930 + unsigned char *cmdptr;
24931 +
24932 + if (rq->cmd != rq->__cmd)
24933 + cmdptr = rq->cmd;
24934 + else
24935 + cmdptr = tmpcmd;
24936 +
24937 + if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
24938 return -EFAULT;
24939 +
24940 + if (cmdptr != rq->cmd)
24941 + memcpy(rq->cmd, cmdptr, hdr->cmd_len);
24942 +
24943 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
24944 return -EPERM;
24945
24946 @@ -433,6 +445,8 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
24947 int err;
24948 unsigned int in_len, out_len, bytes, opcode, cmdlen;
24949 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
24950 + unsigned char tmpcmd[sizeof(rq->__cmd)];
24951 + unsigned char *cmdptr;
24952
24953 if (!sic)
24954 return -EINVAL;
24955 @@ -466,9 +480,18 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
24956 */
24957 err = -EFAULT;
24958 rq->cmd_len = cmdlen;
24959 - if (copy_from_user(rq->cmd, sic->data, cmdlen))
24960 +
24961 + if (rq->cmd != rq->__cmd)
24962 + cmdptr = rq->cmd;
24963 + else
24964 + cmdptr = tmpcmd;
24965 +
24966 + if (copy_from_user(cmdptr, sic->data, cmdlen))
24967 goto error;
24968
24969 + if (rq->cmd != cmdptr)
24970 + memcpy(rq->cmd, cmdptr, cmdlen);
24971 +
24972 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
24973 goto error;
24974
24975 diff --git a/crypto/cryptd.c b/crypto/cryptd.c
24976 index 671d4d6..5f24030 100644
24977 --- a/crypto/cryptd.c
24978 +++ b/crypto/cryptd.c
24979 @@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx {
24980
24981 struct cryptd_blkcipher_request_ctx {
24982 crypto_completion_t complete;
24983 -};
24984 +} __no_const;
24985
24986 struct cryptd_hash_ctx {
24987 struct crypto_shash *child;
24988 @@ -80,7 +80,7 @@ struct cryptd_aead_ctx {
24989
24990 struct cryptd_aead_request_ctx {
24991 crypto_completion_t complete;
24992 -};
24993 +} __no_const;
24994
24995 static void cryptd_queue_worker(struct work_struct *work);
24996
24997 diff --git a/crypto/sha512_generic.c b/crypto/sha512_generic.c
24998 index 9ed9f60..88f160b 100644
24999 --- a/crypto/sha512_generic.c
25000 +++ b/crypto/sha512_generic.c
25001 @@ -21,8 +21,6 @@
25002 #include <linux/percpu.h>
25003 #include <asm/byteorder.h>
25004
25005 -static DEFINE_PER_CPU(u64[80], msg_schedule);
25006 -
25007 static inline u64 Ch(u64 x, u64 y, u64 z)
25008 {
25009 return z ^ (x & (y ^ z));
25010 @@ -80,7 +78,7 @@ static inline void LOAD_OP(int I, u64 *W, const u8 *input)
25011
25012 static inline void BLEND_OP(int I, u64 *W)
25013 {
25014 - W[I] = s1(W[I-2]) + W[I-7] + s0(W[I-15]) + W[I-16];
25015 + W[I % 16] += s1(W[(I-2) % 16]) + W[(I-7) % 16] + s0(W[(I-15) % 16]);
25016 }
25017
25018 static void
25019 @@ -89,38 +87,48 @@ sha512_transform(u64 *state, const u8 *input)
25020 u64 a, b, c, d, e, f, g, h, t1, t2;
25021
25022 int i;
25023 - u64 *W = get_cpu_var(msg_schedule);
25024 + u64 W[16];
25025
25026 /* load the input */
25027 for (i = 0; i < 16; i++)
25028 LOAD_OP(i, W, input);
25029
25030 - for (i = 16; i < 80; i++) {
25031 - BLEND_OP(i, W);
25032 - }
25033 -
25034 /* load the state into our registers */
25035 a=state[0]; b=state[1]; c=state[2]; d=state[3];
25036 e=state[4]; f=state[5]; g=state[6]; h=state[7];
25037
25038 - /* now iterate */
25039 - for (i=0; i<80; i+=8) {
25040 - t1 = h + e1(e) + Ch(e,f,g) + sha512_K[i ] + W[i ];
25041 - t2 = e0(a) + Maj(a,b,c); d+=t1; h=t1+t2;
25042 - t1 = g + e1(d) + Ch(d,e,f) + sha512_K[i+1] + W[i+1];
25043 - t2 = e0(h) + Maj(h,a,b); c+=t1; g=t1+t2;
25044 - t1 = f + e1(c) + Ch(c,d,e) + sha512_K[i+2] + W[i+2];
25045 - t2 = e0(g) + Maj(g,h,a); b+=t1; f=t1+t2;
25046 - t1 = e + e1(b) + Ch(b,c,d) + sha512_K[i+3] + W[i+3];
25047 - t2 = e0(f) + Maj(f,g,h); a+=t1; e=t1+t2;
25048 - t1 = d + e1(a) + Ch(a,b,c) + sha512_K[i+4] + W[i+4];
25049 - t2 = e0(e) + Maj(e,f,g); h+=t1; d=t1+t2;
25050 - t1 = c + e1(h) + Ch(h,a,b) + sha512_K[i+5] + W[i+5];
25051 - t2 = e0(d) + Maj(d,e,f); g+=t1; c=t1+t2;
25052 - t1 = b + e1(g) + Ch(g,h,a) + sha512_K[i+6] + W[i+6];
25053 - t2 = e0(c) + Maj(c,d,e); f+=t1; b=t1+t2;
25054 - t1 = a + e1(f) + Ch(f,g,h) + sha512_K[i+7] + W[i+7];
25055 - t2 = e0(b) + Maj(b,c,d); e+=t1; a=t1+t2;
25056 +#define SHA512_0_15(i, a, b, c, d, e, f, g, h) \
25057 + t1 = h + e1(e) + Ch(e, f, g) + sha512_K[i] + W[i]; \
25058 + t2 = e0(a) + Maj(a, b, c); \
25059 + d += t1; \
25060 + h = t1 + t2
25061 +
25062 +#define SHA512_16_79(i, a, b, c, d, e, f, g, h) \
25063 + BLEND_OP(i, W); \
25064 + t1 = h + e1(e) + Ch(e, f, g) + sha512_K[i] + W[(i)%16]; \
25065 + t2 = e0(a) + Maj(a, b, c); \
25066 + d += t1; \
25067 + h = t1 + t2
25068 +
25069 + for (i = 0; i < 16; i += 8) {
25070 + SHA512_0_15(i, a, b, c, d, e, f, g, h);
25071 + SHA512_0_15(i + 1, h, a, b, c, d, e, f, g);
25072 + SHA512_0_15(i + 2, g, h, a, b, c, d, e, f);
25073 + SHA512_0_15(i + 3, f, g, h, a, b, c, d, e);
25074 + SHA512_0_15(i + 4, e, f, g, h, a, b, c, d);
25075 + SHA512_0_15(i + 5, d, e, f, g, h, a, b, c);
25076 + SHA512_0_15(i + 6, c, d, e, f, g, h, a, b);
25077 + SHA512_0_15(i + 7, b, c, d, e, f, g, h, a);
25078 + }
25079 + for (i = 16; i < 80; i += 8) {
25080 + SHA512_16_79(i, a, b, c, d, e, f, g, h);
25081 + SHA512_16_79(i + 1, h, a, b, c, d, e, f, g);
25082 + SHA512_16_79(i + 2, g, h, a, b, c, d, e, f);
25083 + SHA512_16_79(i + 3, f, g, h, a, b, c, d, e);
25084 + SHA512_16_79(i + 4, e, f, g, h, a, b, c, d);
25085 + SHA512_16_79(i + 5, d, e, f, g, h, a, b, c);
25086 + SHA512_16_79(i + 6, c, d, e, f, g, h, a, b);
25087 + SHA512_16_79(i + 7, b, c, d, e, f, g, h, a);
25088 }
25089
25090 state[0] += a; state[1] += b; state[2] += c; state[3] += d;
25091 @@ -128,8 +136,6 @@ sha512_transform(u64 *state, const u8 *input)
25092
25093 /* erase our data */
25094 a = b = c = d = e = f = g = h = t1 = t2 = 0;
25095 - memset(W, 0, sizeof(__get_cpu_var(msg_schedule)));
25096 - put_cpu_var(msg_schedule);
25097 }
25098
25099 static int
25100 diff --git a/drivers/acpi/apei/cper.c b/drivers/acpi/apei/cper.c
25101 index 5d41894..22021e4 100644
25102 --- a/drivers/acpi/apei/cper.c
25103 +++ b/drivers/acpi/apei/cper.c
25104 @@ -38,12 +38,12 @@
25105 */
25106 u64 cper_next_record_id(void)
25107 {
25108 - static atomic64_t seq;
25109 + static atomic64_unchecked_t seq;
25110
25111 - if (!atomic64_read(&seq))
25112 - atomic64_set(&seq, ((u64)get_seconds()) << 32);
25113 + if (!atomic64_read_unchecked(&seq))
25114 + atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32);
25115
25116 - return atomic64_inc_return(&seq);
25117 + return atomic64_inc_return_unchecked(&seq);
25118 }
25119 EXPORT_SYMBOL_GPL(cper_next_record_id);
25120
25121 diff --git a/drivers/acpi/ec_sys.c b/drivers/acpi/ec_sys.c
25122 index 6c47ae9..8ab9132 100644
25123 --- a/drivers/acpi/ec_sys.c
25124 +++ b/drivers/acpi/ec_sys.c
25125 @@ -12,6 +12,7 @@
25126 #include <linux/acpi.h>
25127 #include <linux/debugfs.h>
25128 #include <linux/module.h>
25129 +#include <asm/uaccess.h>
25130 #include "internal.h"
25131
25132 MODULE_AUTHOR("Thomas Renninger <trenn@suse.de>");
25133 @@ -40,7 +41,7 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf,
25134 * struct acpi_ec *ec = ((struct seq_file *)f->private_data)->private;
25135 */
25136 unsigned int size = EC_SPACE_SIZE;
25137 - u8 *data = (u8 *) buf;
25138 + u8 data;
25139 loff_t init_off = *off;
25140 int err = 0;
25141
25142 @@ -53,9 +54,11 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf,
25143 size = count;
25144
25145 while (size) {
25146 - err = ec_read(*off, &data[*off - init_off]);
25147 + err = ec_read(*off, &data);
25148 if (err)
25149 return err;
25150 + if (put_user(data, &buf[*off - init_off]))
25151 + return -EFAULT;
25152 *off += 1;
25153 size--;
25154 }
25155 @@ -71,7 +74,6 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
25156
25157 unsigned int size = count;
25158 loff_t init_off = *off;
25159 - u8 *data = (u8 *) buf;
25160 int err = 0;
25161
25162 if (*off >= EC_SPACE_SIZE)
25163 @@ -82,7 +84,9 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
25164 }
25165
25166 while (size) {
25167 - u8 byte_write = data[*off - init_off];
25168 + u8 byte_write;
25169 + if (get_user(byte_write, &buf[*off - init_off]))
25170 + return -EFAULT;
25171 err = ec_write(*off, byte_write);
25172 if (err)
25173 return err;
25174 diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c
25175 index 251c7b62..000462d 100644
25176 --- a/drivers/acpi/proc.c
25177 +++ b/drivers/acpi/proc.c
25178 @@ -343,19 +343,13 @@ acpi_system_write_wakeup_device(struct file *file,
25179 size_t count, loff_t * ppos)
25180 {
25181 struct list_head *node, *next;
25182 - char strbuf[5];
25183 - char str[5] = "";
25184 - unsigned int len = count;
25185 + char strbuf[5] = {0};
25186
25187 - if (len > 4)
25188 - len = 4;
25189 - if (len < 0)
25190 + if (count > 4)
25191 + count = 4;
25192 + if (copy_from_user(strbuf, buffer, count))
25193 return -EFAULT;
25194 -
25195 - if (copy_from_user(strbuf, buffer, len))
25196 - return -EFAULT;
25197 - strbuf[len] = '\0';
25198 - sscanf(strbuf, "%s", str);
25199 + strbuf[count] = '\0';
25200
25201 mutex_lock(&acpi_device_lock);
25202 list_for_each_safe(node, next, &acpi_wakeup_device_list) {
25203 @@ -364,7 +358,7 @@ acpi_system_write_wakeup_device(struct file *file,
25204 if (!dev->wakeup.flags.valid)
25205 continue;
25206
25207 - if (!strncmp(dev->pnp.bus_id, str, 4)) {
25208 + if (!strncmp(dev->pnp.bus_id, strbuf, 4)) {
25209 if (device_can_wakeup(&dev->dev)) {
25210 bool enable = !device_may_wakeup(&dev->dev);
25211 device_set_wakeup_enable(&dev->dev, enable);
25212 diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
25213 index 9d7bc9f..a6fc091 100644
25214 --- a/drivers/acpi/processor_driver.c
25215 +++ b/drivers/acpi/processor_driver.c
25216 @@ -473,7 +473,7 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device)
25217 return 0;
25218 #endif
25219
25220 - BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
25221 + BUG_ON(pr->id >= nr_cpu_ids);
25222
25223 /*
25224 * Buggy BIOS check
25225 diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
25226 index c04ad68..0b99473 100644
25227 --- a/drivers/ata/libata-core.c
25228 +++ b/drivers/ata/libata-core.c
25229 @@ -4733,7 +4733,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
25230 struct ata_port *ap;
25231 unsigned int tag;
25232
25233 - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
25234 + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
25235 ap = qc->ap;
25236
25237 qc->flags = 0;
25238 @@ -4749,7 +4749,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
25239 struct ata_port *ap;
25240 struct ata_link *link;
25241
25242 - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
25243 + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
25244 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
25245 ap = qc->ap;
25246 link = qc->dev->link;
25247 @@ -5754,6 +5754,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
25248 return;
25249
25250 spin_lock(&lock);
25251 + pax_open_kernel();
25252
25253 for (cur = ops->inherits; cur; cur = cur->inherits) {
25254 void **inherit = (void **)cur;
25255 @@ -5767,8 +5768,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
25256 if (IS_ERR(*pp))
25257 *pp = NULL;
25258
25259 - ops->inherits = NULL;
25260 + *(struct ata_port_operations **)&ops->inherits = NULL;
25261
25262 + pax_close_kernel();
25263 spin_unlock(&lock);
25264 }
25265
25266 diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
25267 index e8574bb..f9f6a72 100644
25268 --- a/drivers/ata/pata_arasan_cf.c
25269 +++ b/drivers/ata/pata_arasan_cf.c
25270 @@ -862,7 +862,9 @@ static int __devinit arasan_cf_probe(struct platform_device *pdev)
25271 /* Handle platform specific quirks */
25272 if (pdata->quirk) {
25273 if (pdata->quirk & CF_BROKEN_PIO) {
25274 - ap->ops->set_piomode = NULL;
25275 + pax_open_kernel();
25276 + *(void **)&ap->ops->set_piomode = NULL;
25277 + pax_close_kernel();
25278 ap->pio_mask = 0;
25279 }
25280 if (pdata->quirk & CF_BROKEN_MWDMA)
25281 diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c
25282 index f9b983a..887b9d8 100644
25283 --- a/drivers/atm/adummy.c
25284 +++ b/drivers/atm/adummy.c
25285 @@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct sk_buff *skb)
25286 vcc->pop(vcc, skb);
25287 else
25288 dev_kfree_skb_any(skb);
25289 - atomic_inc(&vcc->stats->tx);
25290 + atomic_inc_unchecked(&vcc->stats->tx);
25291
25292 return 0;
25293 }
25294 diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
25295 index f8f41e0..1f987dd 100644
25296 --- a/drivers/atm/ambassador.c
25297 +++ b/drivers/atm/ambassador.c
25298 @@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev, tx_out * tx) {
25299 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
25300
25301 // VC layer stats
25302 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
25303 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
25304
25305 // free the descriptor
25306 kfree (tx_descr);
25307 @@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
25308 dump_skb ("<<<", vc, skb);
25309
25310 // VC layer stats
25311 - atomic_inc(&atm_vcc->stats->rx);
25312 + atomic_inc_unchecked(&atm_vcc->stats->rx);
25313 __net_timestamp(skb);
25314 // end of our responsibility
25315 atm_vcc->push (atm_vcc, skb);
25316 @@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
25317 } else {
25318 PRINTK (KERN_INFO, "dropped over-size frame");
25319 // should we count this?
25320 - atomic_inc(&atm_vcc->stats->rx_drop);
25321 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
25322 }
25323
25324 } else {
25325 @@ -1338,7 +1338,7 @@ static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
25326 }
25327
25328 if (check_area (skb->data, skb->len)) {
25329 - atomic_inc(&atm_vcc->stats->tx_err);
25330 + atomic_inc_unchecked(&atm_vcc->stats->tx_err);
25331 return -ENOMEM; // ?
25332 }
25333
25334 diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
25335 index b22d71c..d6e1049 100644
25336 --- a/drivers/atm/atmtcp.c
25337 +++ b/drivers/atm/atmtcp.c
25338 @@ -207,7 +207,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
25339 if (vcc->pop) vcc->pop(vcc,skb);
25340 else dev_kfree_skb(skb);
25341 if (dev_data) return 0;
25342 - atomic_inc(&vcc->stats->tx_err);
25343 + atomic_inc_unchecked(&vcc->stats->tx_err);
25344 return -ENOLINK;
25345 }
25346 size = skb->len+sizeof(struct atmtcp_hdr);
25347 @@ -215,7 +215,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
25348 if (!new_skb) {
25349 if (vcc->pop) vcc->pop(vcc,skb);
25350 else dev_kfree_skb(skb);
25351 - atomic_inc(&vcc->stats->tx_err);
25352 + atomic_inc_unchecked(&vcc->stats->tx_err);
25353 return -ENOBUFS;
25354 }
25355 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
25356 @@ -226,8 +226,8 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
25357 if (vcc->pop) vcc->pop(vcc,skb);
25358 else dev_kfree_skb(skb);
25359 out_vcc->push(out_vcc,new_skb);
25360 - atomic_inc(&vcc->stats->tx);
25361 - atomic_inc(&out_vcc->stats->rx);
25362 + atomic_inc_unchecked(&vcc->stats->tx);
25363 + atomic_inc_unchecked(&out_vcc->stats->rx);
25364 return 0;
25365 }
25366
25367 @@ -301,7 +301,7 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
25368 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
25369 read_unlock(&vcc_sklist_lock);
25370 if (!out_vcc) {
25371 - atomic_inc(&vcc->stats->tx_err);
25372 + atomic_inc_unchecked(&vcc->stats->tx_err);
25373 goto done;
25374 }
25375 skb_pull(skb,sizeof(struct atmtcp_hdr));
25376 @@ -313,8 +313,8 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
25377 __net_timestamp(new_skb);
25378 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
25379 out_vcc->push(out_vcc,new_skb);
25380 - atomic_inc(&vcc->stats->tx);
25381 - atomic_inc(&out_vcc->stats->rx);
25382 + atomic_inc_unchecked(&vcc->stats->tx);
25383 + atomic_inc_unchecked(&out_vcc->stats->rx);
25384 done:
25385 if (vcc->pop) vcc->pop(vcc,skb);
25386 else dev_kfree_skb(skb);
25387 diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
25388 index 956e9ac..133516d 100644
25389 --- a/drivers/atm/eni.c
25390 +++ b/drivers/atm/eni.c
25391 @@ -526,7 +526,7 @@ static int rx_aal0(struct atm_vcc *vcc)
25392 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
25393 vcc->dev->number);
25394 length = 0;
25395 - atomic_inc(&vcc->stats->rx_err);
25396 + atomic_inc_unchecked(&vcc->stats->rx_err);
25397 }
25398 else {
25399 length = ATM_CELL_SIZE-1; /* no HEC */
25400 @@ -581,7 +581,7 @@ static int rx_aal5(struct atm_vcc *vcc)
25401 size);
25402 }
25403 eff = length = 0;
25404 - atomic_inc(&vcc->stats->rx_err);
25405 + atomic_inc_unchecked(&vcc->stats->rx_err);
25406 }
25407 else {
25408 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
25409 @@ -598,7 +598,7 @@ static int rx_aal5(struct atm_vcc *vcc)
25410 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
25411 vcc->dev->number,vcc->vci,length,size << 2,descr);
25412 length = eff = 0;
25413 - atomic_inc(&vcc->stats->rx_err);
25414 + atomic_inc_unchecked(&vcc->stats->rx_err);
25415 }
25416 }
25417 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
25418 @@ -771,7 +771,7 @@ rx_dequeued++;
25419 vcc->push(vcc,skb);
25420 pushed++;
25421 }
25422 - atomic_inc(&vcc->stats->rx);
25423 + atomic_inc_unchecked(&vcc->stats->rx);
25424 }
25425 wake_up(&eni_dev->rx_wait);
25426 }
25427 @@ -1229,7 +1229,7 @@ static void dequeue_tx(struct atm_dev *dev)
25428 PCI_DMA_TODEVICE);
25429 if (vcc->pop) vcc->pop(vcc,skb);
25430 else dev_kfree_skb_irq(skb);
25431 - atomic_inc(&vcc->stats->tx);
25432 + atomic_inc_unchecked(&vcc->stats->tx);
25433 wake_up(&eni_dev->tx_wait);
25434 dma_complete++;
25435 }
25436 @@ -1569,7 +1569,7 @@ tx_complete++;
25437 /*--------------------------------- entries ---------------------------------*/
25438
25439
25440 -static const char *media_name[] __devinitdata = {
25441 +static const char *media_name[] __devinitconst = {
25442 "MMF", "SMF", "MMF", "03?", /* 0- 3 */
25443 "UTP", "05?", "06?", "07?", /* 4- 7 */
25444 "TAXI","09?", "10?", "11?", /* 8-11 */
25445 diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
25446 index 5072f8a..fa52520d 100644
25447 --- a/drivers/atm/firestream.c
25448 +++ b/drivers/atm/firestream.c
25449 @@ -750,7 +750,7 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q)
25450 }
25451 }
25452
25453 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
25454 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
25455
25456 fs_dprintk (FS_DEBUG_TXMEM, "i");
25457 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
25458 @@ -817,7 +817,7 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
25459 #endif
25460 skb_put (skb, qe->p1 & 0xffff);
25461 ATM_SKB(skb)->vcc = atm_vcc;
25462 - atomic_inc(&atm_vcc->stats->rx);
25463 + atomic_inc_unchecked(&atm_vcc->stats->rx);
25464 __net_timestamp(skb);
25465 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
25466 atm_vcc->push (atm_vcc, skb);
25467 @@ -838,12 +838,12 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
25468 kfree (pe);
25469 }
25470 if (atm_vcc)
25471 - atomic_inc(&atm_vcc->stats->rx_drop);
25472 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
25473 break;
25474 case 0x1f: /* Reassembly abort: no buffers. */
25475 /* Silently increment error counter. */
25476 if (atm_vcc)
25477 - atomic_inc(&atm_vcc->stats->rx_drop);
25478 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
25479 break;
25480 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
25481 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
25482 diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
25483 index 361f5ae..7fc552d 100644
25484 --- a/drivers/atm/fore200e.c
25485 +++ b/drivers/atm/fore200e.c
25486 @@ -933,9 +933,9 @@ fore200e_tx_irq(struct fore200e* fore200e)
25487 #endif
25488 /* check error condition */
25489 if (*entry->status & STATUS_ERROR)
25490 - atomic_inc(&vcc->stats->tx_err);
25491 + atomic_inc_unchecked(&vcc->stats->tx_err);
25492 else
25493 - atomic_inc(&vcc->stats->tx);
25494 + atomic_inc_unchecked(&vcc->stats->tx);
25495 }
25496 }
25497
25498 @@ -1084,7 +1084,7 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
25499 if (skb == NULL) {
25500 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
25501
25502 - atomic_inc(&vcc->stats->rx_drop);
25503 + atomic_inc_unchecked(&vcc->stats->rx_drop);
25504 return -ENOMEM;
25505 }
25506
25507 @@ -1127,14 +1127,14 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
25508
25509 dev_kfree_skb_any(skb);
25510
25511 - atomic_inc(&vcc->stats->rx_drop);
25512 + atomic_inc_unchecked(&vcc->stats->rx_drop);
25513 return -ENOMEM;
25514 }
25515
25516 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
25517
25518 vcc->push(vcc, skb);
25519 - atomic_inc(&vcc->stats->rx);
25520 + atomic_inc_unchecked(&vcc->stats->rx);
25521
25522 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
25523
25524 @@ -1212,7 +1212,7 @@ fore200e_rx_irq(struct fore200e* fore200e)
25525 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
25526 fore200e->atm_dev->number,
25527 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
25528 - atomic_inc(&vcc->stats->rx_err);
25529 + atomic_inc_unchecked(&vcc->stats->rx_err);
25530 }
25531 }
25532
25533 @@ -1657,7 +1657,7 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
25534 goto retry_here;
25535 }
25536
25537 - atomic_inc(&vcc->stats->tx_err);
25538 + atomic_inc_unchecked(&vcc->stats->tx_err);
25539
25540 fore200e->tx_sat++;
25541 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
25542 diff --git a/drivers/atm/he.c b/drivers/atm/he.c
25543 index 9a51df4..f3bb5f8 100644
25544 --- a/drivers/atm/he.c
25545 +++ b/drivers/atm/he.c
25546 @@ -1709,7 +1709,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
25547
25548 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
25549 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
25550 - atomic_inc(&vcc->stats->rx_drop);
25551 + atomic_inc_unchecked(&vcc->stats->rx_drop);
25552 goto return_host_buffers;
25553 }
25554
25555 @@ -1736,7 +1736,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
25556 RBRQ_LEN_ERR(he_dev->rbrq_head)
25557 ? "LEN_ERR" : "",
25558 vcc->vpi, vcc->vci);
25559 - atomic_inc(&vcc->stats->rx_err);
25560 + atomic_inc_unchecked(&vcc->stats->rx_err);
25561 goto return_host_buffers;
25562 }
25563
25564 @@ -1788,7 +1788,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
25565 vcc->push(vcc, skb);
25566 spin_lock(&he_dev->global_lock);
25567
25568 - atomic_inc(&vcc->stats->rx);
25569 + atomic_inc_unchecked(&vcc->stats->rx);
25570
25571 return_host_buffers:
25572 ++pdus_assembled;
25573 @@ -2114,7 +2114,7 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
25574 tpd->vcc->pop(tpd->vcc, tpd->skb);
25575 else
25576 dev_kfree_skb_any(tpd->skb);
25577 - atomic_inc(&tpd->vcc->stats->tx_err);
25578 + atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
25579 }
25580 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
25581 return;
25582 @@ -2526,7 +2526,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
25583 vcc->pop(vcc, skb);
25584 else
25585 dev_kfree_skb_any(skb);
25586 - atomic_inc(&vcc->stats->tx_err);
25587 + atomic_inc_unchecked(&vcc->stats->tx_err);
25588 return -EINVAL;
25589 }
25590
25591 @@ -2537,7 +2537,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
25592 vcc->pop(vcc, skb);
25593 else
25594 dev_kfree_skb_any(skb);
25595 - atomic_inc(&vcc->stats->tx_err);
25596 + atomic_inc_unchecked(&vcc->stats->tx_err);
25597 return -EINVAL;
25598 }
25599 #endif
25600 @@ -2549,7 +2549,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
25601 vcc->pop(vcc, skb);
25602 else
25603 dev_kfree_skb_any(skb);
25604 - atomic_inc(&vcc->stats->tx_err);
25605 + atomic_inc_unchecked(&vcc->stats->tx_err);
25606 spin_unlock_irqrestore(&he_dev->global_lock, flags);
25607 return -ENOMEM;
25608 }
25609 @@ -2591,7 +2591,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
25610 vcc->pop(vcc, skb);
25611 else
25612 dev_kfree_skb_any(skb);
25613 - atomic_inc(&vcc->stats->tx_err);
25614 + atomic_inc_unchecked(&vcc->stats->tx_err);
25615 spin_unlock_irqrestore(&he_dev->global_lock, flags);
25616 return -ENOMEM;
25617 }
25618 @@ -2622,7 +2622,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
25619 __enqueue_tpd(he_dev, tpd, cid);
25620 spin_unlock_irqrestore(&he_dev->global_lock, flags);
25621
25622 - atomic_inc(&vcc->stats->tx);
25623 + atomic_inc_unchecked(&vcc->stats->tx);
25624
25625 return 0;
25626 }
25627 diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
25628 index b812103..e391a49 100644
25629 --- a/drivers/atm/horizon.c
25630 +++ b/drivers/atm/horizon.c
25631 @@ -1035,7 +1035,7 @@ static void rx_schedule (hrz_dev * dev, int irq) {
25632 {
25633 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
25634 // VC layer stats
25635 - atomic_inc(&vcc->stats->rx);
25636 + atomic_inc_unchecked(&vcc->stats->rx);
25637 __net_timestamp(skb);
25638 // end of our responsibility
25639 vcc->push (vcc, skb);
25640 @@ -1187,7 +1187,7 @@ static void tx_schedule (hrz_dev * const dev, int irq) {
25641 dev->tx_iovec = NULL;
25642
25643 // VC layer stats
25644 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
25645 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
25646
25647 // free the skb
25648 hrz_kfree_skb (skb);
25649 diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
25650 index 1c05212..c28e200 100644
25651 --- a/drivers/atm/idt77252.c
25652 +++ b/drivers/atm/idt77252.c
25653 @@ -812,7 +812,7 @@ drain_scq(struct idt77252_dev *card, struct vc_map *vc)
25654 else
25655 dev_kfree_skb(skb);
25656
25657 - atomic_inc(&vcc->stats->tx);
25658 + atomic_inc_unchecked(&vcc->stats->tx);
25659 }
25660
25661 atomic_dec(&scq->used);
25662 @@ -1075,13 +1075,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
25663 if ((sb = dev_alloc_skb(64)) == NULL) {
25664 printk("%s: Can't allocate buffers for aal0.\n",
25665 card->name);
25666 - atomic_add(i, &vcc->stats->rx_drop);
25667 + atomic_add_unchecked(i, &vcc->stats->rx_drop);
25668 break;
25669 }
25670 if (!atm_charge(vcc, sb->truesize)) {
25671 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
25672 card->name);
25673 - atomic_add(i - 1, &vcc->stats->rx_drop);
25674 + atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
25675 dev_kfree_skb(sb);
25676 break;
25677 }
25678 @@ -1098,7 +1098,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
25679 ATM_SKB(sb)->vcc = vcc;
25680 __net_timestamp(sb);
25681 vcc->push(vcc, sb);
25682 - atomic_inc(&vcc->stats->rx);
25683 + atomic_inc_unchecked(&vcc->stats->rx);
25684
25685 cell += ATM_CELL_PAYLOAD;
25686 }
25687 @@ -1135,13 +1135,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
25688 "(CDC: %08x)\n",
25689 card->name, len, rpp->len, readl(SAR_REG_CDC));
25690 recycle_rx_pool_skb(card, rpp);
25691 - atomic_inc(&vcc->stats->rx_err);
25692 + atomic_inc_unchecked(&vcc->stats->rx_err);
25693 return;
25694 }
25695 if (stat & SAR_RSQE_CRC) {
25696 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
25697 recycle_rx_pool_skb(card, rpp);
25698 - atomic_inc(&vcc->stats->rx_err);
25699 + atomic_inc_unchecked(&vcc->stats->rx_err);
25700 return;
25701 }
25702 if (skb_queue_len(&rpp->queue) > 1) {
25703 @@ -1152,7 +1152,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
25704 RXPRINTK("%s: Can't alloc RX skb.\n",
25705 card->name);
25706 recycle_rx_pool_skb(card, rpp);
25707 - atomic_inc(&vcc->stats->rx_err);
25708 + atomic_inc_unchecked(&vcc->stats->rx_err);
25709 return;
25710 }
25711 if (!atm_charge(vcc, skb->truesize)) {
25712 @@ -1171,7 +1171,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
25713 __net_timestamp(skb);
25714
25715 vcc->push(vcc, skb);
25716 - atomic_inc(&vcc->stats->rx);
25717 + atomic_inc_unchecked(&vcc->stats->rx);
25718
25719 return;
25720 }
25721 @@ -1193,7 +1193,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
25722 __net_timestamp(skb);
25723
25724 vcc->push(vcc, skb);
25725 - atomic_inc(&vcc->stats->rx);
25726 + atomic_inc_unchecked(&vcc->stats->rx);
25727
25728 if (skb->truesize > SAR_FB_SIZE_3)
25729 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
25730 @@ -1304,14 +1304,14 @@ idt77252_rx_raw(struct idt77252_dev *card)
25731 if (vcc->qos.aal != ATM_AAL0) {
25732 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
25733 card->name, vpi, vci);
25734 - atomic_inc(&vcc->stats->rx_drop);
25735 + atomic_inc_unchecked(&vcc->stats->rx_drop);
25736 goto drop;
25737 }
25738
25739 if ((sb = dev_alloc_skb(64)) == NULL) {
25740 printk("%s: Can't allocate buffers for AAL0.\n",
25741 card->name);
25742 - atomic_inc(&vcc->stats->rx_err);
25743 + atomic_inc_unchecked(&vcc->stats->rx_err);
25744 goto drop;
25745 }
25746
25747 @@ -1330,7 +1330,7 @@ idt77252_rx_raw(struct idt77252_dev *card)
25748 ATM_SKB(sb)->vcc = vcc;
25749 __net_timestamp(sb);
25750 vcc->push(vcc, sb);
25751 - atomic_inc(&vcc->stats->rx);
25752 + atomic_inc_unchecked(&vcc->stats->rx);
25753
25754 drop:
25755 skb_pull(queue, 64);
25756 @@ -1955,13 +1955,13 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
25757
25758 if (vc == NULL) {
25759 printk("%s: NULL connection in send().\n", card->name);
25760 - atomic_inc(&vcc->stats->tx_err);
25761 + atomic_inc_unchecked(&vcc->stats->tx_err);
25762 dev_kfree_skb(skb);
25763 return -EINVAL;
25764 }
25765 if (!test_bit(VCF_TX, &vc->flags)) {
25766 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
25767 - atomic_inc(&vcc->stats->tx_err);
25768 + atomic_inc_unchecked(&vcc->stats->tx_err);
25769 dev_kfree_skb(skb);
25770 return -EINVAL;
25771 }
25772 @@ -1973,14 +1973,14 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
25773 break;
25774 default:
25775 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
25776 - atomic_inc(&vcc->stats->tx_err);
25777 + atomic_inc_unchecked(&vcc->stats->tx_err);
25778 dev_kfree_skb(skb);
25779 return -EINVAL;
25780 }
25781
25782 if (skb_shinfo(skb)->nr_frags != 0) {
25783 printk("%s: No scatter-gather yet.\n", card->name);
25784 - atomic_inc(&vcc->stats->tx_err);
25785 + atomic_inc_unchecked(&vcc->stats->tx_err);
25786 dev_kfree_skb(skb);
25787 return -EINVAL;
25788 }
25789 @@ -1988,7 +1988,7 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
25790
25791 err = queue_skb(card, vc, skb, oam);
25792 if (err) {
25793 - atomic_inc(&vcc->stats->tx_err);
25794 + atomic_inc_unchecked(&vcc->stats->tx_err);
25795 dev_kfree_skb(skb);
25796 return err;
25797 }
25798 @@ -2011,7 +2011,7 @@ idt77252_send_oam(struct atm_vcc *vcc, void *cell, int flags)
25799 skb = dev_alloc_skb(64);
25800 if (!skb) {
25801 printk("%s: Out of memory in send_oam().\n", card->name);
25802 - atomic_inc(&vcc->stats->tx_err);
25803 + atomic_inc_unchecked(&vcc->stats->tx_err);
25804 return -ENOMEM;
25805 }
25806 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
25807 diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
25808 index 3d0c2b0..45441fa 100644
25809 --- a/drivers/atm/iphase.c
25810 +++ b/drivers/atm/iphase.c
25811 @@ -1146,7 +1146,7 @@ static int rx_pkt(struct atm_dev *dev)
25812 status = (u_short) (buf_desc_ptr->desc_mode);
25813 if (status & (RX_CER | RX_PTE | RX_OFL))
25814 {
25815 - atomic_inc(&vcc->stats->rx_err);
25816 + atomic_inc_unchecked(&vcc->stats->rx_err);
25817 IF_ERR(printk("IA: bad packet, dropping it");)
25818 if (status & RX_CER) {
25819 IF_ERR(printk(" cause: packet CRC error\n");)
25820 @@ -1169,7 +1169,7 @@ static int rx_pkt(struct atm_dev *dev)
25821 len = dma_addr - buf_addr;
25822 if (len > iadev->rx_buf_sz) {
25823 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
25824 - atomic_inc(&vcc->stats->rx_err);
25825 + atomic_inc_unchecked(&vcc->stats->rx_err);
25826 goto out_free_desc;
25827 }
25828
25829 @@ -1319,7 +1319,7 @@ static void rx_dle_intr(struct atm_dev *dev)
25830 ia_vcc = INPH_IA_VCC(vcc);
25831 if (ia_vcc == NULL)
25832 {
25833 - atomic_inc(&vcc->stats->rx_err);
25834 + atomic_inc_unchecked(&vcc->stats->rx_err);
25835 dev_kfree_skb_any(skb);
25836 atm_return(vcc, atm_guess_pdu2truesize(len));
25837 goto INCR_DLE;
25838 @@ -1331,7 +1331,7 @@ static void rx_dle_intr(struct atm_dev *dev)
25839 if ((length > iadev->rx_buf_sz) || (length >
25840 (skb->len - sizeof(struct cpcs_trailer))))
25841 {
25842 - atomic_inc(&vcc->stats->rx_err);
25843 + atomic_inc_unchecked(&vcc->stats->rx_err);
25844 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
25845 length, skb->len);)
25846 dev_kfree_skb_any(skb);
25847 @@ -1347,7 +1347,7 @@ static void rx_dle_intr(struct atm_dev *dev)
25848
25849 IF_RX(printk("rx_dle_intr: skb push");)
25850 vcc->push(vcc,skb);
25851 - atomic_inc(&vcc->stats->rx);
25852 + atomic_inc_unchecked(&vcc->stats->rx);
25853 iadev->rx_pkt_cnt++;
25854 }
25855 INCR_DLE:
25856 @@ -2827,15 +2827,15 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
25857 {
25858 struct k_sonet_stats *stats;
25859 stats = &PRIV(_ia_dev[board])->sonet_stats;
25860 - printk("section_bip: %d\n", atomic_read(&stats->section_bip));
25861 - printk("line_bip : %d\n", atomic_read(&stats->line_bip));
25862 - printk("path_bip : %d\n", atomic_read(&stats->path_bip));
25863 - printk("line_febe : %d\n", atomic_read(&stats->line_febe));
25864 - printk("path_febe : %d\n", atomic_read(&stats->path_febe));
25865 - printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
25866 - printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
25867 - printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
25868 - printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
25869 + printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
25870 + printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
25871 + printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
25872 + printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
25873 + printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
25874 + printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
25875 + printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
25876 + printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
25877 + printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
25878 }
25879 ia_cmds.status = 0;
25880 break;
25881 @@ -2940,7 +2940,7 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
25882 if ((desc == 0) || (desc > iadev->num_tx_desc))
25883 {
25884 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
25885 - atomic_inc(&vcc->stats->tx);
25886 + atomic_inc_unchecked(&vcc->stats->tx);
25887 if (vcc->pop)
25888 vcc->pop(vcc, skb);
25889 else
25890 @@ -3045,14 +3045,14 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
25891 ATM_DESC(skb) = vcc->vci;
25892 skb_queue_tail(&iadev->tx_dma_q, skb);
25893
25894 - atomic_inc(&vcc->stats->tx);
25895 + atomic_inc_unchecked(&vcc->stats->tx);
25896 iadev->tx_pkt_cnt++;
25897 /* Increment transaction counter */
25898 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
25899
25900 #if 0
25901 /* add flow control logic */
25902 - if (atomic_read(&vcc->stats->tx) % 20 == 0) {
25903 + if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
25904 if (iavcc->vc_desc_cnt > 10) {
25905 vcc->tx_quota = vcc->tx_quota * 3 / 4;
25906 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
25907 diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
25908 index f556969..0da15eb 100644
25909 --- a/drivers/atm/lanai.c
25910 +++ b/drivers/atm/lanai.c
25911 @@ -1303,7 +1303,7 @@ static void lanai_send_one_aal5(struct lanai_dev *lanai,
25912 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
25913 lanai_endtx(lanai, lvcc);
25914 lanai_free_skb(lvcc->tx.atmvcc, skb);
25915 - atomic_inc(&lvcc->tx.atmvcc->stats->tx);
25916 + atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
25917 }
25918
25919 /* Try to fill the buffer - don't call unless there is backlog */
25920 @@ -1426,7 +1426,7 @@ static void vcc_rx_aal5(struct lanai_vcc *lvcc, int endptr)
25921 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
25922 __net_timestamp(skb);
25923 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
25924 - atomic_inc(&lvcc->rx.atmvcc->stats->rx);
25925 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
25926 out:
25927 lvcc->rx.buf.ptr = end;
25928 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
25929 @@ -1667,7 +1667,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
25930 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
25931 "vcc %d\n", lanai->number, (unsigned int) s, vci);
25932 lanai->stats.service_rxnotaal5++;
25933 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
25934 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
25935 return 0;
25936 }
25937 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
25938 @@ -1679,7 +1679,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
25939 int bytes;
25940 read_unlock(&vcc_sklist_lock);
25941 DPRINTK("got trashed rx pdu on vci %d\n", vci);
25942 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
25943 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
25944 lvcc->stats.x.aal5.service_trash++;
25945 bytes = (SERVICE_GET_END(s) * 16) -
25946 (((unsigned long) lvcc->rx.buf.ptr) -
25947 @@ -1691,7 +1691,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
25948 }
25949 if (s & SERVICE_STREAM) {
25950 read_unlock(&vcc_sklist_lock);
25951 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
25952 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
25953 lvcc->stats.x.aal5.service_stream++;
25954 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
25955 "PDU on VCI %d!\n", lanai->number, vci);
25956 @@ -1699,7 +1699,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
25957 return 0;
25958 }
25959 DPRINTK("got rx crc error on vci %d\n", vci);
25960 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
25961 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
25962 lvcc->stats.x.aal5.service_rxcrc++;
25963 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
25964 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
25965 diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
25966 index 1c70c45..300718d 100644
25967 --- a/drivers/atm/nicstar.c
25968 +++ b/drivers/atm/nicstar.c
25969 @@ -1654,7 +1654,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
25970 if ((vc = (vc_map *) vcc->dev_data) == NULL) {
25971 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
25972 card->index);
25973 - atomic_inc(&vcc->stats->tx_err);
25974 + atomic_inc_unchecked(&vcc->stats->tx_err);
25975 dev_kfree_skb_any(skb);
25976 return -EINVAL;
25977 }
25978 @@ -1662,7 +1662,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
25979 if (!vc->tx) {
25980 printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
25981 card->index);
25982 - atomic_inc(&vcc->stats->tx_err);
25983 + atomic_inc_unchecked(&vcc->stats->tx_err);
25984 dev_kfree_skb_any(skb);
25985 return -EINVAL;
25986 }
25987 @@ -1670,14 +1670,14 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
25988 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
25989 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
25990 card->index);
25991 - atomic_inc(&vcc->stats->tx_err);
25992 + atomic_inc_unchecked(&vcc->stats->tx_err);
25993 dev_kfree_skb_any(skb);
25994 return -EINVAL;
25995 }
25996
25997 if (skb_shinfo(skb)->nr_frags != 0) {
25998 printk("nicstar%d: No scatter-gather yet.\n", card->index);
25999 - atomic_inc(&vcc->stats->tx_err);
26000 + atomic_inc_unchecked(&vcc->stats->tx_err);
26001 dev_kfree_skb_any(skb);
26002 return -EINVAL;
26003 }
26004 @@ -1725,11 +1725,11 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
26005 }
26006
26007 if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
26008 - atomic_inc(&vcc->stats->tx_err);
26009 + atomic_inc_unchecked(&vcc->stats->tx_err);
26010 dev_kfree_skb_any(skb);
26011 return -EIO;
26012 }
26013 - atomic_inc(&vcc->stats->tx);
26014 + atomic_inc_unchecked(&vcc->stats->tx);
26015
26016 return 0;
26017 }
26018 @@ -2046,14 +2046,14 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26019 printk
26020 ("nicstar%d: Can't allocate buffers for aal0.\n",
26021 card->index);
26022 - atomic_add(i, &vcc->stats->rx_drop);
26023 + atomic_add_unchecked(i, &vcc->stats->rx_drop);
26024 break;
26025 }
26026 if (!atm_charge(vcc, sb->truesize)) {
26027 RXPRINTK
26028 ("nicstar%d: atm_charge() dropped aal0 packets.\n",
26029 card->index);
26030 - atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
26031 + atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
26032 dev_kfree_skb_any(sb);
26033 break;
26034 }
26035 @@ -2068,7 +2068,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26036 ATM_SKB(sb)->vcc = vcc;
26037 __net_timestamp(sb);
26038 vcc->push(vcc, sb);
26039 - atomic_inc(&vcc->stats->rx);
26040 + atomic_inc_unchecked(&vcc->stats->rx);
26041 cell += ATM_CELL_PAYLOAD;
26042 }
26043
26044 @@ -2085,7 +2085,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26045 if (iovb == NULL) {
26046 printk("nicstar%d: Out of iovec buffers.\n",
26047 card->index);
26048 - atomic_inc(&vcc->stats->rx_drop);
26049 + atomic_inc_unchecked(&vcc->stats->rx_drop);
26050 recycle_rx_buf(card, skb);
26051 return;
26052 }
26053 @@ -2109,7 +2109,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26054 small or large buffer itself. */
26055 } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
26056 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
26057 - atomic_inc(&vcc->stats->rx_err);
26058 + atomic_inc_unchecked(&vcc->stats->rx_err);
26059 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
26060 NS_MAX_IOVECS);
26061 NS_PRV_IOVCNT(iovb) = 0;
26062 @@ -2129,7 +2129,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26063 ("nicstar%d: Expected a small buffer, and this is not one.\n",
26064 card->index);
26065 which_list(card, skb);
26066 - atomic_inc(&vcc->stats->rx_err);
26067 + atomic_inc_unchecked(&vcc->stats->rx_err);
26068 recycle_rx_buf(card, skb);
26069 vc->rx_iov = NULL;
26070 recycle_iov_buf(card, iovb);
26071 @@ -2142,7 +2142,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26072 ("nicstar%d: Expected a large buffer, and this is not one.\n",
26073 card->index);
26074 which_list(card, skb);
26075 - atomic_inc(&vcc->stats->rx_err);
26076 + atomic_inc_unchecked(&vcc->stats->rx_err);
26077 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
26078 NS_PRV_IOVCNT(iovb));
26079 vc->rx_iov = NULL;
26080 @@ -2165,7 +2165,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26081 printk(" - PDU size mismatch.\n");
26082 else
26083 printk(".\n");
26084 - atomic_inc(&vcc->stats->rx_err);
26085 + atomic_inc_unchecked(&vcc->stats->rx_err);
26086 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
26087 NS_PRV_IOVCNT(iovb));
26088 vc->rx_iov = NULL;
26089 @@ -2179,7 +2179,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26090 /* skb points to a small buffer */
26091 if (!atm_charge(vcc, skb->truesize)) {
26092 push_rxbufs(card, skb);
26093 - atomic_inc(&vcc->stats->rx_drop);
26094 + atomic_inc_unchecked(&vcc->stats->rx_drop);
26095 } else {
26096 skb_put(skb, len);
26097 dequeue_sm_buf(card, skb);
26098 @@ -2189,7 +2189,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26099 ATM_SKB(skb)->vcc = vcc;
26100 __net_timestamp(skb);
26101 vcc->push(vcc, skb);
26102 - atomic_inc(&vcc->stats->rx);
26103 + atomic_inc_unchecked(&vcc->stats->rx);
26104 }
26105 } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */
26106 struct sk_buff *sb;
26107 @@ -2200,7 +2200,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26108 if (len <= NS_SMBUFSIZE) {
26109 if (!atm_charge(vcc, sb->truesize)) {
26110 push_rxbufs(card, sb);
26111 - atomic_inc(&vcc->stats->rx_drop);
26112 + atomic_inc_unchecked(&vcc->stats->rx_drop);
26113 } else {
26114 skb_put(sb, len);
26115 dequeue_sm_buf(card, sb);
26116 @@ -2210,7 +2210,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26117 ATM_SKB(sb)->vcc = vcc;
26118 __net_timestamp(sb);
26119 vcc->push(vcc, sb);
26120 - atomic_inc(&vcc->stats->rx);
26121 + atomic_inc_unchecked(&vcc->stats->rx);
26122 }
26123
26124 push_rxbufs(card, skb);
26125 @@ -2219,7 +2219,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26126
26127 if (!atm_charge(vcc, skb->truesize)) {
26128 push_rxbufs(card, skb);
26129 - atomic_inc(&vcc->stats->rx_drop);
26130 + atomic_inc_unchecked(&vcc->stats->rx_drop);
26131 } else {
26132 dequeue_lg_buf(card, skb);
26133 #ifdef NS_USE_DESTRUCTORS
26134 @@ -2232,7 +2232,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26135 ATM_SKB(skb)->vcc = vcc;
26136 __net_timestamp(skb);
26137 vcc->push(vcc, skb);
26138 - atomic_inc(&vcc->stats->rx);
26139 + atomic_inc_unchecked(&vcc->stats->rx);
26140 }
26141
26142 push_rxbufs(card, sb);
26143 @@ -2253,7 +2253,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26144 printk
26145 ("nicstar%d: Out of huge buffers.\n",
26146 card->index);
26147 - atomic_inc(&vcc->stats->rx_drop);
26148 + atomic_inc_unchecked(&vcc->stats->rx_drop);
26149 recycle_iovec_rx_bufs(card,
26150 (struct iovec *)
26151 iovb->data,
26152 @@ -2304,7 +2304,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26153 card->hbpool.count++;
26154 } else
26155 dev_kfree_skb_any(hb);
26156 - atomic_inc(&vcc->stats->rx_drop);
26157 + atomic_inc_unchecked(&vcc->stats->rx_drop);
26158 } else {
26159 /* Copy the small buffer to the huge buffer */
26160 sb = (struct sk_buff *)iov->iov_base;
26161 @@ -2341,7 +2341,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26162 #endif /* NS_USE_DESTRUCTORS */
26163 __net_timestamp(hb);
26164 vcc->push(vcc, hb);
26165 - atomic_inc(&vcc->stats->rx);
26166 + atomic_inc_unchecked(&vcc->stats->rx);
26167 }
26168 }
26169
26170 diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
26171 index 5d1d076..12fbca4 100644
26172 --- a/drivers/atm/solos-pci.c
26173 +++ b/drivers/atm/solos-pci.c
26174 @@ -714,7 +714,7 @@ void solos_bh(unsigned long card_arg)
26175 }
26176 atm_charge(vcc, skb->truesize);
26177 vcc->push(vcc, skb);
26178 - atomic_inc(&vcc->stats->rx);
26179 + atomic_inc_unchecked(&vcc->stats->rx);
26180 break;
26181
26182 case PKT_STATUS:
26183 @@ -1008,7 +1008,7 @@ static uint32_t fpga_tx(struct solos_card *card)
26184 vcc = SKB_CB(oldskb)->vcc;
26185
26186 if (vcc) {
26187 - atomic_inc(&vcc->stats->tx);
26188 + atomic_inc_unchecked(&vcc->stats->tx);
26189 solos_pop(vcc, oldskb);
26190 } else
26191 dev_kfree_skb_irq(oldskb);
26192 diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c
26193 index 90f1ccc..04c4a1e 100644
26194 --- a/drivers/atm/suni.c
26195 +++ b/drivers/atm/suni.c
26196 @@ -50,8 +50,8 @@ static DEFINE_SPINLOCK(sunis_lock);
26197
26198
26199 #define ADD_LIMITED(s,v) \
26200 - atomic_add((v),&stats->s); \
26201 - if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
26202 + atomic_add_unchecked((v),&stats->s); \
26203 + if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
26204
26205
26206 static void suni_hz(unsigned long from_timer)
26207 diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c
26208 index 5120a96..e2572bd 100644
26209 --- a/drivers/atm/uPD98402.c
26210 +++ b/drivers/atm/uPD98402.c
26211 @@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *dev,struct sonet_stats __user *arg,int ze
26212 struct sonet_stats tmp;
26213 int error = 0;
26214
26215 - atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
26216 + atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
26217 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
26218 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
26219 if (zero && !error) {
26220 @@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
26221
26222
26223 #define ADD_LIMITED(s,v) \
26224 - { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
26225 - if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
26226 - atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
26227 + { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
26228 + if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
26229 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
26230
26231
26232 static void stat_event(struct atm_dev *dev)
26233 @@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev *dev)
26234 if (reason & uPD98402_INT_PFM) stat_event(dev);
26235 if (reason & uPD98402_INT_PCO) {
26236 (void) GET(PCOCR); /* clear interrupt cause */
26237 - atomic_add(GET(HECCT),
26238 + atomic_add_unchecked(GET(HECCT),
26239 &PRIV(dev)->sonet_stats.uncorr_hcs);
26240 }
26241 if ((reason & uPD98402_INT_RFO) &&
26242 @@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev *dev)
26243 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
26244 uPD98402_INT_LOS),PIMR); /* enable them */
26245 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
26246 - atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
26247 - atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
26248 - atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
26249 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
26250 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
26251 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
26252 return 0;
26253 }
26254
26255 diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
26256 index d889f56..17eb71e 100644
26257 --- a/drivers/atm/zatm.c
26258 +++ b/drivers/atm/zatm.c
26259 @@ -460,7 +460,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
26260 }
26261 if (!size) {
26262 dev_kfree_skb_irq(skb);
26263 - if (vcc) atomic_inc(&vcc->stats->rx_err);
26264 + if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
26265 continue;
26266 }
26267 if (!atm_charge(vcc,skb->truesize)) {
26268 @@ -470,7 +470,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
26269 skb->len = size;
26270 ATM_SKB(skb)->vcc = vcc;
26271 vcc->push(vcc,skb);
26272 - atomic_inc(&vcc->stats->rx);
26273 + atomic_inc_unchecked(&vcc->stats->rx);
26274 }
26275 zout(pos & 0xffff,MTA(mbx));
26276 #if 0 /* probably a stupid idea */
26277 @@ -734,7 +734,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP |
26278 skb_queue_head(&zatm_vcc->backlog,skb);
26279 break;
26280 }
26281 - atomic_inc(&vcc->stats->tx);
26282 + atomic_inc_unchecked(&vcc->stats->tx);
26283 wake_up(&zatm_vcc->tx_wait);
26284 }
26285
26286 diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
26287 index a4760e0..51283cf 100644
26288 --- a/drivers/base/devtmpfs.c
26289 +++ b/drivers/base/devtmpfs.c
26290 @@ -368,7 +368,7 @@ int devtmpfs_mount(const char *mntdir)
26291 if (!thread)
26292 return 0;
26293
26294 - err = sys_mount("devtmpfs", (char *)mntdir, "devtmpfs", MS_SILENT, NULL);
26295 + err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)mntdir, (char __force_user *)"devtmpfs", MS_SILENT, NULL);
26296 if (err)
26297 printk(KERN_INFO "devtmpfs: error mounting %i\n", err);
26298 else
26299 diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
26300 index caf995f..6f76697 100644
26301 --- a/drivers/base/power/wakeup.c
26302 +++ b/drivers/base/power/wakeup.c
26303 @@ -30,14 +30,14 @@ bool events_check_enabled;
26304 * They need to be modified together atomically, so it's better to use one
26305 * atomic variable to hold them both.
26306 */
26307 -static atomic_t combined_event_count = ATOMIC_INIT(0);
26308 +static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0);
26309
26310 #define IN_PROGRESS_BITS (sizeof(int) * 4)
26311 #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)
26312
26313 static void split_counters(unsigned int *cnt, unsigned int *inpr)
26314 {
26315 - unsigned int comb = atomic_read(&combined_event_count);
26316 + unsigned int comb = atomic_read_unchecked(&combined_event_count);
26317
26318 *cnt = (comb >> IN_PROGRESS_BITS);
26319 *inpr = comb & MAX_IN_PROGRESS;
26320 @@ -353,7 +353,7 @@ static void wakeup_source_activate(struct wakeup_source *ws)
26321 ws->last_time = ktime_get();
26322
26323 /* Increment the counter of events in progress. */
26324 - atomic_inc(&combined_event_count);
26325 + atomic_inc_unchecked(&combined_event_count);
26326 }
26327
26328 /**
26329 @@ -443,7 +443,7 @@ static void wakeup_source_deactivate(struct wakeup_source *ws)
26330 * Increment the counter of registered wakeup events and decrement the
26331 * couter of wakeup events in progress simultaneously.
26332 */
26333 - atomic_add(MAX_IN_PROGRESS, &combined_event_count);
26334 + atomic_add_unchecked(MAX_IN_PROGRESS, &combined_event_count);
26335 }
26336
26337 /**
26338 diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
26339 index b0f553b..77b928b 100644
26340 --- a/drivers/block/cciss.c
26341 +++ b/drivers/block/cciss.c
26342 @@ -1198,6 +1198,8 @@ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
26343 int err;
26344 u32 cp;
26345
26346 + memset(&arg64, 0, sizeof(arg64));
26347 +
26348 err = 0;
26349 err |=
26350 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
26351 @@ -3007,7 +3009,7 @@ static void start_io(ctlr_info_t *h)
26352 while (!list_empty(&h->reqQ)) {
26353 c = list_entry(h->reqQ.next, CommandList_struct, list);
26354 /* can't do anything if fifo is full */
26355 - if ((h->access.fifo_full(h))) {
26356 + if ((h->access->fifo_full(h))) {
26357 dev_warn(&h->pdev->dev, "fifo full\n");
26358 break;
26359 }
26360 @@ -3017,7 +3019,7 @@ static void start_io(ctlr_info_t *h)
26361 h->Qdepth--;
26362
26363 /* Tell the controller execute command */
26364 - h->access.submit_command(h, c);
26365 + h->access->submit_command(h, c);
26366
26367 /* Put job onto the completed Q */
26368 addQ(&h->cmpQ, c);
26369 @@ -3443,17 +3445,17 @@ startio:
26370
26371 static inline unsigned long get_next_completion(ctlr_info_t *h)
26372 {
26373 - return h->access.command_completed(h);
26374 + return h->access->command_completed(h);
26375 }
26376
26377 static inline int interrupt_pending(ctlr_info_t *h)
26378 {
26379 - return h->access.intr_pending(h);
26380 + return h->access->intr_pending(h);
26381 }
26382
26383 static inline long interrupt_not_for_us(ctlr_info_t *h)
26384 {
26385 - return ((h->access.intr_pending(h) == 0) ||
26386 + return ((h->access->intr_pending(h) == 0) ||
26387 (h->interrupts_enabled == 0));
26388 }
26389
26390 @@ -3486,7 +3488,7 @@ static inline u32 next_command(ctlr_info_t *h)
26391 u32 a;
26392
26393 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
26394 - return h->access.command_completed(h);
26395 + return h->access->command_completed(h);
26396
26397 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
26398 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
26399 @@ -4044,7 +4046,7 @@ static void __devinit cciss_put_controller_into_performant_mode(ctlr_info_t *h)
26400 trans_support & CFGTBL_Trans_use_short_tags);
26401
26402 /* Change the access methods to the performant access methods */
26403 - h->access = SA5_performant_access;
26404 + h->access = &SA5_performant_access;
26405 h->transMethod = CFGTBL_Trans_Performant;
26406
26407 return;
26408 @@ -4316,7 +4318,7 @@ static int __devinit cciss_pci_init(ctlr_info_t *h)
26409 if (prod_index < 0)
26410 return -ENODEV;
26411 h->product_name = products[prod_index].product_name;
26412 - h->access = *(products[prod_index].access);
26413 + h->access = products[prod_index].access;
26414
26415 if (cciss_board_disabled(h)) {
26416 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
26417 @@ -5041,7 +5043,7 @@ reinit_after_soft_reset:
26418 }
26419
26420 /* make sure the board interrupts are off */
26421 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
26422 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
26423 rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
26424 if (rc)
26425 goto clean2;
26426 @@ -5093,7 +5095,7 @@ reinit_after_soft_reset:
26427 * fake ones to scoop up any residual completions.
26428 */
26429 spin_lock_irqsave(&h->lock, flags);
26430 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
26431 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
26432 spin_unlock_irqrestore(&h->lock, flags);
26433 free_irq(h->intr[h->intr_mode], h);
26434 rc = cciss_request_irq(h, cciss_msix_discard_completions,
26435 @@ -5113,9 +5115,9 @@ reinit_after_soft_reset:
26436 dev_info(&h->pdev->dev, "Board READY.\n");
26437 dev_info(&h->pdev->dev,
26438 "Waiting for stale completions to drain.\n");
26439 - h->access.set_intr_mask(h, CCISS_INTR_ON);
26440 + h->access->set_intr_mask(h, CCISS_INTR_ON);
26441 msleep(10000);
26442 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
26443 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
26444
26445 rc = controller_reset_failed(h->cfgtable);
26446 if (rc)
26447 @@ -5138,7 +5140,7 @@ reinit_after_soft_reset:
26448 cciss_scsi_setup(h);
26449
26450 /* Turn the interrupts on so we can service requests */
26451 - h->access.set_intr_mask(h, CCISS_INTR_ON);
26452 + h->access->set_intr_mask(h, CCISS_INTR_ON);
26453
26454 /* Get the firmware version */
26455 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
26456 @@ -5211,7 +5213,7 @@ static void cciss_shutdown(struct pci_dev *pdev)
26457 kfree(flush_buf);
26458 if (return_code != IO_OK)
26459 dev_warn(&h->pdev->dev, "Error flushing cache\n");
26460 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
26461 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
26462 free_irq(h->intr[h->intr_mode], h);
26463 }
26464
26465 diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
26466 index 7fda30e..eb5dfe0 100644
26467 --- a/drivers/block/cciss.h
26468 +++ b/drivers/block/cciss.h
26469 @@ -101,7 +101,7 @@ struct ctlr_info
26470 /* information about each logical volume */
26471 drive_info_struct *drv[CISS_MAX_LUN];
26472
26473 - struct access_method access;
26474 + struct access_method *access;
26475
26476 /* queue and queue Info */
26477 struct list_head reqQ;
26478 diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
26479 index 9125bbe..eede5c8 100644
26480 --- a/drivers/block/cpqarray.c
26481 +++ b/drivers/block/cpqarray.c
26482 @@ -404,7 +404,7 @@ static int __devinit cpqarray_register_ctlr( int i, struct pci_dev *pdev)
26483 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
26484 goto Enomem4;
26485 }
26486 - hba[i]->access.set_intr_mask(hba[i], 0);
26487 + hba[i]->access->set_intr_mask(hba[i], 0);
26488 if (request_irq(hba[i]->intr, do_ida_intr,
26489 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
26490 {
26491 @@ -459,7 +459,7 @@ static int __devinit cpqarray_register_ctlr( int i, struct pci_dev *pdev)
26492 add_timer(&hba[i]->timer);
26493
26494 /* Enable IRQ now that spinlock and rate limit timer are set up */
26495 - hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
26496 + hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
26497
26498 for(j=0; j<NWD; j++) {
26499 struct gendisk *disk = ida_gendisk[i][j];
26500 @@ -694,7 +694,7 @@ DBGINFO(
26501 for(i=0; i<NR_PRODUCTS; i++) {
26502 if (board_id == products[i].board_id) {
26503 c->product_name = products[i].product_name;
26504 - c->access = *(products[i].access);
26505 + c->access = products[i].access;
26506 break;
26507 }
26508 }
26509 @@ -792,7 +792,7 @@ static int __devinit cpqarray_eisa_detect(void)
26510 hba[ctlr]->intr = intr;
26511 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
26512 hba[ctlr]->product_name = products[j].product_name;
26513 - hba[ctlr]->access = *(products[j].access);
26514 + hba[ctlr]->access = products[j].access;
26515 hba[ctlr]->ctlr = ctlr;
26516 hba[ctlr]->board_id = board_id;
26517 hba[ctlr]->pci_dev = NULL; /* not PCI */
26518 @@ -980,7 +980,7 @@ static void start_io(ctlr_info_t *h)
26519
26520 while((c = h->reqQ) != NULL) {
26521 /* Can't do anything if we're busy */
26522 - if (h->access.fifo_full(h) == 0)
26523 + if (h->access->fifo_full(h) == 0)
26524 return;
26525
26526 /* Get the first entry from the request Q */
26527 @@ -988,7 +988,7 @@ static void start_io(ctlr_info_t *h)
26528 h->Qdepth--;
26529
26530 /* Tell the controller to do our bidding */
26531 - h->access.submit_command(h, c);
26532 + h->access->submit_command(h, c);
26533
26534 /* Get onto the completion Q */
26535 addQ(&h->cmpQ, c);
26536 @@ -1050,7 +1050,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
26537 unsigned long flags;
26538 __u32 a,a1;
26539
26540 - istat = h->access.intr_pending(h);
26541 + istat = h->access->intr_pending(h);
26542 /* Is this interrupt for us? */
26543 if (istat == 0)
26544 return IRQ_NONE;
26545 @@ -1061,7 +1061,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
26546 */
26547 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
26548 if (istat & FIFO_NOT_EMPTY) {
26549 - while((a = h->access.command_completed(h))) {
26550 + while((a = h->access->command_completed(h))) {
26551 a1 = a; a &= ~3;
26552 if ((c = h->cmpQ) == NULL)
26553 {
26554 @@ -1449,11 +1449,11 @@ static int sendcmd(
26555 /*
26556 * Disable interrupt
26557 */
26558 - info_p->access.set_intr_mask(info_p, 0);
26559 + info_p->access->set_intr_mask(info_p, 0);
26560 /* Make sure there is room in the command FIFO */
26561 /* Actually it should be completely empty at this time. */
26562 for (i = 200000; i > 0; i--) {
26563 - temp = info_p->access.fifo_full(info_p);
26564 + temp = info_p->access->fifo_full(info_p);
26565 if (temp != 0) {
26566 break;
26567 }
26568 @@ -1466,7 +1466,7 @@ DBG(
26569 /*
26570 * Send the cmd
26571 */
26572 - info_p->access.submit_command(info_p, c);
26573 + info_p->access->submit_command(info_p, c);
26574 complete = pollcomplete(ctlr);
26575
26576 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
26577 @@ -1549,9 +1549,9 @@ static int revalidate_allvol(ctlr_info_t *host)
26578 * we check the new geometry. Then turn interrupts back on when
26579 * we're done.
26580 */
26581 - host->access.set_intr_mask(host, 0);
26582 + host->access->set_intr_mask(host, 0);
26583 getgeometry(ctlr);
26584 - host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
26585 + host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
26586
26587 for(i=0; i<NWD; i++) {
26588 struct gendisk *disk = ida_gendisk[ctlr][i];
26589 @@ -1591,7 +1591,7 @@ static int pollcomplete(int ctlr)
26590 /* Wait (up to 2 seconds) for a command to complete */
26591
26592 for (i = 200000; i > 0; i--) {
26593 - done = hba[ctlr]->access.command_completed(hba[ctlr]);
26594 + done = hba[ctlr]->access->command_completed(hba[ctlr]);
26595 if (done == 0) {
26596 udelay(10); /* a short fixed delay */
26597 } else
26598 diff --git a/drivers/block/cpqarray.h b/drivers/block/cpqarray.h
26599 index be73e9d..7fbf140 100644
26600 --- a/drivers/block/cpqarray.h
26601 +++ b/drivers/block/cpqarray.h
26602 @@ -99,7 +99,7 @@ struct ctlr_info {
26603 drv_info_t drv[NWD];
26604 struct proc_dir_entry *proc;
26605
26606 - struct access_method access;
26607 + struct access_method *access;
26608
26609 cmdlist_t *reqQ;
26610 cmdlist_t *cmpQ;
26611 diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
26612 index 9cf2035..bffca95 100644
26613 --- a/drivers/block/drbd/drbd_int.h
26614 +++ b/drivers/block/drbd/drbd_int.h
26615 @@ -736,7 +736,7 @@ struct drbd_request;
26616 struct drbd_epoch {
26617 struct list_head list;
26618 unsigned int barrier_nr;
26619 - atomic_t epoch_size; /* increased on every request added. */
26620 + atomic_unchecked_t epoch_size; /* increased on every request added. */
26621 atomic_t active; /* increased on every req. added, and dec on every finished. */
26622 unsigned long flags;
26623 };
26624 @@ -1108,7 +1108,7 @@ struct drbd_conf {
26625 void *int_dig_in;
26626 void *int_dig_vv;
26627 wait_queue_head_t seq_wait;
26628 - atomic_t packet_seq;
26629 + atomic_unchecked_t packet_seq;
26630 unsigned int peer_seq;
26631 spinlock_t peer_seq_lock;
26632 unsigned int minor;
26633 @@ -1617,30 +1617,30 @@ static inline int drbd_setsockopt(struct socket *sock, int level, int optname,
26634
26635 static inline void drbd_tcp_cork(struct socket *sock)
26636 {
26637 - int __user val = 1;
26638 + int val = 1;
26639 (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
26640 - (char __user *)&val, sizeof(val));
26641 + (char __force_user *)&val, sizeof(val));
26642 }
26643
26644 static inline void drbd_tcp_uncork(struct socket *sock)
26645 {
26646 - int __user val = 0;
26647 + int val = 0;
26648 (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
26649 - (char __user *)&val, sizeof(val));
26650 + (char __force_user *)&val, sizeof(val));
26651 }
26652
26653 static inline void drbd_tcp_nodelay(struct socket *sock)
26654 {
26655 - int __user val = 1;
26656 + int val = 1;
26657 (void) drbd_setsockopt(sock, SOL_TCP, TCP_NODELAY,
26658 - (char __user *)&val, sizeof(val));
26659 + (char __force_user *)&val, sizeof(val));
26660 }
26661
26662 static inline void drbd_tcp_quickack(struct socket *sock)
26663 {
26664 - int __user val = 2;
26665 + int val = 2;
26666 (void) drbd_setsockopt(sock, SOL_TCP, TCP_QUICKACK,
26667 - (char __user *)&val, sizeof(val));
26668 + (char __force_user *)&val, sizeof(val));
26669 }
26670
26671 void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo);
26672 diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
26673 index 0358e55..bc33689 100644
26674 --- a/drivers/block/drbd/drbd_main.c
26675 +++ b/drivers/block/drbd/drbd_main.c
26676 @@ -2397,7 +2397,7 @@ static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packets cmd,
26677 p.sector = sector;
26678 p.block_id = block_id;
26679 p.blksize = blksize;
26680 - p.seq_num = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));
26681 + p.seq_num = cpu_to_be32(atomic_add_return_unchecked(1, &mdev->packet_seq));
26682
26683 if (!mdev->meta.socket || mdev->state.conn < C_CONNECTED)
26684 return false;
26685 @@ -2696,7 +2696,7 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
26686 p.sector = cpu_to_be64(req->sector);
26687 p.block_id = (unsigned long)req;
26688 p.seq_num = cpu_to_be32(req->seq_num =
26689 - atomic_add_return(1, &mdev->packet_seq));
26690 + atomic_add_return_unchecked(1, &mdev->packet_seq));
26691
26692 dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
26693
26694 @@ -2981,7 +2981,7 @@ void drbd_init_set_defaults(struct drbd_conf *mdev)
26695 atomic_set(&mdev->unacked_cnt, 0);
26696 atomic_set(&mdev->local_cnt, 0);
26697 atomic_set(&mdev->net_cnt, 0);
26698 - atomic_set(&mdev->packet_seq, 0);
26699 + atomic_set_unchecked(&mdev->packet_seq, 0);
26700 atomic_set(&mdev->pp_in_use, 0);
26701 atomic_set(&mdev->pp_in_use_by_net, 0);
26702 atomic_set(&mdev->rs_sect_in, 0);
26703 @@ -3063,8 +3063,8 @@ void drbd_mdev_cleanup(struct drbd_conf *mdev)
26704 mdev->receiver.t_state);
26705
26706 /* no need to lock it, I'm the only thread alive */
26707 - if (atomic_read(&mdev->current_epoch->epoch_size) != 0)
26708 - dev_err(DEV, "epoch_size:%d\n", atomic_read(&mdev->current_epoch->epoch_size));
26709 + if (atomic_read_unchecked(&mdev->current_epoch->epoch_size) != 0)
26710 + dev_err(DEV, "epoch_size:%d\n", atomic_read_unchecked(&mdev->current_epoch->epoch_size));
26711 mdev->al_writ_cnt =
26712 mdev->bm_writ_cnt =
26713 mdev->read_cnt =
26714 diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
26715 index af2a250..219c74b 100644
26716 --- a/drivers/block/drbd/drbd_nl.c
26717 +++ b/drivers/block/drbd/drbd_nl.c
26718 @@ -2359,7 +2359,7 @@ static void drbd_connector_callback(struct cn_msg *req, struct netlink_skb_parms
26719 module_put(THIS_MODULE);
26720 }
26721
26722 -static atomic_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
26723 +static atomic_unchecked_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
26724
26725 static unsigned short *
26726 __tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data,
26727 @@ -2430,7 +2430,7 @@ void drbd_bcast_state(struct drbd_conf *mdev, union drbd_state state)
26728 cn_reply->id.idx = CN_IDX_DRBD;
26729 cn_reply->id.val = CN_VAL_DRBD;
26730
26731 - cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
26732 + cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
26733 cn_reply->ack = 0; /* not used here. */
26734 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
26735 (int)((char *)tl - (char *)reply->tag_list);
26736 @@ -2462,7 +2462,7 @@ void drbd_bcast_ev_helper(struct drbd_conf *mdev, char *helper_name)
26737 cn_reply->id.idx = CN_IDX_DRBD;
26738 cn_reply->id.val = CN_VAL_DRBD;
26739
26740 - cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
26741 + cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
26742 cn_reply->ack = 0; /* not used here. */
26743 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
26744 (int)((char *)tl - (char *)reply->tag_list);
26745 @@ -2540,7 +2540,7 @@ void drbd_bcast_ee(struct drbd_conf *mdev,
26746 cn_reply->id.idx = CN_IDX_DRBD;
26747 cn_reply->id.val = CN_VAL_DRBD;
26748
26749 - cn_reply->seq = atomic_add_return(1,&drbd_nl_seq);
26750 + cn_reply->seq = atomic_add_return_unchecked(1,&drbd_nl_seq);
26751 cn_reply->ack = 0; // not used here.
26752 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
26753 (int)((char*)tl - (char*)reply->tag_list);
26754 @@ -2579,7 +2579,7 @@ void drbd_bcast_sync_progress(struct drbd_conf *mdev)
26755 cn_reply->id.idx = CN_IDX_DRBD;
26756 cn_reply->id.val = CN_VAL_DRBD;
26757
26758 - cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
26759 + cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
26760 cn_reply->ack = 0; /* not used here. */
26761 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
26762 (int)((char *)tl - (char *)reply->tag_list);
26763 diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
26764 index 43beaca..4a5b1dd 100644
26765 --- a/drivers/block/drbd/drbd_receiver.c
26766 +++ b/drivers/block/drbd/drbd_receiver.c
26767 @@ -894,7 +894,7 @@ retry:
26768 sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
26769 sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
26770
26771 - atomic_set(&mdev->packet_seq, 0);
26772 + atomic_set_unchecked(&mdev->packet_seq, 0);
26773 mdev->peer_seq = 0;
26774
26775 drbd_thread_start(&mdev->asender);
26776 @@ -985,7 +985,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
26777 do {
26778 next_epoch = NULL;
26779
26780 - epoch_size = atomic_read(&epoch->epoch_size);
26781 + epoch_size = atomic_read_unchecked(&epoch->epoch_size);
26782
26783 switch (ev & ~EV_CLEANUP) {
26784 case EV_PUT:
26785 @@ -1020,7 +1020,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
26786 rv = FE_DESTROYED;
26787 } else {
26788 epoch->flags = 0;
26789 - atomic_set(&epoch->epoch_size, 0);
26790 + atomic_set_unchecked(&epoch->epoch_size, 0);
26791 /* atomic_set(&epoch->active, 0); is already zero */
26792 if (rv == FE_STILL_LIVE)
26793 rv = FE_RECYCLED;
26794 @@ -1191,14 +1191,14 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign
26795 drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
26796 drbd_flush(mdev);
26797
26798 - if (atomic_read(&mdev->current_epoch->epoch_size)) {
26799 + if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
26800 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
26801 if (epoch)
26802 break;
26803 }
26804
26805 epoch = mdev->current_epoch;
26806 - wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0);
26807 + wait_event(mdev->ee_wait, atomic_read_unchecked(&epoch->epoch_size) == 0);
26808
26809 D_ASSERT(atomic_read(&epoch->active) == 0);
26810 D_ASSERT(epoch->flags == 0);
26811 @@ -1210,11 +1210,11 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign
26812 }
26813
26814 epoch->flags = 0;
26815 - atomic_set(&epoch->epoch_size, 0);
26816 + atomic_set_unchecked(&epoch->epoch_size, 0);
26817 atomic_set(&epoch->active, 0);
26818
26819 spin_lock(&mdev->epoch_lock);
26820 - if (atomic_read(&mdev->current_epoch->epoch_size)) {
26821 + if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
26822 list_add(&epoch->list, &mdev->current_epoch->list);
26823 mdev->current_epoch = epoch;
26824 mdev->epochs++;
26825 @@ -1663,7 +1663,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
26826 spin_unlock(&mdev->peer_seq_lock);
26827
26828 drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
26829 - atomic_inc(&mdev->current_epoch->epoch_size);
26830 + atomic_inc_unchecked(&mdev->current_epoch->epoch_size);
26831 return drbd_drain_block(mdev, data_size);
26832 }
26833
26834 @@ -1689,7 +1689,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
26835
26836 spin_lock(&mdev->epoch_lock);
26837 e->epoch = mdev->current_epoch;
26838 - atomic_inc(&e->epoch->epoch_size);
26839 + atomic_inc_unchecked(&e->epoch->epoch_size);
26840 atomic_inc(&e->epoch->active);
26841 spin_unlock(&mdev->epoch_lock);
26842
26843 @@ -3885,7 +3885,7 @@ static void drbd_disconnect(struct drbd_conf *mdev)
26844 D_ASSERT(list_empty(&mdev->done_ee));
26845
26846 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
26847 - atomic_set(&mdev->current_epoch->epoch_size, 0);
26848 + atomic_set_unchecked(&mdev->current_epoch->epoch_size, 0);
26849 D_ASSERT(list_empty(&mdev->current_epoch->list));
26850 }
26851
26852 diff --git a/drivers/block/loop.c b/drivers/block/loop.c
26853 index 1e888c9..05cf1b0 100644
26854 --- a/drivers/block/loop.c
26855 +++ b/drivers/block/loop.c
26856 @@ -227,7 +227,7 @@ static int __do_lo_send_write(struct file *file,
26857 mm_segment_t old_fs = get_fs();
26858
26859 set_fs(get_ds());
26860 - bw = file->f_op->write(file, buf, len, &pos);
26861 + bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
26862 set_fs(old_fs);
26863 if (likely(bw == len))
26864 return 0;
26865 diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
26866 index 4364303..9adf4ee 100644
26867 --- a/drivers/char/Kconfig
26868 +++ b/drivers/char/Kconfig
26869 @@ -8,7 +8,8 @@ source "drivers/tty/Kconfig"
26870
26871 config DEVKMEM
26872 bool "/dev/kmem virtual device support"
26873 - default y
26874 + default n
26875 + depends on !GRKERNSEC_KMEM
26876 help
26877 Say Y here if you want to support the /dev/kmem device. The
26878 /dev/kmem device is rarely used, but can be used for certain
26879 @@ -596,6 +597,7 @@ config DEVPORT
26880 bool
26881 depends on !M68K
26882 depends on ISA || PCI
26883 + depends on !GRKERNSEC_KMEM
26884 default y
26885
26886 source "drivers/s390/char/Kconfig"
26887 diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
26888 index 2e04433..22afc64 100644
26889 --- a/drivers/char/agp/frontend.c
26890 +++ b/drivers/char/agp/frontend.c
26891 @@ -817,7 +817,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
26892 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
26893 return -EFAULT;
26894
26895 - if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
26896 + if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
26897 return -EFAULT;
26898
26899 client = agp_find_client_by_pid(reserve.pid);
26900 diff --git a/drivers/char/briq_panel.c b/drivers/char/briq_panel.c
26901 index 095ab90..afad0a4 100644
26902 --- a/drivers/char/briq_panel.c
26903 +++ b/drivers/char/briq_panel.c
26904 @@ -9,6 +9,7 @@
26905 #include <linux/types.h>
26906 #include <linux/errno.h>
26907 #include <linux/tty.h>
26908 +#include <linux/mutex.h>
26909 #include <linux/timer.h>
26910 #include <linux/kernel.h>
26911 #include <linux/wait.h>
26912 @@ -34,6 +35,7 @@ static int vfd_is_open;
26913 static unsigned char vfd[40];
26914 static int vfd_cursor;
26915 static unsigned char ledpb, led;
26916 +static DEFINE_MUTEX(vfd_mutex);
26917
26918 static void update_vfd(void)
26919 {
26920 @@ -140,12 +142,15 @@ static ssize_t briq_panel_write(struct file *file, const char __user *buf, size_
26921 if (!vfd_is_open)
26922 return -EBUSY;
26923
26924 + mutex_lock(&vfd_mutex);
26925 for (;;) {
26926 char c;
26927 if (!indx)
26928 break;
26929 - if (get_user(c, buf))
26930 + if (get_user(c, buf)) {
26931 + mutex_unlock(&vfd_mutex);
26932 return -EFAULT;
26933 + }
26934 if (esc) {
26935 set_led(c);
26936 esc = 0;
26937 @@ -175,6 +180,7 @@ static ssize_t briq_panel_write(struct file *file, const char __user *buf, size_
26938 buf++;
26939 }
26940 update_vfd();
26941 + mutex_unlock(&vfd_mutex);
26942
26943 return len;
26944 }
26945 diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
26946 index f773a9d..65cd683 100644
26947 --- a/drivers/char/genrtc.c
26948 +++ b/drivers/char/genrtc.c
26949 @@ -273,6 +273,7 @@ static int gen_rtc_ioctl(struct file *file,
26950 switch (cmd) {
26951
26952 case RTC_PLL_GET:
26953 + memset(&pll, 0, sizeof(pll));
26954 if (get_rtc_pll(&pll))
26955 return -EINVAL;
26956 else
26957 diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
26958 index 0833896..cccce52 100644
26959 --- a/drivers/char/hpet.c
26960 +++ b/drivers/char/hpet.c
26961 @@ -572,7 +572,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
26962 }
26963
26964 static int
26965 -hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
26966 +hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
26967 struct hpet_info *info)
26968 {
26969 struct hpet_timer __iomem *timer;
26970 diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
26971 index 58c0e63..46c16bf 100644
26972 --- a/drivers/char/ipmi/ipmi_msghandler.c
26973 +++ b/drivers/char/ipmi/ipmi_msghandler.c
26974 @@ -415,7 +415,7 @@ struct ipmi_smi {
26975 struct proc_dir_entry *proc_dir;
26976 char proc_dir_name[10];
26977
26978 - atomic_t stats[IPMI_NUM_STATS];
26979 + atomic_unchecked_t stats[IPMI_NUM_STATS];
26980
26981 /*
26982 * run_to_completion duplicate of smb_info, smi_info
26983 @@ -448,9 +448,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
26984
26985
26986 #define ipmi_inc_stat(intf, stat) \
26987 - atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
26988 + atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
26989 #define ipmi_get_stat(intf, stat) \
26990 - ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
26991 + ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
26992
26993 static int is_lan_addr(struct ipmi_addr *addr)
26994 {
26995 @@ -2868,7 +2868,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
26996 INIT_LIST_HEAD(&intf->cmd_rcvrs);
26997 init_waitqueue_head(&intf->waitq);
26998 for (i = 0; i < IPMI_NUM_STATS; i++)
26999 - atomic_set(&intf->stats[i], 0);
27000 + atomic_set_unchecked(&intf->stats[i], 0);
27001
27002 intf->proc_dir = NULL;
27003
27004 diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
27005 index 9397ab4..d01bee1 100644
27006 --- a/drivers/char/ipmi/ipmi_si_intf.c
27007 +++ b/drivers/char/ipmi/ipmi_si_intf.c
27008 @@ -277,7 +277,7 @@ struct smi_info {
27009 unsigned char slave_addr;
27010
27011 /* Counters and things for the proc filesystem. */
27012 - atomic_t stats[SI_NUM_STATS];
27013 + atomic_unchecked_t stats[SI_NUM_STATS];
27014
27015 struct task_struct *thread;
27016
27017 @@ -286,9 +286,9 @@ struct smi_info {
27018 };
27019
27020 #define smi_inc_stat(smi, stat) \
27021 - atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
27022 + atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
27023 #define smi_get_stat(smi, stat) \
27024 - ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
27025 + ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
27026
27027 #define SI_MAX_PARMS 4
27028
27029 @@ -3230,7 +3230,7 @@ static int try_smi_init(struct smi_info *new_smi)
27030 atomic_set(&new_smi->req_events, 0);
27031 new_smi->run_to_completion = 0;
27032 for (i = 0; i < SI_NUM_STATS; i++)
27033 - atomic_set(&new_smi->stats[i], 0);
27034 + atomic_set_unchecked(&new_smi->stats[i], 0);
27035
27036 new_smi->interrupt_disabled = 1;
27037 atomic_set(&new_smi->stop_operation, 0);
27038 diff --git a/drivers/char/mbcs.c b/drivers/char/mbcs.c
27039 index 1aeaaba..e018570 100644
27040 --- a/drivers/char/mbcs.c
27041 +++ b/drivers/char/mbcs.c
27042 @@ -800,7 +800,7 @@ static int mbcs_remove(struct cx_dev *dev)
27043 return 0;
27044 }
27045
27046 -static const struct cx_device_id __devinitdata mbcs_id_table[] = {
27047 +static const struct cx_device_id __devinitconst mbcs_id_table[] = {
27048 {
27049 .part_num = MBCS_PART_NUM,
27050 .mfg_num = MBCS_MFG_NUM,
27051 diff --git a/drivers/char/mem.c b/drivers/char/mem.c
27052 index 1451790..f705c30 100644
27053 --- a/drivers/char/mem.c
27054 +++ b/drivers/char/mem.c
27055 @@ -18,6 +18,7 @@
27056 #include <linux/raw.h>
27057 #include <linux/tty.h>
27058 #include <linux/capability.h>
27059 +#include <linux/security.h>
27060 #include <linux/ptrace.h>
27061 #include <linux/device.h>
27062 #include <linux/highmem.h>
27063 @@ -35,6 +36,10 @@
27064 # include <linux/efi.h>
27065 #endif
27066
27067 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
27068 +extern const struct file_operations grsec_fops;
27069 +#endif
27070 +
27071 static inline unsigned long size_inside_page(unsigned long start,
27072 unsigned long size)
27073 {
27074 @@ -66,9 +71,13 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
27075
27076 while (cursor < to) {
27077 if (!devmem_is_allowed(pfn)) {
27078 +#ifdef CONFIG_GRKERNSEC_KMEM
27079 + gr_handle_mem_readwrite(from, to);
27080 +#else
27081 printk(KERN_INFO
27082 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
27083 current->comm, from, to);
27084 +#endif
27085 return 0;
27086 }
27087 cursor += PAGE_SIZE;
27088 @@ -76,6 +85,11 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
27089 }
27090 return 1;
27091 }
27092 +#elif defined(CONFIG_GRKERNSEC_KMEM)
27093 +static inline int range_is_allowed(unsigned long pfn, unsigned long size)
27094 +{
27095 + return 0;
27096 +}
27097 #else
27098 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
27099 {
27100 @@ -118,6 +132,7 @@ static ssize_t read_mem(struct file *file, char __user *buf,
27101
27102 while (count > 0) {
27103 unsigned long remaining;
27104 + char *temp;
27105
27106 sz = size_inside_page(p, count);
27107
27108 @@ -133,7 +148,23 @@ static ssize_t read_mem(struct file *file, char __user *buf,
27109 if (!ptr)
27110 return -EFAULT;
27111
27112 - remaining = copy_to_user(buf, ptr, sz);
27113 +#ifdef CONFIG_PAX_USERCOPY
27114 + temp = kmalloc(sz, GFP_KERNEL);
27115 + if (!temp) {
27116 + unxlate_dev_mem_ptr(p, ptr);
27117 + return -ENOMEM;
27118 + }
27119 + memcpy(temp, ptr, sz);
27120 +#else
27121 + temp = ptr;
27122 +#endif
27123 +
27124 + remaining = copy_to_user(buf, temp, sz);
27125 +
27126 +#ifdef CONFIG_PAX_USERCOPY
27127 + kfree(temp);
27128 +#endif
27129 +
27130 unxlate_dev_mem_ptr(p, ptr);
27131 if (remaining)
27132 return -EFAULT;
27133 @@ -396,9 +427,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
27134 size_t count, loff_t *ppos)
27135 {
27136 unsigned long p = *ppos;
27137 - ssize_t low_count, read, sz;
27138 + ssize_t low_count, read, sz, err = 0;
27139 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
27140 - int err = 0;
27141
27142 read = 0;
27143 if (p < (unsigned long) high_memory) {
27144 @@ -420,6 +450,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
27145 }
27146 #endif
27147 while (low_count > 0) {
27148 + char *temp;
27149 +
27150 sz = size_inside_page(p, low_count);
27151
27152 /*
27153 @@ -429,7 +461,22 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
27154 */
27155 kbuf = xlate_dev_kmem_ptr((char *)p);
27156
27157 - if (copy_to_user(buf, kbuf, sz))
27158 +#ifdef CONFIG_PAX_USERCOPY
27159 + temp = kmalloc(sz, GFP_KERNEL);
27160 + if (!temp)
27161 + return -ENOMEM;
27162 + memcpy(temp, kbuf, sz);
27163 +#else
27164 + temp = kbuf;
27165 +#endif
27166 +
27167 + err = copy_to_user(buf, temp, sz);
27168 +
27169 +#ifdef CONFIG_PAX_USERCOPY
27170 + kfree(temp);
27171 +#endif
27172 +
27173 + if (err)
27174 return -EFAULT;
27175 buf += sz;
27176 p += sz;
27177 @@ -867,6 +914,9 @@ static const struct memdev {
27178 #ifdef CONFIG_CRASH_DUMP
27179 [12] = { "oldmem", 0, &oldmem_fops, NULL },
27180 #endif
27181 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
27182 + [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
27183 +#endif
27184 };
27185
27186 static int memory_open(struct inode *inode, struct file *filp)
27187 diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
27188 index da3cfee..a5a6606 100644
27189 --- a/drivers/char/nvram.c
27190 +++ b/drivers/char/nvram.c
27191 @@ -248,7 +248,7 @@ static ssize_t nvram_read(struct file *file, char __user *buf,
27192
27193 spin_unlock_irq(&rtc_lock);
27194
27195 - if (copy_to_user(buf, contents, tmp - contents))
27196 + if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents))
27197 return -EFAULT;
27198
27199 *ppos = i;
27200 diff --git a/drivers/char/random.c b/drivers/char/random.c
27201 index 6035ab8..bdfe4fd 100644
27202 --- a/drivers/char/random.c
27203 +++ b/drivers/char/random.c
27204 @@ -261,8 +261,13 @@
27205 /*
27206 * Configuration information
27207 */
27208 +#ifdef CONFIG_GRKERNSEC_RANDNET
27209 +#define INPUT_POOL_WORDS 512
27210 +#define OUTPUT_POOL_WORDS 128
27211 +#else
27212 #define INPUT_POOL_WORDS 128
27213 #define OUTPUT_POOL_WORDS 32
27214 +#endif
27215 #define SEC_XFER_SIZE 512
27216 #define EXTRACT_SIZE 10
27217
27218 @@ -300,10 +305,17 @@ static struct poolinfo {
27219 int poolwords;
27220 int tap1, tap2, tap3, tap4, tap5;
27221 } poolinfo_table[] = {
27222 +#ifdef CONFIG_GRKERNSEC_RANDNET
27223 + /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
27224 + { 512, 411, 308, 208, 104, 1 },
27225 + /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
27226 + { 128, 103, 76, 51, 25, 1 },
27227 +#else
27228 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
27229 { 128, 103, 76, 51, 25, 1 },
27230 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
27231 { 32, 26, 20, 14, 7, 1 },
27232 +#endif
27233 #if 0
27234 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
27235 { 2048, 1638, 1231, 819, 411, 1 },
27236 @@ -909,7 +921,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
27237
27238 extract_buf(r, tmp);
27239 i = min_t(int, nbytes, EXTRACT_SIZE);
27240 - if (copy_to_user(buf, tmp, i)) {
27241 + if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
27242 ret = -EFAULT;
27243 break;
27244 }
27245 @@ -1228,7 +1240,7 @@ EXPORT_SYMBOL(generate_random_uuid);
27246 #include <linux/sysctl.h>
27247
27248 static int min_read_thresh = 8, min_write_thresh;
27249 -static int max_read_thresh = INPUT_POOL_WORDS * 32;
27250 +static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
27251 static int max_write_thresh = INPUT_POOL_WORDS * 32;
27252 static char sysctl_bootid[16];
27253
27254 diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
27255 index 1ee8ce7..b778bef 100644
27256 --- a/drivers/char/sonypi.c
27257 +++ b/drivers/char/sonypi.c
27258 @@ -55,6 +55,7 @@
27259 #include <asm/uaccess.h>
27260 #include <asm/io.h>
27261 #include <asm/system.h>
27262 +#include <asm/local.h>
27263
27264 #include <linux/sonypi.h>
27265
27266 @@ -491,7 +492,7 @@ static struct sonypi_device {
27267 spinlock_t fifo_lock;
27268 wait_queue_head_t fifo_proc_list;
27269 struct fasync_struct *fifo_async;
27270 - int open_count;
27271 + local_t open_count;
27272 int model;
27273 struct input_dev *input_jog_dev;
27274 struct input_dev *input_key_dev;
27275 @@ -898,7 +899,7 @@ static int sonypi_misc_fasync(int fd, struct file *filp, int on)
27276 static int sonypi_misc_release(struct inode *inode, struct file *file)
27277 {
27278 mutex_lock(&sonypi_device.lock);
27279 - sonypi_device.open_count--;
27280 + local_dec(&sonypi_device.open_count);
27281 mutex_unlock(&sonypi_device.lock);
27282 return 0;
27283 }
27284 @@ -907,9 +908,9 @@ static int sonypi_misc_open(struct inode *inode, struct file *file)
27285 {
27286 mutex_lock(&sonypi_device.lock);
27287 /* Flush input queue on first open */
27288 - if (!sonypi_device.open_count)
27289 + if (!local_read(&sonypi_device.open_count))
27290 kfifo_reset(&sonypi_device.fifo);
27291 - sonypi_device.open_count++;
27292 + local_inc(&sonypi_device.open_count);
27293 mutex_unlock(&sonypi_device.lock);
27294
27295 return 0;
27296 diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
27297 index 361a1df..2471eee 100644
27298 --- a/drivers/char/tpm/tpm.c
27299 +++ b/drivers/char/tpm/tpm.c
27300 @@ -414,7 +414,7 @@ static ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
27301 chip->vendor.req_complete_val)
27302 goto out_recv;
27303
27304 - if ((status == chip->vendor.req_canceled)) {
27305 + if (status == chip->vendor.req_canceled) {
27306 dev_err(chip->dev, "Operation Canceled\n");
27307 rc = -ECANCELED;
27308 goto out;
27309 diff --git a/drivers/char/tpm/tpm_bios.c b/drivers/char/tpm/tpm_bios.c
27310 index 0636520..169c1d0 100644
27311 --- a/drivers/char/tpm/tpm_bios.c
27312 +++ b/drivers/char/tpm/tpm_bios.c
27313 @@ -173,7 +173,7 @@ static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
27314 event = addr;
27315
27316 if ((event->event_type == 0 && event->event_size == 0) ||
27317 - ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
27318 + (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
27319 return NULL;
27320
27321 return addr;
27322 @@ -198,7 +198,7 @@ static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
27323 return NULL;
27324
27325 if ((event->event_type == 0 && event->event_size == 0) ||
27326 - ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
27327 + (event->event_size >= limit - v - sizeof(struct tcpa_event)))
27328 return NULL;
27329
27330 (*pos)++;
27331 @@ -291,7 +291,8 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
27332 int i;
27333
27334 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
27335 - seq_putc(m, data[i]);
27336 + if (!seq_putc(m, data[i]))
27337 + return -EFAULT;
27338
27339 return 0;
27340 }
27341 @@ -410,8 +411,13 @@ static int read_log(struct tpm_bios_log *log)
27342 log->bios_event_log_end = log->bios_event_log + len;
27343
27344 virt = acpi_os_map_memory(start, len);
27345 + if (!virt) {
27346 + kfree(log->bios_event_log);
27347 + log->bios_event_log = NULL;
27348 + return -EFAULT;
27349 + }
27350
27351 - memcpy(log->bios_event_log, virt, len);
27352 + memcpy(log->bios_event_log, (const char __force_kernel *)virt, len);
27353
27354 acpi_os_unmap_memory(virt, len);
27355 return 0;
27356 diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
27357 index 8e3c46d..c139b99 100644
27358 --- a/drivers/char/virtio_console.c
27359 +++ b/drivers/char/virtio_console.c
27360 @@ -563,7 +563,7 @@ static ssize_t fill_readbuf(struct port *port, char *out_buf, size_t out_count,
27361 if (to_user) {
27362 ssize_t ret;
27363
27364 - ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count);
27365 + ret = copy_to_user((char __force_user *)out_buf, buf->buf + buf->offset, out_count);
27366 if (ret)
27367 return -EFAULT;
27368 } else {
27369 @@ -662,7 +662,7 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
27370 if (!port_has_data(port) && !port->host_connected)
27371 return 0;
27372
27373 - return fill_readbuf(port, ubuf, count, true);
27374 + return fill_readbuf(port, (char __force_kernel *)ubuf, count, true);
27375 }
27376
27377 static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
27378 diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
27379 index eb1d864..39ee5a7 100644
27380 --- a/drivers/dma/dmatest.c
27381 +++ b/drivers/dma/dmatest.c
27382 @@ -591,7 +591,7 @@ static int dmatest_add_channel(struct dma_chan *chan)
27383 }
27384 if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) {
27385 cnt = dmatest_add_threads(dtc, DMA_PQ);
27386 - thread_count += cnt > 0 ?: 0;
27387 + thread_count += cnt > 0 ? cnt : 0;
27388 }
27389
27390 pr_info("dmatest: Started %u threads using %s\n",
27391 diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
27392 index c9eee6d..f9d5280 100644
27393 --- a/drivers/edac/amd64_edac.c
27394 +++ b/drivers/edac/amd64_edac.c
27395 @@ -2685,7 +2685,7 @@ static void __devexit amd64_remove_one_instance(struct pci_dev *pdev)
27396 * PCI core identifies what devices are on a system during boot, and then
27397 * inquiry this table to see if this driver is for a given device found.
27398 */
27399 -static const struct pci_device_id amd64_pci_table[] __devinitdata = {
27400 +static const struct pci_device_id amd64_pci_table[] __devinitconst = {
27401 {
27402 .vendor = PCI_VENDOR_ID_AMD,
27403 .device = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
27404 diff --git a/drivers/edac/amd76x_edac.c b/drivers/edac/amd76x_edac.c
27405 index e47e73b..348e0bd 100644
27406 --- a/drivers/edac/amd76x_edac.c
27407 +++ b/drivers/edac/amd76x_edac.c
27408 @@ -321,7 +321,7 @@ static void __devexit amd76x_remove_one(struct pci_dev *pdev)
27409 edac_mc_free(mci);
27410 }
27411
27412 -static const struct pci_device_id amd76x_pci_tbl[] __devinitdata = {
27413 +static const struct pci_device_id amd76x_pci_tbl[] __devinitconst = {
27414 {
27415 PCI_VEND_DEV(AMD, FE_GATE_700C), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27416 AMD762},
27417 diff --git a/drivers/edac/e752x_edac.c b/drivers/edac/e752x_edac.c
27418 index 1af531a..3a8ff27 100644
27419 --- a/drivers/edac/e752x_edac.c
27420 +++ b/drivers/edac/e752x_edac.c
27421 @@ -1380,7 +1380,7 @@ static void __devexit e752x_remove_one(struct pci_dev *pdev)
27422 edac_mc_free(mci);
27423 }
27424
27425 -static const struct pci_device_id e752x_pci_tbl[] __devinitdata = {
27426 +static const struct pci_device_id e752x_pci_tbl[] __devinitconst = {
27427 {
27428 PCI_VEND_DEV(INTEL, 7520_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27429 E7520},
27430 diff --git a/drivers/edac/e7xxx_edac.c b/drivers/edac/e7xxx_edac.c
27431 index 6ffb6d2..383d8d7 100644
27432 --- a/drivers/edac/e7xxx_edac.c
27433 +++ b/drivers/edac/e7xxx_edac.c
27434 @@ -525,7 +525,7 @@ static void __devexit e7xxx_remove_one(struct pci_dev *pdev)
27435 edac_mc_free(mci);
27436 }
27437
27438 -static const struct pci_device_id e7xxx_pci_tbl[] __devinitdata = {
27439 +static const struct pci_device_id e7xxx_pci_tbl[] __devinitconst = {
27440 {
27441 PCI_VEND_DEV(INTEL, 7205_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27442 E7205},
27443 diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
27444 index 495198a..ac08c85 100644
27445 --- a/drivers/edac/edac_pci_sysfs.c
27446 +++ b/drivers/edac/edac_pci_sysfs.c
27447 @@ -26,8 +26,8 @@ static int edac_pci_log_pe = 1; /* log PCI parity errors */
27448 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
27449 static int edac_pci_poll_msec = 1000; /* one second workq period */
27450
27451 -static atomic_t pci_parity_count = ATOMIC_INIT(0);
27452 -static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
27453 +static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
27454 +static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
27455
27456 static struct kobject *edac_pci_top_main_kobj;
27457 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
27458 @@ -582,7 +582,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
27459 edac_printk(KERN_CRIT, EDAC_PCI,
27460 "Signaled System Error on %s\n",
27461 pci_name(dev));
27462 - atomic_inc(&pci_nonparity_count);
27463 + atomic_inc_unchecked(&pci_nonparity_count);
27464 }
27465
27466 if (status & (PCI_STATUS_PARITY)) {
27467 @@ -590,7 +590,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
27468 "Master Data Parity Error on %s\n",
27469 pci_name(dev));
27470
27471 - atomic_inc(&pci_parity_count);
27472 + atomic_inc_unchecked(&pci_parity_count);
27473 }
27474
27475 if (status & (PCI_STATUS_DETECTED_PARITY)) {
27476 @@ -598,7 +598,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
27477 "Detected Parity Error on %s\n",
27478 pci_name(dev));
27479
27480 - atomic_inc(&pci_parity_count);
27481 + atomic_inc_unchecked(&pci_parity_count);
27482 }
27483 }
27484
27485 @@ -619,7 +619,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
27486 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
27487 "Signaled System Error on %s\n",
27488 pci_name(dev));
27489 - atomic_inc(&pci_nonparity_count);
27490 + atomic_inc_unchecked(&pci_nonparity_count);
27491 }
27492
27493 if (status & (PCI_STATUS_PARITY)) {
27494 @@ -627,7 +627,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
27495 "Master Data Parity Error on "
27496 "%s\n", pci_name(dev));
27497
27498 - atomic_inc(&pci_parity_count);
27499 + atomic_inc_unchecked(&pci_parity_count);
27500 }
27501
27502 if (status & (PCI_STATUS_DETECTED_PARITY)) {
27503 @@ -635,7 +635,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
27504 "Detected Parity Error on %s\n",
27505 pci_name(dev));
27506
27507 - atomic_inc(&pci_parity_count);
27508 + atomic_inc_unchecked(&pci_parity_count);
27509 }
27510 }
27511 }
27512 @@ -677,7 +677,7 @@ void edac_pci_do_parity_check(void)
27513 if (!check_pci_errors)
27514 return;
27515
27516 - before_count = atomic_read(&pci_parity_count);
27517 + before_count = atomic_read_unchecked(&pci_parity_count);
27518
27519 /* scan all PCI devices looking for a Parity Error on devices and
27520 * bridges.
27521 @@ -689,7 +689,7 @@ void edac_pci_do_parity_check(void)
27522 /* Only if operator has selected panic on PCI Error */
27523 if (edac_pci_get_panic_on_pe()) {
27524 /* If the count is different 'after' from 'before' */
27525 - if (before_count != atomic_read(&pci_parity_count))
27526 + if (before_count != atomic_read_unchecked(&pci_parity_count))
27527 panic("EDAC: PCI Parity Error");
27528 }
27529 }
27530 diff --git a/drivers/edac/i3000_edac.c b/drivers/edac/i3000_edac.c
27531 index c0510b3..6e2a954 100644
27532 --- a/drivers/edac/i3000_edac.c
27533 +++ b/drivers/edac/i3000_edac.c
27534 @@ -470,7 +470,7 @@ static void __devexit i3000_remove_one(struct pci_dev *pdev)
27535 edac_mc_free(mci);
27536 }
27537
27538 -static const struct pci_device_id i3000_pci_tbl[] __devinitdata = {
27539 +static const struct pci_device_id i3000_pci_tbl[] __devinitconst = {
27540 {
27541 PCI_VEND_DEV(INTEL, 3000_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27542 I3000},
27543 diff --git a/drivers/edac/i3200_edac.c b/drivers/edac/i3200_edac.c
27544 index aa08497..7e6822a 100644
27545 --- a/drivers/edac/i3200_edac.c
27546 +++ b/drivers/edac/i3200_edac.c
27547 @@ -456,7 +456,7 @@ static void __devexit i3200_remove_one(struct pci_dev *pdev)
27548 edac_mc_free(mci);
27549 }
27550
27551 -static const struct pci_device_id i3200_pci_tbl[] __devinitdata = {
27552 +static const struct pci_device_id i3200_pci_tbl[] __devinitconst = {
27553 {
27554 PCI_VEND_DEV(INTEL, 3200_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27555 I3200},
27556 diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c
27557 index 4dc3ac2..67d05a6 100644
27558 --- a/drivers/edac/i5000_edac.c
27559 +++ b/drivers/edac/i5000_edac.c
27560 @@ -1516,7 +1516,7 @@ static void __devexit i5000_remove_one(struct pci_dev *pdev)
27561 *
27562 * The "E500P" device is the first device supported.
27563 */
27564 -static const struct pci_device_id i5000_pci_tbl[] __devinitdata = {
27565 +static const struct pci_device_id i5000_pci_tbl[] __devinitconst = {
27566 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I5000_DEV16),
27567 .driver_data = I5000P},
27568
27569 diff --git a/drivers/edac/i5100_edac.c b/drivers/edac/i5100_edac.c
27570 index bcbdeec..9886d16 100644
27571 --- a/drivers/edac/i5100_edac.c
27572 +++ b/drivers/edac/i5100_edac.c
27573 @@ -1051,7 +1051,7 @@ static void __devexit i5100_remove_one(struct pci_dev *pdev)
27574 edac_mc_free(mci);
27575 }
27576
27577 -static const struct pci_device_id i5100_pci_tbl[] __devinitdata = {
27578 +static const struct pci_device_id i5100_pci_tbl[] __devinitconst = {
27579 /* Device 16, Function 0, Channel 0 Memory Map, Error Flag/Mask, ... */
27580 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5100_16) },
27581 { 0, }
27582 diff --git a/drivers/edac/i5400_edac.c b/drivers/edac/i5400_edac.c
27583 index 74d6ec34..baff517 100644
27584 --- a/drivers/edac/i5400_edac.c
27585 +++ b/drivers/edac/i5400_edac.c
27586 @@ -1383,7 +1383,7 @@ static void __devexit i5400_remove_one(struct pci_dev *pdev)
27587 *
27588 * The "E500P" device is the first device supported.
27589 */
27590 -static const struct pci_device_id i5400_pci_tbl[] __devinitdata = {
27591 +static const struct pci_device_id i5400_pci_tbl[] __devinitconst = {
27592 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5400_ERR)},
27593 {0,} /* 0 terminated list. */
27594 };
27595 diff --git a/drivers/edac/i7300_edac.c b/drivers/edac/i7300_edac.c
27596 index 6104dba..e7ea8e1 100644
27597 --- a/drivers/edac/i7300_edac.c
27598 +++ b/drivers/edac/i7300_edac.c
27599 @@ -1192,7 +1192,7 @@ static void __devexit i7300_remove_one(struct pci_dev *pdev)
27600 *
27601 * Has only 8086:360c PCI ID
27602 */
27603 -static const struct pci_device_id i7300_pci_tbl[] __devinitdata = {
27604 +static const struct pci_device_id i7300_pci_tbl[] __devinitconst = {
27605 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I7300_MCH_ERR)},
27606 {0,} /* 0 terminated list. */
27607 };
27608 diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
27609 index 70ad892..178943c 100644
27610 --- a/drivers/edac/i7core_edac.c
27611 +++ b/drivers/edac/i7core_edac.c
27612 @@ -391,7 +391,7 @@ static const struct pci_id_table pci_dev_table[] = {
27613 /*
27614 * pci_device_id table for which devices we are looking for
27615 */
27616 -static const struct pci_device_id i7core_pci_tbl[] __devinitdata = {
27617 +static const struct pci_device_id i7core_pci_tbl[] __devinitconst = {
27618 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_X58_HUB_MGMT)},
27619 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LYNNFIELD_QPI_LINK0)},
27620 {0,} /* 0 terminated list. */
27621 diff --git a/drivers/edac/i82443bxgx_edac.c b/drivers/edac/i82443bxgx_edac.c
27622 index 4329d39..f3022ef 100644
27623 --- a/drivers/edac/i82443bxgx_edac.c
27624 +++ b/drivers/edac/i82443bxgx_edac.c
27625 @@ -380,7 +380,7 @@ static void __devexit i82443bxgx_edacmc_remove_one(struct pci_dev *pdev)
27626
27627 EXPORT_SYMBOL_GPL(i82443bxgx_edacmc_remove_one);
27628
27629 -static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitdata = {
27630 +static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitconst = {
27631 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_0)},
27632 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_2)},
27633 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_0)},
27634 diff --git a/drivers/edac/i82860_edac.c b/drivers/edac/i82860_edac.c
27635 index 931a057..fd28340 100644
27636 --- a/drivers/edac/i82860_edac.c
27637 +++ b/drivers/edac/i82860_edac.c
27638 @@ -270,7 +270,7 @@ static void __devexit i82860_remove_one(struct pci_dev *pdev)
27639 edac_mc_free(mci);
27640 }
27641
27642 -static const struct pci_device_id i82860_pci_tbl[] __devinitdata = {
27643 +static const struct pci_device_id i82860_pci_tbl[] __devinitconst = {
27644 {
27645 PCI_VEND_DEV(INTEL, 82860_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27646 I82860},
27647 diff --git a/drivers/edac/i82875p_edac.c b/drivers/edac/i82875p_edac.c
27648 index 33864c6..01edc61 100644
27649 --- a/drivers/edac/i82875p_edac.c
27650 +++ b/drivers/edac/i82875p_edac.c
27651 @@ -511,7 +511,7 @@ static void __devexit i82875p_remove_one(struct pci_dev *pdev)
27652 edac_mc_free(mci);
27653 }
27654
27655 -static const struct pci_device_id i82875p_pci_tbl[] __devinitdata = {
27656 +static const struct pci_device_id i82875p_pci_tbl[] __devinitconst = {
27657 {
27658 PCI_VEND_DEV(INTEL, 82875_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27659 I82875P},
27660 diff --git a/drivers/edac/i82975x_edac.c b/drivers/edac/i82975x_edac.c
27661 index a5da732..983363b 100644
27662 --- a/drivers/edac/i82975x_edac.c
27663 +++ b/drivers/edac/i82975x_edac.c
27664 @@ -604,7 +604,7 @@ static void __devexit i82975x_remove_one(struct pci_dev *pdev)
27665 edac_mc_free(mci);
27666 }
27667
27668 -static const struct pci_device_id i82975x_pci_tbl[] __devinitdata = {
27669 +static const struct pci_device_id i82975x_pci_tbl[] __devinitconst = {
27670 {
27671 PCI_VEND_DEV(INTEL, 82975_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27672 I82975X
27673 diff --git a/drivers/edac/mce_amd.h b/drivers/edac/mce_amd.h
27674 index 0106747..0b40417 100644
27675 --- a/drivers/edac/mce_amd.h
27676 +++ b/drivers/edac/mce_amd.h
27677 @@ -83,7 +83,7 @@ struct amd_decoder_ops {
27678 bool (*dc_mce)(u16, u8);
27679 bool (*ic_mce)(u16, u8);
27680 bool (*nb_mce)(u16, u8);
27681 -};
27682 +} __no_const;
27683
27684 void amd_report_gart_errors(bool);
27685 void amd_register_ecc_decoder(void (*f)(int, struct mce *));
27686 diff --git a/drivers/edac/r82600_edac.c b/drivers/edac/r82600_edac.c
27687 index b153674..ad2ba9b 100644
27688 --- a/drivers/edac/r82600_edac.c
27689 +++ b/drivers/edac/r82600_edac.c
27690 @@ -373,7 +373,7 @@ static void __devexit r82600_remove_one(struct pci_dev *pdev)
27691 edac_mc_free(mci);
27692 }
27693
27694 -static const struct pci_device_id r82600_pci_tbl[] __devinitdata = {
27695 +static const struct pci_device_id r82600_pci_tbl[] __devinitconst = {
27696 {
27697 PCI_DEVICE(PCI_VENDOR_ID_RADISYS, R82600_BRIDGE_ID)
27698 },
27699 diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
27700 index 7a402bf..af0b211 100644
27701 --- a/drivers/edac/sb_edac.c
27702 +++ b/drivers/edac/sb_edac.c
27703 @@ -367,7 +367,7 @@ static const struct pci_id_table pci_dev_descr_sbridge_table[] = {
27704 /*
27705 * pci_device_id table for which devices we are looking for
27706 */
27707 -static const struct pci_device_id sbridge_pci_tbl[] __devinitdata = {
27708 +static const struct pci_device_id sbridge_pci_tbl[] __devinitconst = {
27709 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA)},
27710 {0,} /* 0 terminated list. */
27711 };
27712 diff --git a/drivers/edac/x38_edac.c b/drivers/edac/x38_edac.c
27713 index b6f47de..c5acf3a 100644
27714 --- a/drivers/edac/x38_edac.c
27715 +++ b/drivers/edac/x38_edac.c
27716 @@ -440,7 +440,7 @@ static void __devexit x38_remove_one(struct pci_dev *pdev)
27717 edac_mc_free(mci);
27718 }
27719
27720 -static const struct pci_device_id x38_pci_tbl[] __devinitdata = {
27721 +static const struct pci_device_id x38_pci_tbl[] __devinitconst = {
27722 {
27723 PCI_VEND_DEV(INTEL, X38_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27724 X38},
27725 diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
27726 index 85661b0..c784559a 100644
27727 --- a/drivers/firewire/core-card.c
27728 +++ b/drivers/firewire/core-card.c
27729 @@ -657,7 +657,7 @@ void fw_card_release(struct kref *kref)
27730
27731 void fw_core_remove_card(struct fw_card *card)
27732 {
27733 - struct fw_card_driver dummy_driver = dummy_driver_template;
27734 + fw_card_driver_no_const dummy_driver = dummy_driver_template;
27735
27736 card->driver->update_phy_reg(card, 4,
27737 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
27738 diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
27739 index 4799393..37bd3ab 100644
27740 --- a/drivers/firewire/core-cdev.c
27741 +++ b/drivers/firewire/core-cdev.c
27742 @@ -1331,8 +1331,7 @@ static int init_iso_resource(struct client *client,
27743 int ret;
27744
27745 if ((request->channels == 0 && request->bandwidth == 0) ||
27746 - request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
27747 - request->bandwidth < 0)
27748 + request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
27749 return -EINVAL;
27750
27751 r = kmalloc(sizeof(*r), GFP_KERNEL);
27752 diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
27753 index 855ab3f..11f4bbd 100644
27754 --- a/drivers/firewire/core-transaction.c
27755 +++ b/drivers/firewire/core-transaction.c
27756 @@ -37,6 +37,7 @@
27757 #include <linux/timer.h>
27758 #include <linux/types.h>
27759 #include <linux/workqueue.h>
27760 +#include <linux/sched.h>
27761
27762 #include <asm/byteorder.h>
27763
27764 diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
27765 index b45be57..5fad18b 100644
27766 --- a/drivers/firewire/core.h
27767 +++ b/drivers/firewire/core.h
27768 @@ -101,6 +101,7 @@ struct fw_card_driver {
27769
27770 int (*stop_iso)(struct fw_iso_context *ctx);
27771 };
27772 +typedef struct fw_card_driver __no_const fw_card_driver_no_const;
27773
27774 void fw_card_initialize(struct fw_card *card,
27775 const struct fw_card_driver *driver, struct device *device);
27776 diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
27777 index 153980b..4b4d046 100644
27778 --- a/drivers/firmware/dmi_scan.c
27779 +++ b/drivers/firmware/dmi_scan.c
27780 @@ -449,11 +449,6 @@ void __init dmi_scan_machine(void)
27781 }
27782 }
27783 else {
27784 - /*
27785 - * no iounmap() for that ioremap(); it would be a no-op, but
27786 - * it's so early in setup that sucker gets confused into doing
27787 - * what it shouldn't if we actually call it.
27788 - */
27789 p = dmi_ioremap(0xF0000, 0x10000);
27790 if (p == NULL)
27791 goto error;
27792 @@ -723,7 +718,7 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *),
27793 if (buf == NULL)
27794 return -1;
27795
27796 - dmi_table(buf, dmi_len, dmi_num, decode, private_data);
27797 + dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data);
27798
27799 iounmap(buf);
27800 return 0;
27801 diff --git a/drivers/gpio/gpio-vr41xx.c b/drivers/gpio/gpio-vr41xx.c
27802 index 98723cb..10ca85b 100644
27803 --- a/drivers/gpio/gpio-vr41xx.c
27804 +++ b/drivers/gpio/gpio-vr41xx.c
27805 @@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
27806 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
27807 maskl, pendl, maskh, pendh);
27808
27809 - atomic_inc(&irq_err_count);
27810 + atomic_inc_unchecked(&irq_err_count);
27811
27812 return -EINVAL;
27813 }
27814 diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
27815 index 8323fc3..5c1d755 100644
27816 --- a/drivers/gpu/drm/drm_crtc.c
27817 +++ b/drivers/gpu/drm/drm_crtc.c
27818 @@ -1379,7 +1379,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
27819 */
27820 if ((out_resp->count_modes >= mode_count) && mode_count) {
27821 copied = 0;
27822 - mode_ptr = (struct drm_mode_modeinfo *)(unsigned long)out_resp->modes_ptr;
27823 + mode_ptr = (struct drm_mode_modeinfo __user *)(unsigned long)out_resp->modes_ptr;
27824 list_for_each_entry(mode, &connector->modes, head) {
27825 drm_crtc_convert_to_umode(&u_mode, mode);
27826 if (copy_to_user(mode_ptr + copied,
27827 @@ -1394,8 +1394,8 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
27828
27829 if ((out_resp->count_props >= props_count) && props_count) {
27830 copied = 0;
27831 - prop_ptr = (uint32_t *)(unsigned long)(out_resp->props_ptr);
27832 - prop_values = (uint64_t *)(unsigned long)(out_resp->prop_values_ptr);
27833 + prop_ptr = (uint32_t __user *)(unsigned long)(out_resp->props_ptr);
27834 + prop_values = (uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr);
27835 for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
27836 if (connector->property_ids[i] != 0) {
27837 if (put_user(connector->property_ids[i],
27838 @@ -1417,7 +1417,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
27839
27840 if ((out_resp->count_encoders >= encoders_count) && encoders_count) {
27841 copied = 0;
27842 - encoder_ptr = (uint32_t *)(unsigned long)(out_resp->encoders_ptr);
27843 + encoder_ptr = (uint32_t __user *)(unsigned long)(out_resp->encoders_ptr);
27844 for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
27845 if (connector->encoder_ids[i] != 0) {
27846 if (put_user(connector->encoder_ids[i],
27847 @@ -1576,7 +1576,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
27848 }
27849
27850 for (i = 0; i < crtc_req->count_connectors; i++) {
27851 - set_connectors_ptr = (uint32_t *)(unsigned long)crtc_req->set_connectors_ptr;
27852 + set_connectors_ptr = (uint32_t __user *)(unsigned long)crtc_req->set_connectors_ptr;
27853 if (get_user(out_id, &set_connectors_ptr[i])) {
27854 ret = -EFAULT;
27855 goto out;
27856 @@ -1857,7 +1857,7 @@ int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
27857 fb = obj_to_fb(obj);
27858
27859 num_clips = r->num_clips;
27860 - clips_ptr = (struct drm_clip_rect *)(unsigned long)r->clips_ptr;
27861 + clips_ptr = (struct drm_clip_rect __user *)(unsigned long)r->clips_ptr;
27862
27863 if (!num_clips != !clips_ptr) {
27864 ret = -EINVAL;
27865 @@ -2283,7 +2283,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
27866 out_resp->flags = property->flags;
27867
27868 if ((out_resp->count_values >= value_count) && value_count) {
27869 - values_ptr = (uint64_t *)(unsigned long)out_resp->values_ptr;
27870 + values_ptr = (uint64_t __user *)(unsigned long)out_resp->values_ptr;
27871 for (i = 0; i < value_count; i++) {
27872 if (copy_to_user(values_ptr + i, &property->values[i], sizeof(uint64_t))) {
27873 ret = -EFAULT;
27874 @@ -2296,7 +2296,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
27875 if (property->flags & DRM_MODE_PROP_ENUM) {
27876 if ((out_resp->count_enum_blobs >= enum_count) && enum_count) {
27877 copied = 0;
27878 - enum_ptr = (struct drm_mode_property_enum *)(unsigned long)out_resp->enum_blob_ptr;
27879 + enum_ptr = (struct drm_mode_property_enum __user *)(unsigned long)out_resp->enum_blob_ptr;
27880 list_for_each_entry(prop_enum, &property->enum_blob_list, head) {
27881
27882 if (copy_to_user(&enum_ptr[copied].value, &prop_enum->value, sizeof(uint64_t))) {
27883 @@ -2319,7 +2319,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
27884 if ((out_resp->count_enum_blobs >= blob_count) && blob_count) {
27885 copied = 0;
27886 blob_id_ptr = (uint32_t *)(unsigned long)out_resp->enum_blob_ptr;
27887 - blob_length_ptr = (uint32_t *)(unsigned long)out_resp->values_ptr;
27888 + blob_length_ptr = (uint32_t __user *)(unsigned long)out_resp->values_ptr;
27889
27890 list_for_each_entry(prop_blob, &property->enum_blob_list, head) {
27891 if (put_user(prop_blob->base.id, blob_id_ptr + copied)) {
27892 @@ -2380,7 +2380,7 @@ int drm_mode_getblob_ioctl(struct drm_device *dev,
27893 struct drm_mode_get_blob *out_resp = data;
27894 struct drm_property_blob *blob;
27895 int ret = 0;
27896 - void *blob_ptr;
27897 + void __user *blob_ptr;
27898
27899 if (!drm_core_check_feature(dev, DRIVER_MODESET))
27900 return -EINVAL;
27901 @@ -2394,7 +2394,7 @@ int drm_mode_getblob_ioctl(struct drm_device *dev,
27902 blob = obj_to_blob(obj);
27903
27904 if (out_resp->length == blob->length) {
27905 - blob_ptr = (void *)(unsigned long)out_resp->data;
27906 + blob_ptr = (void __user *)(unsigned long)out_resp->data;
27907 if (copy_to_user(blob_ptr, blob->data, blob->length)){
27908 ret = -EFAULT;
27909 goto done;
27910 diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
27911 index d2619d7..bd6bd00 100644
27912 --- a/drivers/gpu/drm/drm_crtc_helper.c
27913 +++ b/drivers/gpu/drm/drm_crtc_helper.c
27914 @@ -279,7 +279,7 @@ static bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
27915 struct drm_crtc *tmp;
27916 int crtc_mask = 1;
27917
27918 - WARN(!crtc, "checking null crtc?\n");
27919 + BUG_ON(!crtc);
27920
27921 dev = crtc->dev;
27922
27923 diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
27924 index 40c187c..5746164 100644
27925 --- a/drivers/gpu/drm/drm_drv.c
27926 +++ b/drivers/gpu/drm/drm_drv.c
27927 @@ -308,7 +308,7 @@ module_exit(drm_core_exit);
27928 /**
27929 * Copy and IOCTL return string to user space
27930 */
27931 -static int drm_copy_field(char *buf, size_t *buf_len, const char *value)
27932 +static int drm_copy_field(char __user *buf, size_t *buf_len, const char *value)
27933 {
27934 int len;
27935
27936 @@ -387,7 +387,7 @@ long drm_ioctl(struct file *filp,
27937
27938 dev = file_priv->minor->dev;
27939 atomic_inc(&dev->ioctl_count);
27940 - atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
27941 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
27942 ++file_priv->ioctl_count;
27943
27944 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
27945 diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
27946 index 4911e1d..484c8a3 100644
27947 --- a/drivers/gpu/drm/drm_fops.c
27948 +++ b/drivers/gpu/drm/drm_fops.c
27949 @@ -71,7 +71,7 @@ static int drm_setup(struct drm_device * dev)
27950 }
27951
27952 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
27953 - atomic_set(&dev->counts[i], 0);
27954 + atomic_set_unchecked(&dev->counts[i], 0);
27955
27956 dev->sigdata.lock = NULL;
27957
27958 @@ -135,8 +135,8 @@ int drm_open(struct inode *inode, struct file *filp)
27959
27960 retcode = drm_open_helper(inode, filp, dev);
27961 if (!retcode) {
27962 - atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
27963 - if (!dev->open_count++)
27964 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
27965 + if (local_inc_return(&dev->open_count) == 1)
27966 retcode = drm_setup(dev);
27967 }
27968 if (!retcode) {
27969 @@ -473,7 +473,7 @@ int drm_release(struct inode *inode, struct file *filp)
27970
27971 mutex_lock(&drm_global_mutex);
27972
27973 - DRM_DEBUG("open_count = %d\n", dev->open_count);
27974 + DRM_DEBUG("open_count = %d\n", local_read(&dev->open_count));
27975
27976 if (dev->driver->preclose)
27977 dev->driver->preclose(dev, file_priv);
27978 @@ -485,7 +485,7 @@ int drm_release(struct inode *inode, struct file *filp)
27979 DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
27980 task_pid_nr(current),
27981 (long)old_encode_dev(file_priv->minor->device),
27982 - dev->open_count);
27983 + local_read(&dev->open_count));
27984
27985 /* if the master has gone away we can't do anything with the lock */
27986 if (file_priv->minor->master)
27987 @@ -566,8 +566,8 @@ int drm_release(struct inode *inode, struct file *filp)
27988 * End inline drm_release
27989 */
27990
27991 - atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
27992 - if (!--dev->open_count) {
27993 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
27994 + if (local_dec_and_test(&dev->open_count)) {
27995 if (atomic_read(&dev->ioctl_count)) {
27996 DRM_ERROR("Device busy: %d\n",
27997 atomic_read(&dev->ioctl_count));
27998 diff --git a/drivers/gpu/drm/drm_global.c b/drivers/gpu/drm/drm_global.c
27999 index c87dc96..326055d 100644
28000 --- a/drivers/gpu/drm/drm_global.c
28001 +++ b/drivers/gpu/drm/drm_global.c
28002 @@ -36,7 +36,7 @@
28003 struct drm_global_item {
28004 struct mutex mutex;
28005 void *object;
28006 - int refcount;
28007 + atomic_t refcount;
28008 };
28009
28010 static struct drm_global_item glob[DRM_GLOBAL_NUM];
28011 @@ -49,7 +49,7 @@ void drm_global_init(void)
28012 struct drm_global_item *item = &glob[i];
28013 mutex_init(&item->mutex);
28014 item->object = NULL;
28015 - item->refcount = 0;
28016 + atomic_set(&item->refcount, 0);
28017 }
28018 }
28019
28020 @@ -59,7 +59,7 @@ void drm_global_release(void)
28021 for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
28022 struct drm_global_item *item = &glob[i];
28023 BUG_ON(item->object != NULL);
28024 - BUG_ON(item->refcount != 0);
28025 + BUG_ON(atomic_read(&item->refcount) != 0);
28026 }
28027 }
28028
28029 @@ -70,7 +70,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
28030 void *object;
28031
28032 mutex_lock(&item->mutex);
28033 - if (item->refcount == 0) {
28034 + if (atomic_read(&item->refcount) == 0) {
28035 item->object = kzalloc(ref->size, GFP_KERNEL);
28036 if (unlikely(item->object == NULL)) {
28037 ret = -ENOMEM;
28038 @@ -83,7 +83,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
28039 goto out_err;
28040
28041 }
28042 - ++item->refcount;
28043 + atomic_inc(&item->refcount);
28044 ref->object = item->object;
28045 object = item->object;
28046 mutex_unlock(&item->mutex);
28047 @@ -100,9 +100,9 @@ void drm_global_item_unref(struct drm_global_reference *ref)
28048 struct drm_global_item *item = &glob[ref->global_type];
28049
28050 mutex_lock(&item->mutex);
28051 - BUG_ON(item->refcount == 0);
28052 + BUG_ON(atomic_read(&item->refcount) == 0);
28053 BUG_ON(ref->object != item->object);
28054 - if (--item->refcount == 0) {
28055 + if (atomic_dec_and_test(&item->refcount)) {
28056 ref->release(ref);
28057 item->object = NULL;
28058 }
28059 diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
28060 index ab1162d..42587b2 100644
28061 --- a/drivers/gpu/drm/drm_info.c
28062 +++ b/drivers/gpu/drm/drm_info.c
28063 @@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void *data)
28064 struct drm_local_map *map;
28065 struct drm_map_list *r_list;
28066
28067 - /* Hardcoded from _DRM_FRAME_BUFFER,
28068 - _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
28069 - _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
28070 - const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
28071 + static const char * const types[] = {
28072 + [_DRM_FRAME_BUFFER] = "FB",
28073 + [_DRM_REGISTERS] = "REG",
28074 + [_DRM_SHM] = "SHM",
28075 + [_DRM_AGP] = "AGP",
28076 + [_DRM_SCATTER_GATHER] = "SG",
28077 + [_DRM_CONSISTENT] = "PCI",
28078 + [_DRM_GEM] = "GEM" };
28079 const char *type;
28080 int i;
28081
28082 @@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void *data)
28083 map = r_list->map;
28084 if (!map)
28085 continue;
28086 - if (map->type < 0 || map->type > 5)
28087 + if (map->type >= ARRAY_SIZE(types))
28088 type = "??";
28089 else
28090 type = types[map->type];
28091 @@ -290,7 +294,11 @@ int drm_vma_info(struct seq_file *m, void *data)
28092 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
28093 vma->vm_flags & VM_LOCKED ? 'l' : '-',
28094 vma->vm_flags & VM_IO ? 'i' : '-',
28095 +#ifdef CONFIG_GRKERNSEC_HIDESYM
28096 + 0);
28097 +#else
28098 vma->vm_pgoff);
28099 +#endif
28100
28101 #if defined(__i386__)
28102 pgprot = pgprot_val(vma->vm_page_prot);
28103 diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
28104 index ddd70db..40321e6 100644
28105 --- a/drivers/gpu/drm/drm_ioc32.c
28106 +++ b/drivers/gpu/drm/drm_ioc32.c
28107 @@ -456,7 +456,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
28108 request = compat_alloc_user_space(nbytes);
28109 if (!access_ok(VERIFY_WRITE, request, nbytes))
28110 return -EFAULT;
28111 - list = (struct drm_buf_desc *) (request + 1);
28112 + list = (struct drm_buf_desc __user *) (request + 1);
28113
28114 if (__put_user(count, &request->count)
28115 || __put_user(list, &request->list))
28116 @@ -517,7 +517,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
28117 request = compat_alloc_user_space(nbytes);
28118 if (!access_ok(VERIFY_WRITE, request, nbytes))
28119 return -EFAULT;
28120 - list = (struct drm_buf_pub *) (request + 1);
28121 + list = (struct drm_buf_pub __user *) (request + 1);
28122
28123 if (__put_user(count, &request->count)
28124 || __put_user(list, &request->list))
28125 diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
28126 index 904d7e9..ab88581 100644
28127 --- a/drivers/gpu/drm/drm_ioctl.c
28128 +++ b/drivers/gpu/drm/drm_ioctl.c
28129 @@ -256,7 +256,7 @@ int drm_getstats(struct drm_device *dev, void *data,
28130 stats->data[i].value =
28131 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
28132 else
28133 - stats->data[i].value = atomic_read(&dev->counts[i]);
28134 + stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
28135 stats->data[i].type = dev->types[i];
28136 }
28137
28138 diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
28139 index 632ae24..244cf4a 100644
28140 --- a/drivers/gpu/drm/drm_lock.c
28141 +++ b/drivers/gpu/drm/drm_lock.c
28142 @@ -89,7 +89,7 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
28143 if (drm_lock_take(&master->lock, lock->context)) {
28144 master->lock.file_priv = file_priv;
28145 master->lock.lock_time = jiffies;
28146 - atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
28147 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
28148 break; /* Got lock */
28149 }
28150
28151 @@ -160,7 +160,7 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
28152 return -EINVAL;
28153 }
28154
28155 - atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
28156 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
28157
28158 if (drm_lock_free(&master->lock, lock->context)) {
28159 /* FIXME: Should really bail out here. */
28160 diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
28161 index 8f371e8..9f85d52 100644
28162 --- a/drivers/gpu/drm/i810/i810_dma.c
28163 +++ b/drivers/gpu/drm/i810/i810_dma.c
28164 @@ -950,8 +950,8 @@ static int i810_dma_vertex(struct drm_device *dev, void *data,
28165 dma->buflist[vertex->idx],
28166 vertex->discard, vertex->used);
28167
28168 - atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
28169 - atomic_inc(&dev->counts[_DRM_STAT_DMA]);
28170 + atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
28171 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
28172 sarea_priv->last_enqueue = dev_priv->counter - 1;
28173 sarea_priv->last_dispatch = (int)hw_status[5];
28174
28175 @@ -1111,8 +1111,8 @@ static int i810_dma_mc(struct drm_device *dev, void *data,
28176 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
28177 mc->last_render);
28178
28179 - atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
28180 - atomic_inc(&dev->counts[_DRM_STAT_DMA]);
28181 + atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
28182 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
28183 sarea_priv->last_enqueue = dev_priv->counter - 1;
28184 sarea_priv->last_dispatch = (int)hw_status[5];
28185
28186 diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
28187 index c9339f4..f5e1b9d 100644
28188 --- a/drivers/gpu/drm/i810/i810_drv.h
28189 +++ b/drivers/gpu/drm/i810/i810_drv.h
28190 @@ -108,8 +108,8 @@ typedef struct drm_i810_private {
28191 int page_flipping;
28192
28193 wait_queue_head_t irq_queue;
28194 - atomic_t irq_received;
28195 - atomic_t irq_emitted;
28196 + atomic_unchecked_t irq_received;
28197 + atomic_unchecked_t irq_emitted;
28198
28199 int front_offset;
28200 } drm_i810_private_t;
28201 diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
28202 index 004b048..7588eba 100644
28203 --- a/drivers/gpu/drm/i915/i915_debugfs.c
28204 +++ b/drivers/gpu/drm/i915/i915_debugfs.c
28205 @@ -499,7 +499,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
28206 I915_READ(GTIMR));
28207 }
28208 seq_printf(m, "Interrupts received: %d\n",
28209 - atomic_read(&dev_priv->irq_received));
28210 + atomic_read_unchecked(&dev_priv->irq_received));
28211 for (i = 0; i < I915_NUM_RINGS; i++) {
28212 if (IS_GEN6(dev) || IS_GEN7(dev)) {
28213 seq_printf(m, "Graphics Interrupt mask (%s): %08x\n",
28214 @@ -1232,7 +1232,7 @@ static int i915_opregion(struct seq_file *m, void *unused)
28215 return ret;
28216
28217 if (opregion->header)
28218 - seq_write(m, opregion->header, OPREGION_SIZE);
28219 + seq_write(m, (const void __force_kernel *)opregion->header, OPREGION_SIZE);
28220
28221 mutex_unlock(&dev->struct_mutex);
28222
28223 diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
28224 index a9ae374..43c1e9e 100644
28225 --- a/drivers/gpu/drm/i915/i915_dma.c
28226 +++ b/drivers/gpu/drm/i915/i915_dma.c
28227 @@ -1172,7 +1172,7 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
28228 bool can_switch;
28229
28230 spin_lock(&dev->count_lock);
28231 - can_switch = (dev->open_count == 0);
28232 + can_switch = (local_read(&dev->open_count) == 0);
28233 spin_unlock(&dev->count_lock);
28234 return can_switch;
28235 }
28236 diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
28237 index 554bef7..d24791c 100644
28238 --- a/drivers/gpu/drm/i915/i915_drv.h
28239 +++ b/drivers/gpu/drm/i915/i915_drv.h
28240 @@ -229,7 +229,7 @@ struct drm_i915_display_funcs {
28241 /* render clock increase/decrease */
28242 /* display clock increase/decrease */
28243 /* pll clock increase/decrease */
28244 -};
28245 +} __no_const;
28246
28247 struct intel_device_info {
28248 u8 gen;
28249 @@ -312,7 +312,7 @@ typedef struct drm_i915_private {
28250 int current_page;
28251 int page_flipping;
28252
28253 - atomic_t irq_received;
28254 + atomic_unchecked_t irq_received;
28255
28256 /* protects the irq masks */
28257 spinlock_t irq_lock;
28258 @@ -887,7 +887,7 @@ struct drm_i915_gem_object {
28259 * will be page flipped away on the next vblank. When it
28260 * reaches 0, dev_priv->pending_flip_queue will be woken up.
28261 */
28262 - atomic_t pending_flip;
28263 + atomic_unchecked_t pending_flip;
28264 };
28265
28266 #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
28267 @@ -1267,7 +1267,7 @@ extern int intel_setup_gmbus(struct drm_device *dev);
28268 extern void intel_teardown_gmbus(struct drm_device *dev);
28269 extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
28270 extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
28271 -extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
28272 +static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
28273 {
28274 return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
28275 }
28276 diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
28277 index b9da890..cad1d98 100644
28278 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
28279 +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
28280 @@ -189,7 +189,7 @@ i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj,
28281 i915_gem_clflush_object(obj);
28282
28283 if (obj->base.pending_write_domain)
28284 - cd->flips |= atomic_read(&obj->pending_flip);
28285 + cd->flips |= atomic_read_unchecked(&obj->pending_flip);
28286
28287 /* The actual obj->write_domain will be updated with
28288 * pending_write_domain after we emit the accumulated flush for all
28289 @@ -882,9 +882,9 @@ i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
28290
28291 static int
28292 validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
28293 - int count)
28294 + unsigned int count)
28295 {
28296 - int i;
28297 + unsigned int i;
28298
28299 for (i = 0; i < count; i++) {
28300 char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
28301 diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
28302 index b40004b..7c53a75 100644
28303 --- a/drivers/gpu/drm/i915/i915_irq.c
28304 +++ b/drivers/gpu/drm/i915/i915_irq.c
28305 @@ -475,7 +475,7 @@ static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
28306 u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
28307 struct drm_i915_master_private *master_priv;
28308
28309 - atomic_inc(&dev_priv->irq_received);
28310 + atomic_inc_unchecked(&dev_priv->irq_received);
28311
28312 /* disable master interrupt before clearing iir */
28313 de_ier = I915_READ(DEIER);
28314 @@ -566,7 +566,7 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
28315 struct drm_i915_master_private *master_priv;
28316 u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT;
28317
28318 - atomic_inc(&dev_priv->irq_received);
28319 + atomic_inc_unchecked(&dev_priv->irq_received);
28320
28321 if (IS_GEN6(dev))
28322 bsd_usr_interrupt = GT_GEN6_BSD_USER_INTERRUPT;
28323 @@ -1231,7 +1231,7 @@ static irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
28324 int ret = IRQ_NONE, pipe;
28325 bool blc_event = false;
28326
28327 - atomic_inc(&dev_priv->irq_received);
28328 + atomic_inc_unchecked(&dev_priv->irq_received);
28329
28330 iir = I915_READ(IIR);
28331
28332 @@ -1743,7 +1743,7 @@ static void ironlake_irq_preinstall(struct drm_device *dev)
28333 {
28334 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
28335
28336 - atomic_set(&dev_priv->irq_received, 0);
28337 + atomic_set_unchecked(&dev_priv->irq_received, 0);
28338
28339 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
28340 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
28341 @@ -1931,7 +1931,7 @@ static void i915_driver_irq_preinstall(struct drm_device * dev)
28342 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
28343 int pipe;
28344
28345 - atomic_set(&dev_priv->irq_received, 0);
28346 + atomic_set_unchecked(&dev_priv->irq_received, 0);
28347
28348 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
28349 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
28350 diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
28351 index daa5743..c0757a9 100644
28352 --- a/drivers/gpu/drm/i915/intel_display.c
28353 +++ b/drivers/gpu/drm/i915/intel_display.c
28354 @@ -2230,7 +2230,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
28355
28356 wait_event(dev_priv->pending_flip_queue,
28357 atomic_read(&dev_priv->mm.wedged) ||
28358 - atomic_read(&obj->pending_flip) == 0);
28359 + atomic_read_unchecked(&obj->pending_flip) == 0);
28360
28361 /* Big Hammer, we also need to ensure that any pending
28362 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
28363 @@ -2851,7 +2851,7 @@ static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
28364 obj = to_intel_framebuffer(crtc->fb)->obj;
28365 dev_priv = crtc->dev->dev_private;
28366 wait_event(dev_priv->pending_flip_queue,
28367 - atomic_read(&obj->pending_flip) == 0);
28368 + atomic_read_unchecked(&obj->pending_flip) == 0);
28369 }
28370
28371 static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
28372 @@ -6952,7 +6952,7 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
28373
28374 atomic_clear_mask(1 << intel_crtc->plane,
28375 &obj->pending_flip.counter);
28376 - if (atomic_read(&obj->pending_flip) == 0)
28377 + if (atomic_read_unchecked(&obj->pending_flip) == 0)
28378 wake_up(&dev_priv->pending_flip_queue);
28379
28380 schedule_work(&work->work);
28381 @@ -7242,7 +7242,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
28382 /* Block clients from rendering to the new back buffer until
28383 * the flip occurs and the object is no longer visible.
28384 */
28385 - atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
28386 + atomic_add_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
28387
28388 ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
28389 if (ret)
28390 @@ -7256,7 +7256,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
28391 return 0;
28392
28393 cleanup_pending:
28394 - atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
28395 + atomic_sub_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
28396 drm_gem_object_unreference(&work->old_fb_obj->base);
28397 drm_gem_object_unreference(&obj->base);
28398 mutex_unlock(&dev->struct_mutex);
28399 diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
28400 index 54558a0..2d97005 100644
28401 --- a/drivers/gpu/drm/mga/mga_drv.h
28402 +++ b/drivers/gpu/drm/mga/mga_drv.h
28403 @@ -120,9 +120,9 @@ typedef struct drm_mga_private {
28404 u32 clear_cmd;
28405 u32 maccess;
28406
28407 - atomic_t vbl_received; /**< Number of vblanks received. */
28408 + atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
28409 wait_queue_head_t fence_queue;
28410 - atomic_t last_fence_retired;
28411 + atomic_unchecked_t last_fence_retired;
28412 u32 next_fence_to_post;
28413
28414 unsigned int fb_cpp;
28415 diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
28416 index 2581202..f230a8d9 100644
28417 --- a/drivers/gpu/drm/mga/mga_irq.c
28418 +++ b/drivers/gpu/drm/mga/mga_irq.c
28419 @@ -44,7 +44,7 @@ u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
28420 if (crtc != 0)
28421 return 0;
28422
28423 - return atomic_read(&dev_priv->vbl_received);
28424 + return atomic_read_unchecked(&dev_priv->vbl_received);
28425 }
28426
28427
28428 @@ -60,7 +60,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
28429 /* VBLANK interrupt */
28430 if (status & MGA_VLINEPEN) {
28431 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
28432 - atomic_inc(&dev_priv->vbl_received);
28433 + atomic_inc_unchecked(&dev_priv->vbl_received);
28434 drm_handle_vblank(dev, 0);
28435 handled = 1;
28436 }
28437 @@ -79,7 +79,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
28438 if ((prim_start & ~0x03) != (prim_end & ~0x03))
28439 MGA_WRITE(MGA_PRIMEND, prim_end);
28440
28441 - atomic_inc(&dev_priv->last_fence_retired);
28442 + atomic_inc_unchecked(&dev_priv->last_fence_retired);
28443 DRM_WAKEUP(&dev_priv->fence_queue);
28444 handled = 1;
28445 }
28446 @@ -130,7 +130,7 @@ int mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence)
28447 * using fences.
28448 */
28449 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
28450 - (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
28451 + (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
28452 - *sequence) <= (1 << 23)));
28453
28454 *sequence = cur_fence;
28455 diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
28456 index 5fc201b..7b032b9 100644
28457 --- a/drivers/gpu/drm/nouveau/nouveau_bios.c
28458 +++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
28459 @@ -201,7 +201,7 @@ struct methods {
28460 const char desc[8];
28461 void (*loadbios)(struct drm_device *, uint8_t *);
28462 const bool rw;
28463 -};
28464 +} __do_const;
28465
28466 static struct methods shadow_methods[] = {
28467 { "PRAMIN", load_vbios_pramin, true },
28468 @@ -5474,7 +5474,7 @@ parse_bit_U_tbl_entry(struct drm_device *dev, struct nvbios *bios,
28469 struct bit_table {
28470 const char id;
28471 int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
28472 -};
28473 +} __no_const;
28474
28475 #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
28476
28477 diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
28478 index 4c0be3a..5757582 100644
28479 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h
28480 +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
28481 @@ -238,7 +238,7 @@ struct nouveau_channel {
28482 struct list_head pending;
28483 uint32_t sequence;
28484 uint32_t sequence_ack;
28485 - atomic_t last_sequence_irq;
28486 + atomic_unchecked_t last_sequence_irq;
28487 struct nouveau_vma vma;
28488 } fence;
28489
28490 @@ -319,7 +319,7 @@ struct nouveau_exec_engine {
28491 u32 handle, u16 class);
28492 void (*set_tile_region)(struct drm_device *dev, int i);
28493 void (*tlb_flush)(struct drm_device *, int engine);
28494 -};
28495 +} __no_const;
28496
28497 struct nouveau_instmem_engine {
28498 void *priv;
28499 @@ -341,13 +341,13 @@ struct nouveau_instmem_engine {
28500 struct nouveau_mc_engine {
28501 int (*init)(struct drm_device *dev);
28502 void (*takedown)(struct drm_device *dev);
28503 -};
28504 +} __no_const;
28505
28506 struct nouveau_timer_engine {
28507 int (*init)(struct drm_device *dev);
28508 void (*takedown)(struct drm_device *dev);
28509 uint64_t (*read)(struct drm_device *dev);
28510 -};
28511 +} __no_const;
28512
28513 struct nouveau_fb_engine {
28514 int num_tiles;
28515 @@ -558,7 +558,7 @@ struct nouveau_vram_engine {
28516 void (*put)(struct drm_device *, struct nouveau_mem **);
28517
28518 bool (*flags_valid)(struct drm_device *, u32 tile_flags);
28519 -};
28520 +} __no_const;
28521
28522 struct nouveau_engine {
28523 struct nouveau_instmem_engine instmem;
28524 @@ -706,7 +706,7 @@ struct drm_nouveau_private {
28525 struct drm_global_reference mem_global_ref;
28526 struct ttm_bo_global_ref bo_global_ref;
28527 struct ttm_bo_device bdev;
28528 - atomic_t validate_sequence;
28529 + atomic_unchecked_t validate_sequence;
28530 } ttm;
28531
28532 struct {
28533 diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
28534 index 2f6daae..c9d7b9e 100644
28535 --- a/drivers/gpu/drm/nouveau/nouveau_fence.c
28536 +++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
28537 @@ -85,7 +85,7 @@ nouveau_fence_update(struct nouveau_channel *chan)
28538 if (USE_REFCNT(dev))
28539 sequence = nvchan_rd32(chan, 0x48);
28540 else
28541 - sequence = atomic_read(&chan->fence.last_sequence_irq);
28542 + sequence = atomic_read_unchecked(&chan->fence.last_sequence_irq);
28543
28544 if (chan->fence.sequence_ack == sequence)
28545 goto out;
28546 @@ -539,7 +539,7 @@ nouveau_fence_channel_init(struct nouveau_channel *chan)
28547 return ret;
28548 }
28549
28550 - atomic_set(&chan->fence.last_sequence_irq, 0);
28551 + atomic_set_unchecked(&chan->fence.last_sequence_irq, 0);
28552 return 0;
28553 }
28554
28555 diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
28556 index 5f0bc57..eb9fac8 100644
28557 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c
28558 +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
28559 @@ -314,7 +314,7 @@ validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
28560 int trycnt = 0;
28561 int ret, i;
28562
28563 - sequence = atomic_add_return(1, &dev_priv->ttm.validate_sequence);
28564 + sequence = atomic_add_return_unchecked(1, &dev_priv->ttm.validate_sequence);
28565 retry:
28566 if (++trycnt > 100000) {
28567 NV_ERROR(dev, "%s failed and gave up.\n", __func__);
28568 diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
28569 index d8831ab..0ba8356 100644
28570 --- a/drivers/gpu/drm/nouveau/nouveau_state.c
28571 +++ b/drivers/gpu/drm/nouveau/nouveau_state.c
28572 @@ -542,7 +542,7 @@ static bool nouveau_switcheroo_can_switch(struct pci_dev *pdev)
28573 bool can_switch;
28574
28575 spin_lock(&dev->count_lock);
28576 - can_switch = (dev->open_count == 0);
28577 + can_switch = (local_read(&dev->open_count) == 0);
28578 spin_unlock(&dev->count_lock);
28579 return can_switch;
28580 }
28581 diff --git a/drivers/gpu/drm/nouveau/nv04_graph.c b/drivers/gpu/drm/nouveau/nv04_graph.c
28582 index dbdea8e..cd6eeeb 100644
28583 --- a/drivers/gpu/drm/nouveau/nv04_graph.c
28584 +++ b/drivers/gpu/drm/nouveau/nv04_graph.c
28585 @@ -554,7 +554,7 @@ static int
28586 nv04_graph_mthd_set_ref(struct nouveau_channel *chan,
28587 u32 class, u32 mthd, u32 data)
28588 {
28589 - atomic_set(&chan->fence.last_sequence_irq, data);
28590 + atomic_set_unchecked(&chan->fence.last_sequence_irq, data);
28591 return 0;
28592 }
28593
28594 diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
28595 index bcac90b..53bfc76 100644
28596 --- a/drivers/gpu/drm/r128/r128_cce.c
28597 +++ b/drivers/gpu/drm/r128/r128_cce.c
28598 @@ -378,7 +378,7 @@ static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init)
28599
28600 /* GH: Simple idle check.
28601 */
28602 - atomic_set(&dev_priv->idle_count, 0);
28603 + atomic_set_unchecked(&dev_priv->idle_count, 0);
28604
28605 /* We don't support anything other than bus-mastering ring mode,
28606 * but the ring can be in either AGP or PCI space for the ring
28607 diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
28608 index 930c71b..499aded 100644
28609 --- a/drivers/gpu/drm/r128/r128_drv.h
28610 +++ b/drivers/gpu/drm/r128/r128_drv.h
28611 @@ -90,14 +90,14 @@ typedef struct drm_r128_private {
28612 int is_pci;
28613 unsigned long cce_buffers_offset;
28614
28615 - atomic_t idle_count;
28616 + atomic_unchecked_t idle_count;
28617
28618 int page_flipping;
28619 int current_page;
28620 u32 crtc_offset;
28621 u32 crtc_offset_cntl;
28622
28623 - atomic_t vbl_received;
28624 + atomic_unchecked_t vbl_received;
28625
28626 u32 color_fmt;
28627 unsigned int front_offset;
28628 diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
28629 index 429d5a0..7e899ed 100644
28630 --- a/drivers/gpu/drm/r128/r128_irq.c
28631 +++ b/drivers/gpu/drm/r128/r128_irq.c
28632 @@ -42,7 +42,7 @@ u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
28633 if (crtc != 0)
28634 return 0;
28635
28636 - return atomic_read(&dev_priv->vbl_received);
28637 + return atomic_read_unchecked(&dev_priv->vbl_received);
28638 }
28639
28640 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
28641 @@ -56,7 +56,7 @@ irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
28642 /* VBLANK interrupt */
28643 if (status & R128_CRTC_VBLANK_INT) {
28644 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
28645 - atomic_inc(&dev_priv->vbl_received);
28646 + atomic_inc_unchecked(&dev_priv->vbl_received);
28647 drm_handle_vblank(dev, 0);
28648 return IRQ_HANDLED;
28649 }
28650 diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
28651 index a9e33ce..09edd4b 100644
28652 --- a/drivers/gpu/drm/r128/r128_state.c
28653 +++ b/drivers/gpu/drm/r128/r128_state.c
28654 @@ -321,10 +321,10 @@ static void r128_clear_box(drm_r128_private_t *dev_priv,
28655
28656 static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
28657 {
28658 - if (atomic_read(&dev_priv->idle_count) == 0)
28659 + if (atomic_read_unchecked(&dev_priv->idle_count) == 0)
28660 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
28661 else
28662 - atomic_set(&dev_priv->idle_count, 0);
28663 + atomic_set_unchecked(&dev_priv->idle_count, 0);
28664 }
28665
28666 #endif
28667 diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
28668 index 5a82b6b..9e69c73 100644
28669 --- a/drivers/gpu/drm/radeon/mkregtable.c
28670 +++ b/drivers/gpu/drm/radeon/mkregtable.c
28671 @@ -637,14 +637,14 @@ static int parser_auth(struct table *t, const char *filename)
28672 regex_t mask_rex;
28673 regmatch_t match[4];
28674 char buf[1024];
28675 - size_t end;
28676 + long end;
28677 int len;
28678 int done = 0;
28679 int r;
28680 unsigned o;
28681 struct offset *offset;
28682 char last_reg_s[10];
28683 - int last_reg;
28684 + unsigned long last_reg;
28685
28686 if (regcomp
28687 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
28688 diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
28689 index 8227e76..ce0b195 100644
28690 --- a/drivers/gpu/drm/radeon/radeon.h
28691 +++ b/drivers/gpu/drm/radeon/radeon.h
28692 @@ -192,7 +192,7 @@ extern int sumo_get_temp(struct radeon_device *rdev);
28693 */
28694 struct radeon_fence_driver {
28695 uint32_t scratch_reg;
28696 - atomic_t seq;
28697 + atomic_unchecked_t seq;
28698 uint32_t last_seq;
28699 unsigned long last_jiffies;
28700 unsigned long last_timeout;
28701 @@ -530,7 +530,7 @@ struct r600_blit_cp_primitives {
28702 int x2, int y2);
28703 void (*draw_auto)(struct radeon_device *rdev);
28704 void (*set_default_state)(struct radeon_device *rdev);
28705 -};
28706 +} __no_const;
28707
28708 struct r600_blit {
28709 struct mutex mutex;
28710 @@ -954,7 +954,7 @@ struct radeon_asic {
28711 void (*pre_page_flip)(struct radeon_device *rdev, int crtc);
28712 u32 (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base);
28713 void (*post_page_flip)(struct radeon_device *rdev, int crtc);
28714 -};
28715 +} __no_const;
28716
28717 /*
28718 * Asic structures
28719 diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
28720 index 9b39145..389b93b 100644
28721 --- a/drivers/gpu/drm/radeon/radeon_device.c
28722 +++ b/drivers/gpu/drm/radeon/radeon_device.c
28723 @@ -687,7 +687,7 @@ static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
28724 bool can_switch;
28725
28726 spin_lock(&dev->count_lock);
28727 - can_switch = (dev->open_count == 0);
28728 + can_switch = (local_read(&dev->open_count) == 0);
28729 spin_unlock(&dev->count_lock);
28730 return can_switch;
28731 }
28732 diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
28733 index a1b59ca..86f2d44 100644
28734 --- a/drivers/gpu/drm/radeon/radeon_drv.h
28735 +++ b/drivers/gpu/drm/radeon/radeon_drv.h
28736 @@ -255,7 +255,7 @@ typedef struct drm_radeon_private {
28737
28738 /* SW interrupt */
28739 wait_queue_head_t swi_queue;
28740 - atomic_t swi_emitted;
28741 + atomic_unchecked_t swi_emitted;
28742 int vblank_crtc;
28743 uint32_t irq_enable_reg;
28744 uint32_t r500_disp_irq_reg;
28745 diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
28746 index 76ec0e9..6feb1a3 100644
28747 --- a/drivers/gpu/drm/radeon/radeon_fence.c
28748 +++ b/drivers/gpu/drm/radeon/radeon_fence.c
28749 @@ -78,7 +78,7 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
28750 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
28751 return 0;
28752 }
28753 - fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
28754 + fence->seq = atomic_add_return_unchecked(1, &rdev->fence_drv.seq);
28755 if (!rdev->cp.ready)
28756 /* FIXME: cp is not running assume everythings is done right
28757 * away
28758 @@ -373,7 +373,7 @@ int radeon_fence_driver_init(struct radeon_device *rdev)
28759 return r;
28760 }
28761 radeon_fence_write(rdev, 0);
28762 - atomic_set(&rdev->fence_drv.seq, 0);
28763 + atomic_set_unchecked(&rdev->fence_drv.seq, 0);
28764 INIT_LIST_HEAD(&rdev->fence_drv.created);
28765 INIT_LIST_HEAD(&rdev->fence_drv.emited);
28766 INIT_LIST_HEAD(&rdev->fence_drv.signaled);
28767 diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
28768 index 48b7cea..342236f 100644
28769 --- a/drivers/gpu/drm/radeon/radeon_ioc32.c
28770 +++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
28771 @@ -359,7 +359,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
28772 request = compat_alloc_user_space(sizeof(*request));
28773 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
28774 || __put_user(req32.param, &request->param)
28775 - || __put_user((void __user *)(unsigned long)req32.value,
28776 + || __put_user((unsigned long)req32.value,
28777 &request->value))
28778 return -EFAULT;
28779
28780 diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
28781 index 00da384..32f972d 100644
28782 --- a/drivers/gpu/drm/radeon/radeon_irq.c
28783 +++ b/drivers/gpu/drm/radeon/radeon_irq.c
28784 @@ -225,8 +225,8 @@ static int radeon_emit_irq(struct drm_device * dev)
28785 unsigned int ret;
28786 RING_LOCALS;
28787
28788 - atomic_inc(&dev_priv->swi_emitted);
28789 - ret = atomic_read(&dev_priv->swi_emitted);
28790 + atomic_inc_unchecked(&dev_priv->swi_emitted);
28791 + ret = atomic_read_unchecked(&dev_priv->swi_emitted);
28792
28793 BEGIN_RING(4);
28794 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
28795 @@ -352,7 +352,7 @@ int radeon_driver_irq_postinstall(struct drm_device *dev)
28796 drm_radeon_private_t *dev_priv =
28797 (drm_radeon_private_t *) dev->dev_private;
28798
28799 - atomic_set(&dev_priv->swi_emitted, 0);
28800 + atomic_set_unchecked(&dev_priv->swi_emitted, 0);
28801 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
28802
28803 dev->max_vblank_count = 0x001fffff;
28804 diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
28805 index e8422ae..d22d4a8 100644
28806 --- a/drivers/gpu/drm/radeon/radeon_state.c
28807 +++ b/drivers/gpu/drm/radeon/radeon_state.c
28808 @@ -2168,7 +2168,7 @@ static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file *
28809 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
28810 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
28811
28812 - if (DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
28813 + if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
28814 sarea_priv->nbox * sizeof(depth_boxes[0])))
28815 return -EFAULT;
28816
28817 @@ -3031,7 +3031,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
28818 {
28819 drm_radeon_private_t *dev_priv = dev->dev_private;
28820 drm_radeon_getparam_t *param = data;
28821 - int value;
28822 + int value = 0;
28823
28824 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
28825
28826 diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
28827 index 0b5468b..9c4b308 100644
28828 --- a/drivers/gpu/drm/radeon/radeon_ttm.c
28829 +++ b/drivers/gpu/drm/radeon/radeon_ttm.c
28830 @@ -672,8 +672,10 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
28831 }
28832 if (unlikely(ttm_vm_ops == NULL)) {
28833 ttm_vm_ops = vma->vm_ops;
28834 - radeon_ttm_vm_ops = *ttm_vm_ops;
28835 - radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
28836 + pax_open_kernel();
28837 + memcpy((void *)&radeon_ttm_vm_ops, ttm_vm_ops, sizeof(radeon_ttm_vm_ops));
28838 + *(void **)&radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
28839 + pax_close_kernel();
28840 }
28841 vma->vm_ops = &radeon_ttm_vm_ops;
28842 return 0;
28843 diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
28844 index a9049ed..501f284 100644
28845 --- a/drivers/gpu/drm/radeon/rs690.c
28846 +++ b/drivers/gpu/drm/radeon/rs690.c
28847 @@ -304,9 +304,11 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
28848 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
28849 rdev->pm.sideport_bandwidth.full)
28850 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
28851 - read_delay_latency.full = dfixed_const(370 * 800 * 1000);
28852 + read_delay_latency.full = dfixed_const(800 * 1000);
28853 read_delay_latency.full = dfixed_div(read_delay_latency,
28854 rdev->pm.igp_sideport_mclk);
28855 + a.full = dfixed_const(370);
28856 + read_delay_latency.full = dfixed_mul(read_delay_latency, a);
28857 } else {
28858 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
28859 rdev->pm.k8_bandwidth.full)
28860 diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
28861 index 727e93d..1565650 100644
28862 --- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
28863 +++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
28864 @@ -398,9 +398,9 @@ static int ttm_pool_get_num_unused_pages(void)
28865 static int ttm_pool_mm_shrink(struct shrinker *shrink,
28866 struct shrink_control *sc)
28867 {
28868 - static atomic_t start_pool = ATOMIC_INIT(0);
28869 + static atomic_unchecked_t start_pool = ATOMIC_INIT(0);
28870 unsigned i;
28871 - unsigned pool_offset = atomic_add_return(1, &start_pool);
28872 + unsigned pool_offset = atomic_add_return_unchecked(1, &start_pool);
28873 struct ttm_page_pool *pool;
28874 int shrink_pages = sc->nr_to_scan;
28875
28876 diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
28877 index 9cf87d9..2000b7d 100644
28878 --- a/drivers/gpu/drm/via/via_drv.h
28879 +++ b/drivers/gpu/drm/via/via_drv.h
28880 @@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
28881 typedef uint32_t maskarray_t[5];
28882
28883 typedef struct drm_via_irq {
28884 - atomic_t irq_received;
28885 + atomic_unchecked_t irq_received;
28886 uint32_t pending_mask;
28887 uint32_t enable_mask;
28888 wait_queue_head_t irq_queue;
28889 @@ -75,7 +75,7 @@ typedef struct drm_via_private {
28890 struct timeval last_vblank;
28891 int last_vblank_valid;
28892 unsigned usec_per_vblank;
28893 - atomic_t vbl_received;
28894 + atomic_unchecked_t vbl_received;
28895 drm_via_state_t hc_state;
28896 char pci_buf[VIA_PCI_BUF_SIZE];
28897 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
28898 diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
28899 index d391f48..10c8ca3 100644
28900 --- a/drivers/gpu/drm/via/via_irq.c
28901 +++ b/drivers/gpu/drm/via/via_irq.c
28902 @@ -102,7 +102,7 @@ u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
28903 if (crtc != 0)
28904 return 0;
28905
28906 - return atomic_read(&dev_priv->vbl_received);
28907 + return atomic_read_unchecked(&dev_priv->vbl_received);
28908 }
28909
28910 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
28911 @@ -117,8 +117,8 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
28912
28913 status = VIA_READ(VIA_REG_INTERRUPT);
28914 if (status & VIA_IRQ_VBLANK_PENDING) {
28915 - atomic_inc(&dev_priv->vbl_received);
28916 - if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
28917 + atomic_inc_unchecked(&dev_priv->vbl_received);
28918 + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
28919 do_gettimeofday(&cur_vblank);
28920 if (dev_priv->last_vblank_valid) {
28921 dev_priv->usec_per_vblank =
28922 @@ -128,7 +128,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
28923 dev_priv->last_vblank = cur_vblank;
28924 dev_priv->last_vblank_valid = 1;
28925 }
28926 - if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
28927 + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
28928 DRM_DEBUG("US per vblank is: %u\n",
28929 dev_priv->usec_per_vblank);
28930 }
28931 @@ -138,7 +138,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
28932
28933 for (i = 0; i < dev_priv->num_irqs; ++i) {
28934 if (status & cur_irq->pending_mask) {
28935 - atomic_inc(&cur_irq->irq_received);
28936 + atomic_inc_unchecked(&cur_irq->irq_received);
28937 DRM_WAKEUP(&cur_irq->irq_queue);
28938 handled = 1;
28939 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
28940 @@ -243,11 +243,11 @@ via_driver_irq_wait(struct drm_device *dev, unsigned int irq, int force_sequence
28941 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
28942 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
28943 masks[irq][4]));
28944 - cur_irq_sequence = atomic_read(&cur_irq->irq_received);
28945 + cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
28946 } else {
28947 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
28948 (((cur_irq_sequence =
28949 - atomic_read(&cur_irq->irq_received)) -
28950 + atomic_read_unchecked(&cur_irq->irq_received)) -
28951 *sequence) <= (1 << 23)));
28952 }
28953 *sequence = cur_irq_sequence;
28954 @@ -285,7 +285,7 @@ void via_driver_irq_preinstall(struct drm_device *dev)
28955 }
28956
28957 for (i = 0; i < dev_priv->num_irqs; ++i) {
28958 - atomic_set(&cur_irq->irq_received, 0);
28959 + atomic_set_unchecked(&cur_irq->irq_received, 0);
28960 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
28961 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
28962 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
28963 @@ -367,7 +367,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
28964 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
28965 case VIA_IRQ_RELATIVE:
28966 irqwait->request.sequence +=
28967 - atomic_read(&cur_irq->irq_received);
28968 + atomic_read_unchecked(&cur_irq->irq_received);
28969 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
28970 case VIA_IRQ_ABSOLUTE:
28971 break;
28972 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
28973 index dc27970..f18b008 100644
28974 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
28975 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
28976 @@ -260,7 +260,7 @@ struct vmw_private {
28977 * Fencing and IRQs.
28978 */
28979
28980 - atomic_t marker_seq;
28981 + atomic_unchecked_t marker_seq;
28982 wait_queue_head_t fence_queue;
28983 wait_queue_head_t fifo_queue;
28984 int fence_queue_waiters; /* Protected by hw_mutex */
28985 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
28986 index a0c2f12..68ae6cb 100644
28987 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
28988 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
28989 @@ -137,7 +137,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
28990 (unsigned int) min,
28991 (unsigned int) fifo->capabilities);
28992
28993 - atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
28994 + atomic_set_unchecked(&dev_priv->marker_seq, dev_priv->last_read_seqno);
28995 iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
28996 vmw_marker_queue_init(&fifo->marker_queue);
28997 return vmw_fifo_send_fence(dev_priv, &dummy);
28998 @@ -355,7 +355,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
28999 if (reserveable)
29000 iowrite32(bytes, fifo_mem +
29001 SVGA_FIFO_RESERVED);
29002 - return fifo_mem + (next_cmd >> 2);
29003 + return (__le32 __force_kernel *)fifo_mem + (next_cmd >> 2);
29004 } else {
29005 need_bounce = true;
29006 }
29007 @@ -475,7 +475,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
29008
29009 fm = vmw_fifo_reserve(dev_priv, bytes);
29010 if (unlikely(fm == NULL)) {
29011 - *seqno = atomic_read(&dev_priv->marker_seq);
29012 + *seqno = atomic_read_unchecked(&dev_priv->marker_seq);
29013 ret = -ENOMEM;
29014 (void)vmw_fallback_wait(dev_priv, false, true, *seqno,
29015 false, 3*HZ);
29016 @@ -483,7 +483,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
29017 }
29018
29019 do {
29020 - *seqno = atomic_add_return(1, &dev_priv->marker_seq);
29021 + *seqno = atomic_add_return_unchecked(1, &dev_priv->marker_seq);
29022 } while (*seqno == 0);
29023
29024 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
29025 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
29026 index cabc95f..14b3d77 100644
29027 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
29028 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
29029 @@ -107,7 +107,7 @@ bool vmw_seqno_passed(struct vmw_private *dev_priv,
29030 * emitted. Then the fence is stale and signaled.
29031 */
29032
29033 - ret = ((atomic_read(&dev_priv->marker_seq) - seqno)
29034 + ret = ((atomic_read_unchecked(&dev_priv->marker_seq) - seqno)
29035 > VMW_FENCE_WRAP);
29036
29037 return ret;
29038 @@ -138,7 +138,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
29039
29040 if (fifo_idle)
29041 down_read(&fifo_state->rwsem);
29042 - signal_seq = atomic_read(&dev_priv->marker_seq);
29043 + signal_seq = atomic_read_unchecked(&dev_priv->marker_seq);
29044 ret = 0;
29045
29046 for (;;) {
29047 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
29048 index 8a8725c..afed796 100644
29049 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
29050 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
29051 @@ -151,7 +151,7 @@ int vmw_wait_lag(struct vmw_private *dev_priv,
29052 while (!vmw_lag_lt(queue, us)) {
29053 spin_lock(&queue->lock);
29054 if (list_empty(&queue->head))
29055 - seqno = atomic_read(&dev_priv->marker_seq);
29056 + seqno = atomic_read_unchecked(&dev_priv->marker_seq);
29057 else {
29058 marker = list_first_entry(&queue->head,
29059 struct vmw_marker, head);
29060 diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
29061 index bb656d8..4169fca 100644
29062 --- a/drivers/hid/hid-core.c
29063 +++ b/drivers/hid/hid-core.c
29064 @@ -2012,7 +2012,7 @@ static bool hid_ignore(struct hid_device *hdev)
29065
29066 int hid_add_device(struct hid_device *hdev)
29067 {
29068 - static atomic_t id = ATOMIC_INIT(0);
29069 + static atomic_unchecked_t id = ATOMIC_INIT(0);
29070 int ret;
29071
29072 if (WARN_ON(hdev->status & HID_STAT_ADDED))
29073 @@ -2027,7 +2027,7 @@ int hid_add_device(struct hid_device *hdev)
29074 /* XXX hack, any other cleaner solution after the driver core
29075 * is converted to allow more than 20 bytes as the device name? */
29076 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
29077 - hdev->vendor, hdev->product, atomic_inc_return(&id));
29078 + hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
29079
29080 hid_debug_register(hdev, dev_name(&hdev->dev));
29081 ret = device_add(&hdev->dev);
29082 diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
29083 index 4ef02b2..8a96831 100644
29084 --- a/drivers/hid/usbhid/hiddev.c
29085 +++ b/drivers/hid/usbhid/hiddev.c
29086 @@ -624,7 +624,7 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
29087 break;
29088
29089 case HIDIOCAPPLICATION:
29090 - if (arg < 0 || arg >= hid->maxapplication)
29091 + if (arg >= hid->maxapplication)
29092 break;
29093
29094 for (i = 0; i < hid->maxcollection; i++)
29095 diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
29096 index 4065374..10ed7dc 100644
29097 --- a/drivers/hv/channel.c
29098 +++ b/drivers/hv/channel.c
29099 @@ -400,8 +400,8 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
29100 int ret = 0;
29101 int t;
29102
29103 - next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
29104 - atomic_inc(&vmbus_connection.next_gpadl_handle);
29105 + next_gpadl_handle = atomic_read_unchecked(&vmbus_connection.next_gpadl_handle);
29106 + atomic_inc_unchecked(&vmbus_connection.next_gpadl_handle);
29107
29108 ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
29109 if (ret)
29110 diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
29111 index 0fb100e..baf87e5 100644
29112 --- a/drivers/hv/hv.c
29113 +++ b/drivers/hv/hv.c
29114 @@ -132,7 +132,7 @@ static u64 do_hypercall(u64 control, void *input, void *output)
29115 u64 output_address = (output) ? virt_to_phys(output) : 0;
29116 u32 output_address_hi = output_address >> 32;
29117 u32 output_address_lo = output_address & 0xFFFFFFFF;
29118 - void *hypercall_page = hv_context.hypercall_page;
29119 + void *hypercall_page = ktva_ktla(hv_context.hypercall_page);
29120
29121 __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
29122 "=a"(hv_status_lo) : "d" (control_hi),
29123 diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
29124 index 0aee112..b72d21f 100644
29125 --- a/drivers/hv/hyperv_vmbus.h
29126 +++ b/drivers/hv/hyperv_vmbus.h
29127 @@ -556,7 +556,7 @@ enum vmbus_connect_state {
29128 struct vmbus_connection {
29129 enum vmbus_connect_state conn_state;
29130
29131 - atomic_t next_gpadl_handle;
29132 + atomic_unchecked_t next_gpadl_handle;
29133
29134 /*
29135 * Represents channel interrupts. Each bit position represents a
29136 diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
29137 index d2d0a2a..90b8f4d 100644
29138 --- a/drivers/hv/vmbus_drv.c
29139 +++ b/drivers/hv/vmbus_drv.c
29140 @@ -663,10 +663,10 @@ int vmbus_device_register(struct hv_device *child_device_obj)
29141 {
29142 int ret = 0;
29143
29144 - static atomic_t device_num = ATOMIC_INIT(0);
29145 + static atomic_unchecked_t device_num = ATOMIC_INIT(0);
29146
29147 dev_set_name(&child_device_obj->device, "vmbus_0_%d",
29148 - atomic_inc_return(&device_num));
29149 + atomic_inc_return_unchecked(&device_num));
29150
29151 child_device_obj->device.bus = &hv_bus;
29152 child_device_obj->device.parent = &hv_acpi_dev->dev;
29153 diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
29154 index 66f6729..2d6de0a 100644
29155 --- a/drivers/hwmon/acpi_power_meter.c
29156 +++ b/drivers/hwmon/acpi_power_meter.c
29157 @@ -316,8 +316,6 @@ static ssize_t set_trip(struct device *dev, struct device_attribute *devattr,
29158 return res;
29159
29160 temp /= 1000;
29161 - if (temp < 0)
29162 - return -EINVAL;
29163
29164 mutex_lock(&resource->lock);
29165 resource->trip[attr->index - 7] = temp;
29166 diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
29167 index fe4104c..346febb 100644
29168 --- a/drivers/hwmon/sht15.c
29169 +++ b/drivers/hwmon/sht15.c
29170 @@ -166,7 +166,7 @@ struct sht15_data {
29171 int supply_uV;
29172 bool supply_uV_valid;
29173 struct work_struct update_supply_work;
29174 - atomic_t interrupt_handled;
29175 + atomic_unchecked_t interrupt_handled;
29176 };
29177
29178 /**
29179 @@ -509,13 +509,13 @@ static int sht15_measurement(struct sht15_data *data,
29180 return ret;
29181
29182 gpio_direction_input(data->pdata->gpio_data);
29183 - atomic_set(&data->interrupt_handled, 0);
29184 + atomic_set_unchecked(&data->interrupt_handled, 0);
29185
29186 enable_irq(gpio_to_irq(data->pdata->gpio_data));
29187 if (gpio_get_value(data->pdata->gpio_data) == 0) {
29188 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
29189 /* Only relevant if the interrupt hasn't occurred. */
29190 - if (!atomic_read(&data->interrupt_handled))
29191 + if (!atomic_read_unchecked(&data->interrupt_handled))
29192 schedule_work(&data->read_work);
29193 }
29194 ret = wait_event_timeout(data->wait_queue,
29195 @@ -782,7 +782,7 @@ static irqreturn_t sht15_interrupt_fired(int irq, void *d)
29196
29197 /* First disable the interrupt */
29198 disable_irq_nosync(irq);
29199 - atomic_inc(&data->interrupt_handled);
29200 + atomic_inc_unchecked(&data->interrupt_handled);
29201 /* Then schedule a reading work struct */
29202 if (data->state != SHT15_READING_NOTHING)
29203 schedule_work(&data->read_work);
29204 @@ -804,11 +804,11 @@ static void sht15_bh_read_data(struct work_struct *work_s)
29205 * If not, then start the interrupt again - care here as could
29206 * have gone low in meantime so verify it hasn't!
29207 */
29208 - atomic_set(&data->interrupt_handled, 0);
29209 + atomic_set_unchecked(&data->interrupt_handled, 0);
29210 enable_irq(gpio_to_irq(data->pdata->gpio_data));
29211 /* If still not occurred or another handler has been scheduled */
29212 if (gpio_get_value(data->pdata->gpio_data)
29213 - || atomic_read(&data->interrupt_handled))
29214 + || atomic_read_unchecked(&data->interrupt_handled))
29215 return;
29216 }
29217
29218 diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c
29219 index 378fcb5..5e91fa8 100644
29220 --- a/drivers/i2c/busses/i2c-amd756-s4882.c
29221 +++ b/drivers/i2c/busses/i2c-amd756-s4882.c
29222 @@ -43,7 +43,7 @@
29223 extern struct i2c_adapter amd756_smbus;
29224
29225 static struct i2c_adapter *s4882_adapter;
29226 -static struct i2c_algorithm *s4882_algo;
29227 +static i2c_algorithm_no_const *s4882_algo;
29228
29229 /* Wrapper access functions for multiplexed SMBus */
29230 static DEFINE_MUTEX(amd756_lock);
29231 diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c
29232 index 29015eb..af2d8e9 100644
29233 --- a/drivers/i2c/busses/i2c-nforce2-s4985.c
29234 +++ b/drivers/i2c/busses/i2c-nforce2-s4985.c
29235 @@ -41,7 +41,7 @@
29236 extern struct i2c_adapter *nforce2_smbus;
29237
29238 static struct i2c_adapter *s4985_adapter;
29239 -static struct i2c_algorithm *s4985_algo;
29240 +static i2c_algorithm_no_const *s4985_algo;
29241
29242 /* Wrapper access functions for multiplexed SMBus */
29243 static DEFINE_MUTEX(nforce2_lock);
29244 diff --git a/drivers/i2c/i2c-mux.c b/drivers/i2c/i2c-mux.c
29245 index d7a4833..7fae376 100644
29246 --- a/drivers/i2c/i2c-mux.c
29247 +++ b/drivers/i2c/i2c-mux.c
29248 @@ -28,7 +28,7 @@
29249 /* multiplexer per channel data */
29250 struct i2c_mux_priv {
29251 struct i2c_adapter adap;
29252 - struct i2c_algorithm algo;
29253 + i2c_algorithm_no_const algo;
29254
29255 struct i2c_adapter *parent;
29256 void *mux_dev; /* the mux chip/device */
29257 diff --git a/drivers/ide/aec62xx.c b/drivers/ide/aec62xx.c
29258 index 57d00ca..0145194 100644
29259 --- a/drivers/ide/aec62xx.c
29260 +++ b/drivers/ide/aec62xx.c
29261 @@ -181,7 +181,7 @@ static const struct ide_port_ops atp86x_port_ops = {
29262 .cable_detect = atp86x_cable_detect,
29263 };
29264
29265 -static const struct ide_port_info aec62xx_chipsets[] __devinitdata = {
29266 +static const struct ide_port_info aec62xx_chipsets[] __devinitconst = {
29267 { /* 0: AEC6210 */
29268 .name = DRV_NAME,
29269 .init_chipset = init_chipset_aec62xx,
29270 diff --git a/drivers/ide/alim15x3.c b/drivers/ide/alim15x3.c
29271 index 2c8016a..911a27c 100644
29272 --- a/drivers/ide/alim15x3.c
29273 +++ b/drivers/ide/alim15x3.c
29274 @@ -512,7 +512,7 @@ static const struct ide_dma_ops ali_dma_ops = {
29275 .dma_sff_read_status = ide_dma_sff_read_status,
29276 };
29277
29278 -static const struct ide_port_info ali15x3_chipset __devinitdata = {
29279 +static const struct ide_port_info ali15x3_chipset __devinitconst = {
29280 .name = DRV_NAME,
29281 .init_chipset = init_chipset_ali15x3,
29282 .init_hwif = init_hwif_ali15x3,
29283 diff --git a/drivers/ide/amd74xx.c b/drivers/ide/amd74xx.c
29284 index 3747b25..56fc995 100644
29285 --- a/drivers/ide/amd74xx.c
29286 +++ b/drivers/ide/amd74xx.c
29287 @@ -223,7 +223,7 @@ static const struct ide_port_ops amd_port_ops = {
29288 .udma_mask = udma, \
29289 }
29290
29291 -static const struct ide_port_info amd74xx_chipsets[] __devinitdata = {
29292 +static const struct ide_port_info amd74xx_chipsets[] __devinitconst = {
29293 /* 0: AMD7401 */ DECLARE_AMD_DEV(0x00, ATA_UDMA2),
29294 /* 1: AMD7409 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA4),
29295 /* 2: AMD7411/7441 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA5),
29296 diff --git a/drivers/ide/atiixp.c b/drivers/ide/atiixp.c
29297 index 15f0ead..cb43480 100644
29298 --- a/drivers/ide/atiixp.c
29299 +++ b/drivers/ide/atiixp.c
29300 @@ -139,7 +139,7 @@ static const struct ide_port_ops atiixp_port_ops = {
29301 .cable_detect = atiixp_cable_detect,
29302 };
29303
29304 -static const struct ide_port_info atiixp_pci_info[] __devinitdata = {
29305 +static const struct ide_port_info atiixp_pci_info[] __devinitconst = {
29306 { /* 0: IXP200/300/400/700 */
29307 .name = DRV_NAME,
29308 .enablebits = {{0x48,0x01,0x00}, {0x48,0x08,0x00}},
29309 diff --git a/drivers/ide/cmd64x.c b/drivers/ide/cmd64x.c
29310 index 5f80312..d1fc438 100644
29311 --- a/drivers/ide/cmd64x.c
29312 +++ b/drivers/ide/cmd64x.c
29313 @@ -327,7 +327,7 @@ static const struct ide_dma_ops cmd646_rev1_dma_ops = {
29314 .dma_sff_read_status = ide_dma_sff_read_status,
29315 };
29316
29317 -static const struct ide_port_info cmd64x_chipsets[] __devinitdata = {
29318 +static const struct ide_port_info cmd64x_chipsets[] __devinitconst = {
29319 { /* 0: CMD643 */
29320 .name = DRV_NAME,
29321 .init_chipset = init_chipset_cmd64x,
29322 diff --git a/drivers/ide/cs5520.c b/drivers/ide/cs5520.c
29323 index 2c1e5f7..1444762 100644
29324 --- a/drivers/ide/cs5520.c
29325 +++ b/drivers/ide/cs5520.c
29326 @@ -94,7 +94,7 @@ static const struct ide_port_ops cs5520_port_ops = {
29327 .set_dma_mode = cs5520_set_dma_mode,
29328 };
29329
29330 -static const struct ide_port_info cyrix_chipset __devinitdata = {
29331 +static const struct ide_port_info cyrix_chipset __devinitconst = {
29332 .name = DRV_NAME,
29333 .enablebits = { { 0x60, 0x01, 0x01 }, { 0x60, 0x02, 0x02 } },
29334 .port_ops = &cs5520_port_ops,
29335 diff --git a/drivers/ide/cs5530.c b/drivers/ide/cs5530.c
29336 index 4dc4eb9..49b40ad 100644
29337 --- a/drivers/ide/cs5530.c
29338 +++ b/drivers/ide/cs5530.c
29339 @@ -245,7 +245,7 @@ static const struct ide_port_ops cs5530_port_ops = {
29340 .udma_filter = cs5530_udma_filter,
29341 };
29342
29343 -static const struct ide_port_info cs5530_chipset __devinitdata = {
29344 +static const struct ide_port_info cs5530_chipset __devinitconst = {
29345 .name = DRV_NAME,
29346 .init_chipset = init_chipset_cs5530,
29347 .init_hwif = init_hwif_cs5530,
29348 diff --git a/drivers/ide/cs5535.c b/drivers/ide/cs5535.c
29349 index 5059faf..18d4c85 100644
29350 --- a/drivers/ide/cs5535.c
29351 +++ b/drivers/ide/cs5535.c
29352 @@ -170,7 +170,7 @@ static const struct ide_port_ops cs5535_port_ops = {
29353 .cable_detect = cs5535_cable_detect,
29354 };
29355
29356 -static const struct ide_port_info cs5535_chipset __devinitdata = {
29357 +static const struct ide_port_info cs5535_chipset __devinitconst = {
29358 .name = DRV_NAME,
29359 .port_ops = &cs5535_port_ops,
29360 .host_flags = IDE_HFLAG_SINGLE | IDE_HFLAG_POST_SET_MODE,
29361 diff --git a/drivers/ide/cy82c693.c b/drivers/ide/cy82c693.c
29362 index 847553f..3ffb49d 100644
29363 --- a/drivers/ide/cy82c693.c
29364 +++ b/drivers/ide/cy82c693.c
29365 @@ -163,7 +163,7 @@ static const struct ide_port_ops cy82c693_port_ops = {
29366 .set_dma_mode = cy82c693_set_dma_mode,
29367 };
29368
29369 -static const struct ide_port_info cy82c693_chipset __devinitdata = {
29370 +static const struct ide_port_info cy82c693_chipset __devinitconst = {
29371 .name = DRV_NAME,
29372 .init_iops = init_iops_cy82c693,
29373 .port_ops = &cy82c693_port_ops,
29374 diff --git a/drivers/ide/hpt366.c b/drivers/ide/hpt366.c
29375 index 58c51cd..4aec3b8 100644
29376 --- a/drivers/ide/hpt366.c
29377 +++ b/drivers/ide/hpt366.c
29378 @@ -443,7 +443,7 @@ static struct hpt_timings hpt37x_timings = {
29379 }
29380 };
29381
29382 -static const struct hpt_info hpt36x __devinitdata = {
29383 +static const struct hpt_info hpt36x __devinitconst = {
29384 .chip_name = "HPT36x",
29385 .chip_type = HPT36x,
29386 .udma_mask = HPT366_ALLOW_ATA66_3 ? (HPT366_ALLOW_ATA66_4 ? ATA_UDMA4 : ATA_UDMA3) : ATA_UDMA2,
29387 @@ -451,7 +451,7 @@ static const struct hpt_info hpt36x __devinitdata = {
29388 .timings = &hpt36x_timings
29389 };
29390
29391 -static const struct hpt_info hpt370 __devinitdata = {
29392 +static const struct hpt_info hpt370 __devinitconst = {
29393 .chip_name = "HPT370",
29394 .chip_type = HPT370,
29395 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
29396 @@ -459,7 +459,7 @@ static const struct hpt_info hpt370 __devinitdata = {
29397 .timings = &hpt37x_timings
29398 };
29399
29400 -static const struct hpt_info hpt370a __devinitdata = {
29401 +static const struct hpt_info hpt370a __devinitconst = {
29402 .chip_name = "HPT370A",
29403 .chip_type = HPT370A,
29404 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
29405 @@ -467,7 +467,7 @@ static const struct hpt_info hpt370a __devinitdata = {
29406 .timings = &hpt37x_timings
29407 };
29408
29409 -static const struct hpt_info hpt374 __devinitdata = {
29410 +static const struct hpt_info hpt374 __devinitconst = {
29411 .chip_name = "HPT374",
29412 .chip_type = HPT374,
29413 .udma_mask = ATA_UDMA5,
29414 @@ -475,7 +475,7 @@ static const struct hpt_info hpt374 __devinitdata = {
29415 .timings = &hpt37x_timings
29416 };
29417
29418 -static const struct hpt_info hpt372 __devinitdata = {
29419 +static const struct hpt_info hpt372 __devinitconst = {
29420 .chip_name = "HPT372",
29421 .chip_type = HPT372,
29422 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
29423 @@ -483,7 +483,7 @@ static const struct hpt_info hpt372 __devinitdata = {
29424 .timings = &hpt37x_timings
29425 };
29426
29427 -static const struct hpt_info hpt372a __devinitdata = {
29428 +static const struct hpt_info hpt372a __devinitconst = {
29429 .chip_name = "HPT372A",
29430 .chip_type = HPT372A,
29431 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
29432 @@ -491,7 +491,7 @@ static const struct hpt_info hpt372a __devinitdata = {
29433 .timings = &hpt37x_timings
29434 };
29435
29436 -static const struct hpt_info hpt302 __devinitdata = {
29437 +static const struct hpt_info hpt302 __devinitconst = {
29438 .chip_name = "HPT302",
29439 .chip_type = HPT302,
29440 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
29441 @@ -499,7 +499,7 @@ static const struct hpt_info hpt302 __devinitdata = {
29442 .timings = &hpt37x_timings
29443 };
29444
29445 -static const struct hpt_info hpt371 __devinitdata = {
29446 +static const struct hpt_info hpt371 __devinitconst = {
29447 .chip_name = "HPT371",
29448 .chip_type = HPT371,
29449 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
29450 @@ -507,7 +507,7 @@ static const struct hpt_info hpt371 __devinitdata = {
29451 .timings = &hpt37x_timings
29452 };
29453
29454 -static const struct hpt_info hpt372n __devinitdata = {
29455 +static const struct hpt_info hpt372n __devinitconst = {
29456 .chip_name = "HPT372N",
29457 .chip_type = HPT372N,
29458 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
29459 @@ -515,7 +515,7 @@ static const struct hpt_info hpt372n __devinitdata = {
29460 .timings = &hpt37x_timings
29461 };
29462
29463 -static const struct hpt_info hpt302n __devinitdata = {
29464 +static const struct hpt_info hpt302n __devinitconst = {
29465 .chip_name = "HPT302N",
29466 .chip_type = HPT302N,
29467 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
29468 @@ -523,7 +523,7 @@ static const struct hpt_info hpt302n __devinitdata = {
29469 .timings = &hpt37x_timings
29470 };
29471
29472 -static const struct hpt_info hpt371n __devinitdata = {
29473 +static const struct hpt_info hpt371n __devinitconst = {
29474 .chip_name = "HPT371N",
29475 .chip_type = HPT371N,
29476 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
29477 @@ -1361,7 +1361,7 @@ static const struct ide_dma_ops hpt36x_dma_ops = {
29478 .dma_sff_read_status = ide_dma_sff_read_status,
29479 };
29480
29481 -static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
29482 +static const struct ide_port_info hpt366_chipsets[] __devinitconst = {
29483 { /* 0: HPT36x */
29484 .name = DRV_NAME,
29485 .init_chipset = init_chipset_hpt366,
29486 diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
29487 index 8126824..55a2798 100644
29488 --- a/drivers/ide/ide-cd.c
29489 +++ b/drivers/ide/ide-cd.c
29490 @@ -768,7 +768,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
29491 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
29492 if ((unsigned long)buf & alignment
29493 || blk_rq_bytes(rq) & q->dma_pad_mask
29494 - || object_is_on_stack(buf))
29495 + || object_starts_on_stack(buf))
29496 drive->dma = 0;
29497 }
29498 }
29499 diff --git a/drivers/ide/ide-pci-generic.c b/drivers/ide/ide-pci-generic.c
29500 index a743e68..1cfd674 100644
29501 --- a/drivers/ide/ide-pci-generic.c
29502 +++ b/drivers/ide/ide-pci-generic.c
29503 @@ -53,7 +53,7 @@ static const struct ide_port_ops netcell_port_ops = {
29504 .udma_mask = ATA_UDMA6, \
29505 }
29506
29507 -static const struct ide_port_info generic_chipsets[] __devinitdata = {
29508 +static const struct ide_port_info generic_chipsets[] __devinitconst = {
29509 /* 0: Unknown */
29510 DECLARE_GENERIC_PCI_DEV(0),
29511
29512 diff --git a/drivers/ide/it8172.c b/drivers/ide/it8172.c
29513 index 560e66d..d5dd180 100644
29514 --- a/drivers/ide/it8172.c
29515 +++ b/drivers/ide/it8172.c
29516 @@ -115,7 +115,7 @@ static const struct ide_port_ops it8172_port_ops = {
29517 .set_dma_mode = it8172_set_dma_mode,
29518 };
29519
29520 -static const struct ide_port_info it8172_port_info __devinitdata = {
29521 +static const struct ide_port_info it8172_port_info __devinitconst = {
29522 .name = DRV_NAME,
29523 .port_ops = &it8172_port_ops,
29524 .enablebits = { {0x41, 0x80, 0x80}, {0x00, 0x00, 0x00} },
29525 diff --git a/drivers/ide/it8213.c b/drivers/ide/it8213.c
29526 index 46816ba..1847aeb 100644
29527 --- a/drivers/ide/it8213.c
29528 +++ b/drivers/ide/it8213.c
29529 @@ -156,7 +156,7 @@ static const struct ide_port_ops it8213_port_ops = {
29530 .cable_detect = it8213_cable_detect,
29531 };
29532
29533 -static const struct ide_port_info it8213_chipset __devinitdata = {
29534 +static const struct ide_port_info it8213_chipset __devinitconst = {
29535 .name = DRV_NAME,
29536 .enablebits = { {0x41, 0x80, 0x80} },
29537 .port_ops = &it8213_port_ops,
29538 diff --git a/drivers/ide/it821x.c b/drivers/ide/it821x.c
29539 index 2e3169f..c5611db 100644
29540 --- a/drivers/ide/it821x.c
29541 +++ b/drivers/ide/it821x.c
29542 @@ -630,7 +630,7 @@ static const struct ide_port_ops it821x_port_ops = {
29543 .cable_detect = it821x_cable_detect,
29544 };
29545
29546 -static const struct ide_port_info it821x_chipset __devinitdata = {
29547 +static const struct ide_port_info it821x_chipset __devinitconst = {
29548 .name = DRV_NAME,
29549 .init_chipset = init_chipset_it821x,
29550 .init_hwif = init_hwif_it821x,
29551 diff --git a/drivers/ide/jmicron.c b/drivers/ide/jmicron.c
29552 index 74c2c4a..efddd7d 100644
29553 --- a/drivers/ide/jmicron.c
29554 +++ b/drivers/ide/jmicron.c
29555 @@ -102,7 +102,7 @@ static const struct ide_port_ops jmicron_port_ops = {
29556 .cable_detect = jmicron_cable_detect,
29557 };
29558
29559 -static const struct ide_port_info jmicron_chipset __devinitdata = {
29560 +static const struct ide_port_info jmicron_chipset __devinitconst = {
29561 .name = DRV_NAME,
29562 .enablebits = { { 0x40, 0x01, 0x01 }, { 0x40, 0x10, 0x10 } },
29563 .port_ops = &jmicron_port_ops,
29564 diff --git a/drivers/ide/ns87415.c b/drivers/ide/ns87415.c
29565 index 95327a2..73f78d8 100644
29566 --- a/drivers/ide/ns87415.c
29567 +++ b/drivers/ide/ns87415.c
29568 @@ -293,7 +293,7 @@ static const struct ide_dma_ops ns87415_dma_ops = {
29569 .dma_sff_read_status = superio_dma_sff_read_status,
29570 };
29571
29572 -static const struct ide_port_info ns87415_chipset __devinitdata = {
29573 +static const struct ide_port_info ns87415_chipset __devinitconst = {
29574 .name = DRV_NAME,
29575 .init_hwif = init_hwif_ns87415,
29576 .tp_ops = &ns87415_tp_ops,
29577 diff --git a/drivers/ide/opti621.c b/drivers/ide/opti621.c
29578 index 1a53a4c..39edc66 100644
29579 --- a/drivers/ide/opti621.c
29580 +++ b/drivers/ide/opti621.c
29581 @@ -131,7 +131,7 @@ static const struct ide_port_ops opti621_port_ops = {
29582 .set_pio_mode = opti621_set_pio_mode,
29583 };
29584
29585 -static const struct ide_port_info opti621_chipset __devinitdata = {
29586 +static const struct ide_port_info opti621_chipset __devinitconst = {
29587 .name = DRV_NAME,
29588 .enablebits = { {0x45, 0x80, 0x00}, {0x40, 0x08, 0x00} },
29589 .port_ops = &opti621_port_ops,
29590 diff --git a/drivers/ide/pdc202xx_new.c b/drivers/ide/pdc202xx_new.c
29591 index 9546fe2..2e5ceb6 100644
29592 --- a/drivers/ide/pdc202xx_new.c
29593 +++ b/drivers/ide/pdc202xx_new.c
29594 @@ -465,7 +465,7 @@ static const struct ide_port_ops pdcnew_port_ops = {
29595 .udma_mask = udma, \
29596 }
29597
29598 -static const struct ide_port_info pdcnew_chipsets[] __devinitdata = {
29599 +static const struct ide_port_info pdcnew_chipsets[] __devinitconst = {
29600 /* 0: PDC202{68,70} */ DECLARE_PDCNEW_DEV(ATA_UDMA5),
29601 /* 1: PDC202{69,71,75,76,77} */ DECLARE_PDCNEW_DEV(ATA_UDMA6),
29602 };
29603 diff --git a/drivers/ide/pdc202xx_old.c b/drivers/ide/pdc202xx_old.c
29604 index 3a35ec6..5634510 100644
29605 --- a/drivers/ide/pdc202xx_old.c
29606 +++ b/drivers/ide/pdc202xx_old.c
29607 @@ -270,7 +270,7 @@ static const struct ide_dma_ops pdc2026x_dma_ops = {
29608 .max_sectors = sectors, \
29609 }
29610
29611 -static const struct ide_port_info pdc202xx_chipsets[] __devinitdata = {
29612 +static const struct ide_port_info pdc202xx_chipsets[] __devinitconst = {
29613 { /* 0: PDC20246 */
29614 .name = DRV_NAME,
29615 .init_chipset = init_chipset_pdc202xx,
29616 diff --git a/drivers/ide/piix.c b/drivers/ide/piix.c
29617 index 1892e81..fe0fd60 100644
29618 --- a/drivers/ide/piix.c
29619 +++ b/drivers/ide/piix.c
29620 @@ -344,7 +344,7 @@ static const struct ide_port_ops ich_port_ops = {
29621 .udma_mask = udma, \
29622 }
29623
29624 -static const struct ide_port_info piix_pci_info[] __devinitdata = {
29625 +static const struct ide_port_info piix_pci_info[] __devinitconst = {
29626 /* 0: MPIIX */
29627 { /*
29628 * MPIIX actually has only a single IDE channel mapped to
29629 diff --git a/drivers/ide/rz1000.c b/drivers/ide/rz1000.c
29630 index a6414a8..c04173e 100644
29631 --- a/drivers/ide/rz1000.c
29632 +++ b/drivers/ide/rz1000.c
29633 @@ -38,7 +38,7 @@ static int __devinit rz1000_disable_readahead(struct pci_dev *dev)
29634 }
29635 }
29636
29637 -static const struct ide_port_info rz1000_chipset __devinitdata = {
29638 +static const struct ide_port_info rz1000_chipset __devinitconst = {
29639 .name = DRV_NAME,
29640 .host_flags = IDE_HFLAG_NO_DMA,
29641 };
29642 diff --git a/drivers/ide/sc1200.c b/drivers/ide/sc1200.c
29643 index 356b9b5..d4758eb 100644
29644 --- a/drivers/ide/sc1200.c
29645 +++ b/drivers/ide/sc1200.c
29646 @@ -291,7 +291,7 @@ static const struct ide_dma_ops sc1200_dma_ops = {
29647 .dma_sff_read_status = ide_dma_sff_read_status,
29648 };
29649
29650 -static const struct ide_port_info sc1200_chipset __devinitdata = {
29651 +static const struct ide_port_info sc1200_chipset __devinitconst = {
29652 .name = DRV_NAME,
29653 .port_ops = &sc1200_port_ops,
29654 .dma_ops = &sc1200_dma_ops,
29655 diff --git a/drivers/ide/scc_pata.c b/drivers/ide/scc_pata.c
29656 index b7f5b0c..9701038 100644
29657 --- a/drivers/ide/scc_pata.c
29658 +++ b/drivers/ide/scc_pata.c
29659 @@ -811,7 +811,7 @@ static const struct ide_dma_ops scc_dma_ops = {
29660 .dma_sff_read_status = scc_dma_sff_read_status,
29661 };
29662
29663 -static const struct ide_port_info scc_chipset __devinitdata = {
29664 +static const struct ide_port_info scc_chipset __devinitconst = {
29665 .name = "sccIDE",
29666 .init_iops = init_iops_scc,
29667 .init_dma = scc_init_dma,
29668 diff --git a/drivers/ide/serverworks.c b/drivers/ide/serverworks.c
29669 index 35fb8da..24d72ef 100644
29670 --- a/drivers/ide/serverworks.c
29671 +++ b/drivers/ide/serverworks.c
29672 @@ -337,7 +337,7 @@ static const struct ide_port_ops svwks_port_ops = {
29673 .cable_detect = svwks_cable_detect,
29674 };
29675
29676 -static const struct ide_port_info serverworks_chipsets[] __devinitdata = {
29677 +static const struct ide_port_info serverworks_chipsets[] __devinitconst = {
29678 { /* 0: OSB4 */
29679 .name = DRV_NAME,
29680 .init_chipset = init_chipset_svwks,
29681 diff --git a/drivers/ide/siimage.c b/drivers/ide/siimage.c
29682 index ddeda44..46f7e30 100644
29683 --- a/drivers/ide/siimage.c
29684 +++ b/drivers/ide/siimage.c
29685 @@ -719,7 +719,7 @@ static const struct ide_dma_ops sil_dma_ops = {
29686 .udma_mask = ATA_UDMA6, \
29687 }
29688
29689 -static const struct ide_port_info siimage_chipsets[] __devinitdata = {
29690 +static const struct ide_port_info siimage_chipsets[] __devinitconst = {
29691 /* 0: SiI680 */ DECLARE_SII_DEV(&sil_pata_port_ops),
29692 /* 1: SiI3112 */ DECLARE_SII_DEV(&sil_sata_port_ops)
29693 };
29694 diff --git a/drivers/ide/sis5513.c b/drivers/ide/sis5513.c
29695 index 4a00225..09e61b4 100644
29696 --- a/drivers/ide/sis5513.c
29697 +++ b/drivers/ide/sis5513.c
29698 @@ -563,7 +563,7 @@ static const struct ide_port_ops sis_ata133_port_ops = {
29699 .cable_detect = sis_cable_detect,
29700 };
29701
29702 -static const struct ide_port_info sis5513_chipset __devinitdata = {
29703 +static const struct ide_port_info sis5513_chipset __devinitconst = {
29704 .name = DRV_NAME,
29705 .init_chipset = init_chipset_sis5513,
29706 .enablebits = { {0x4a, 0x02, 0x02}, {0x4a, 0x04, 0x04} },
29707 diff --git a/drivers/ide/sl82c105.c b/drivers/ide/sl82c105.c
29708 index f21dc2a..d051cd2 100644
29709 --- a/drivers/ide/sl82c105.c
29710 +++ b/drivers/ide/sl82c105.c
29711 @@ -299,7 +299,7 @@ static const struct ide_dma_ops sl82c105_dma_ops = {
29712 .dma_sff_read_status = ide_dma_sff_read_status,
29713 };
29714
29715 -static const struct ide_port_info sl82c105_chipset __devinitdata = {
29716 +static const struct ide_port_info sl82c105_chipset __devinitconst = {
29717 .name = DRV_NAME,
29718 .init_chipset = init_chipset_sl82c105,
29719 .enablebits = {{0x40,0x01,0x01}, {0x40,0x10,0x10}},
29720 diff --git a/drivers/ide/slc90e66.c b/drivers/ide/slc90e66.c
29721 index 864ffe0..863a5e9 100644
29722 --- a/drivers/ide/slc90e66.c
29723 +++ b/drivers/ide/slc90e66.c
29724 @@ -132,7 +132,7 @@ static const struct ide_port_ops slc90e66_port_ops = {
29725 .cable_detect = slc90e66_cable_detect,
29726 };
29727
29728 -static const struct ide_port_info slc90e66_chipset __devinitdata = {
29729 +static const struct ide_port_info slc90e66_chipset __devinitconst = {
29730 .name = DRV_NAME,
29731 .enablebits = { {0x41, 0x80, 0x80}, {0x43, 0x80, 0x80} },
29732 .port_ops = &slc90e66_port_ops,
29733 diff --git a/drivers/ide/tc86c001.c b/drivers/ide/tc86c001.c
29734 index 4799d5c..1794678 100644
29735 --- a/drivers/ide/tc86c001.c
29736 +++ b/drivers/ide/tc86c001.c
29737 @@ -192,7 +192,7 @@ static const struct ide_dma_ops tc86c001_dma_ops = {
29738 .dma_sff_read_status = ide_dma_sff_read_status,
29739 };
29740
29741 -static const struct ide_port_info tc86c001_chipset __devinitdata = {
29742 +static const struct ide_port_info tc86c001_chipset __devinitconst = {
29743 .name = DRV_NAME,
29744 .init_hwif = init_hwif_tc86c001,
29745 .port_ops = &tc86c001_port_ops,
29746 diff --git a/drivers/ide/triflex.c b/drivers/ide/triflex.c
29747 index 281c914..55ce1b8 100644
29748 --- a/drivers/ide/triflex.c
29749 +++ b/drivers/ide/triflex.c
29750 @@ -92,7 +92,7 @@ static const struct ide_port_ops triflex_port_ops = {
29751 .set_dma_mode = triflex_set_mode,
29752 };
29753
29754 -static const struct ide_port_info triflex_device __devinitdata = {
29755 +static const struct ide_port_info triflex_device __devinitconst = {
29756 .name = DRV_NAME,
29757 .enablebits = {{0x80, 0x01, 0x01}, {0x80, 0x02, 0x02}},
29758 .port_ops = &triflex_port_ops,
29759 diff --git a/drivers/ide/trm290.c b/drivers/ide/trm290.c
29760 index 4b42ca0..e494a98 100644
29761 --- a/drivers/ide/trm290.c
29762 +++ b/drivers/ide/trm290.c
29763 @@ -324,7 +324,7 @@ static struct ide_dma_ops trm290_dma_ops = {
29764 .dma_check = trm290_dma_check,
29765 };
29766
29767 -static const struct ide_port_info trm290_chipset __devinitdata = {
29768 +static const struct ide_port_info trm290_chipset __devinitconst = {
29769 .name = DRV_NAME,
29770 .init_hwif = init_hwif_trm290,
29771 .tp_ops = &trm290_tp_ops,
29772 diff --git a/drivers/ide/via82cxxx.c b/drivers/ide/via82cxxx.c
29773 index f46f49c..eb77678 100644
29774 --- a/drivers/ide/via82cxxx.c
29775 +++ b/drivers/ide/via82cxxx.c
29776 @@ -403,7 +403,7 @@ static const struct ide_port_ops via_port_ops = {
29777 .cable_detect = via82cxxx_cable_detect,
29778 };
29779
29780 -static const struct ide_port_info via82cxxx_chipset __devinitdata = {
29781 +static const struct ide_port_info via82cxxx_chipset __devinitconst = {
29782 .name = DRV_NAME,
29783 .init_chipset = init_chipset_via82cxxx,
29784 .enablebits = { { 0x40, 0x02, 0x02 }, { 0x40, 0x01, 0x01 } },
29785 diff --git a/drivers/ieee802154/fakehard.c b/drivers/ieee802154/fakehard.c
29786 index eb0e2cc..14241c7 100644
29787 --- a/drivers/ieee802154/fakehard.c
29788 +++ b/drivers/ieee802154/fakehard.c
29789 @@ -386,7 +386,7 @@ static int __devinit ieee802154fake_probe(struct platform_device *pdev)
29790 phy->transmit_power = 0xbf;
29791
29792 dev->netdev_ops = &fake_ops;
29793 - dev->ml_priv = &fake_mlme;
29794 + dev->ml_priv = (void *)&fake_mlme;
29795
29796 priv = netdev_priv(dev);
29797 priv->phy = phy;
29798 diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
29799 index 8b72f39..55df4c8 100644
29800 --- a/drivers/infiniband/core/cm.c
29801 +++ b/drivers/infiniband/core/cm.c
29802 @@ -114,7 +114,7 @@ static char const counter_group_names[CM_COUNTER_GROUPS]
29803
29804 struct cm_counter_group {
29805 struct kobject obj;
29806 - atomic_long_t counter[CM_ATTR_COUNT];
29807 + atomic_long_unchecked_t counter[CM_ATTR_COUNT];
29808 };
29809
29810 struct cm_counter_attribute {
29811 @@ -1394,7 +1394,7 @@ static void cm_dup_req_handler(struct cm_work *work,
29812 struct ib_mad_send_buf *msg = NULL;
29813 int ret;
29814
29815 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29816 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29817 counter[CM_REQ_COUNTER]);
29818
29819 /* Quick state check to discard duplicate REQs. */
29820 @@ -1778,7 +1778,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
29821 if (!cm_id_priv)
29822 return;
29823
29824 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29825 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29826 counter[CM_REP_COUNTER]);
29827 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
29828 if (ret)
29829 @@ -1945,7 +1945,7 @@ static int cm_rtu_handler(struct cm_work *work)
29830 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
29831 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
29832 spin_unlock_irq(&cm_id_priv->lock);
29833 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29834 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29835 counter[CM_RTU_COUNTER]);
29836 goto out;
29837 }
29838 @@ -2128,7 +2128,7 @@ static int cm_dreq_handler(struct cm_work *work)
29839 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
29840 dreq_msg->local_comm_id);
29841 if (!cm_id_priv) {
29842 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29843 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29844 counter[CM_DREQ_COUNTER]);
29845 cm_issue_drep(work->port, work->mad_recv_wc);
29846 return -EINVAL;
29847 @@ -2153,7 +2153,7 @@ static int cm_dreq_handler(struct cm_work *work)
29848 case IB_CM_MRA_REP_RCVD:
29849 break;
29850 case IB_CM_TIMEWAIT:
29851 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29852 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29853 counter[CM_DREQ_COUNTER]);
29854 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
29855 goto unlock;
29856 @@ -2167,7 +2167,7 @@ static int cm_dreq_handler(struct cm_work *work)
29857 cm_free_msg(msg);
29858 goto deref;
29859 case IB_CM_DREQ_RCVD:
29860 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29861 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29862 counter[CM_DREQ_COUNTER]);
29863 goto unlock;
29864 default:
29865 @@ -2534,7 +2534,7 @@ static int cm_mra_handler(struct cm_work *work)
29866 ib_modify_mad(cm_id_priv->av.port->mad_agent,
29867 cm_id_priv->msg, timeout)) {
29868 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
29869 - atomic_long_inc(&work->port->
29870 + atomic_long_inc_unchecked(&work->port->
29871 counter_group[CM_RECV_DUPLICATES].
29872 counter[CM_MRA_COUNTER]);
29873 goto out;
29874 @@ -2543,7 +2543,7 @@ static int cm_mra_handler(struct cm_work *work)
29875 break;
29876 case IB_CM_MRA_REQ_RCVD:
29877 case IB_CM_MRA_REP_RCVD:
29878 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29879 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29880 counter[CM_MRA_COUNTER]);
29881 /* fall through */
29882 default:
29883 @@ -2705,7 +2705,7 @@ static int cm_lap_handler(struct cm_work *work)
29884 case IB_CM_LAP_IDLE:
29885 break;
29886 case IB_CM_MRA_LAP_SENT:
29887 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29888 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29889 counter[CM_LAP_COUNTER]);
29890 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
29891 goto unlock;
29892 @@ -2721,7 +2721,7 @@ static int cm_lap_handler(struct cm_work *work)
29893 cm_free_msg(msg);
29894 goto deref;
29895 case IB_CM_LAP_RCVD:
29896 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29897 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29898 counter[CM_LAP_COUNTER]);
29899 goto unlock;
29900 default:
29901 @@ -3005,7 +3005,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
29902 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
29903 if (cur_cm_id_priv) {
29904 spin_unlock_irq(&cm.lock);
29905 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29906 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29907 counter[CM_SIDR_REQ_COUNTER]);
29908 goto out; /* Duplicate message. */
29909 }
29910 @@ -3217,10 +3217,10 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
29911 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
29912 msg->retries = 1;
29913
29914 - atomic_long_add(1 + msg->retries,
29915 + atomic_long_add_unchecked(1 + msg->retries,
29916 &port->counter_group[CM_XMIT].counter[attr_index]);
29917 if (msg->retries)
29918 - atomic_long_add(msg->retries,
29919 + atomic_long_add_unchecked(msg->retries,
29920 &port->counter_group[CM_XMIT_RETRIES].
29921 counter[attr_index]);
29922
29923 @@ -3430,7 +3430,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
29924 }
29925
29926 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
29927 - atomic_long_inc(&port->counter_group[CM_RECV].
29928 + atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
29929 counter[attr_id - CM_ATTR_ID_OFFSET]);
29930
29931 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
29932 @@ -3635,7 +3635,7 @@ static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
29933 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
29934
29935 return sprintf(buf, "%ld\n",
29936 - atomic_long_read(&group->counter[cm_attr->index]));
29937 + atomic_long_read_unchecked(&group->counter[cm_attr->index]));
29938 }
29939
29940 static const struct sysfs_ops cm_counter_ops = {
29941 diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
29942 index 176c8f9..2627b62 100644
29943 --- a/drivers/infiniband/core/fmr_pool.c
29944 +++ b/drivers/infiniband/core/fmr_pool.c
29945 @@ -98,8 +98,8 @@ struct ib_fmr_pool {
29946
29947 struct task_struct *thread;
29948
29949 - atomic_t req_ser;
29950 - atomic_t flush_ser;
29951 + atomic_unchecked_t req_ser;
29952 + atomic_unchecked_t flush_ser;
29953
29954 wait_queue_head_t force_wait;
29955 };
29956 @@ -180,10 +180,10 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
29957 struct ib_fmr_pool *pool = pool_ptr;
29958
29959 do {
29960 - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
29961 + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
29962 ib_fmr_batch_release(pool);
29963
29964 - atomic_inc(&pool->flush_ser);
29965 + atomic_inc_unchecked(&pool->flush_ser);
29966 wake_up_interruptible(&pool->force_wait);
29967
29968 if (pool->flush_function)
29969 @@ -191,7 +191,7 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
29970 }
29971
29972 set_current_state(TASK_INTERRUPTIBLE);
29973 - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
29974 + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
29975 !kthread_should_stop())
29976 schedule();
29977 __set_current_state(TASK_RUNNING);
29978 @@ -283,8 +283,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
29979 pool->dirty_watermark = params->dirty_watermark;
29980 pool->dirty_len = 0;
29981 spin_lock_init(&pool->pool_lock);
29982 - atomic_set(&pool->req_ser, 0);
29983 - atomic_set(&pool->flush_ser, 0);
29984 + atomic_set_unchecked(&pool->req_ser, 0);
29985 + atomic_set_unchecked(&pool->flush_ser, 0);
29986 init_waitqueue_head(&pool->force_wait);
29987
29988 pool->thread = kthread_run(ib_fmr_cleanup_thread,
29989 @@ -412,11 +412,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
29990 }
29991 spin_unlock_irq(&pool->pool_lock);
29992
29993 - serial = atomic_inc_return(&pool->req_ser);
29994 + serial = atomic_inc_return_unchecked(&pool->req_ser);
29995 wake_up_process(pool->thread);
29996
29997 if (wait_event_interruptible(pool->force_wait,
29998 - atomic_read(&pool->flush_ser) - serial >= 0))
29999 + atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
30000 return -EINTR;
30001
30002 return 0;
30003 @@ -526,7 +526,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
30004 } else {
30005 list_add_tail(&fmr->list, &pool->dirty_list);
30006 if (++pool->dirty_len >= pool->dirty_watermark) {
30007 - atomic_inc(&pool->req_ser);
30008 + atomic_inc_unchecked(&pool->req_ser);
30009 wake_up_process(pool->thread);
30010 }
30011 }
30012 diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
30013 index 40c8353..946b0e4 100644
30014 --- a/drivers/infiniband/hw/cxgb4/mem.c
30015 +++ b/drivers/infiniband/hw/cxgb4/mem.c
30016 @@ -122,7 +122,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
30017 int err;
30018 struct fw_ri_tpte tpt;
30019 u32 stag_idx;
30020 - static atomic_t key;
30021 + static atomic_unchecked_t key;
30022
30023 if (c4iw_fatal_error(rdev))
30024 return -EIO;
30025 @@ -135,7 +135,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
30026 &rdev->resource.tpt_fifo_lock);
30027 if (!stag_idx)
30028 return -ENOMEM;
30029 - *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
30030 + *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff);
30031 }
30032 PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
30033 __func__, stag_state, type, pdid, stag_idx);
30034 diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
30035 index 79b3dbc..96e5fcc 100644
30036 --- a/drivers/infiniband/hw/ipath/ipath_rc.c
30037 +++ b/drivers/infiniband/hw/ipath/ipath_rc.c
30038 @@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
30039 struct ib_atomic_eth *ateth;
30040 struct ipath_ack_entry *e;
30041 u64 vaddr;
30042 - atomic64_t *maddr;
30043 + atomic64_unchecked_t *maddr;
30044 u64 sdata;
30045 u32 rkey;
30046 u8 next;
30047 @@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
30048 IB_ACCESS_REMOTE_ATOMIC)))
30049 goto nack_acc_unlck;
30050 /* Perform atomic OP and save result. */
30051 - maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
30052 + maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
30053 sdata = be64_to_cpu(ateth->swap_data);
30054 e = &qp->s_ack_queue[qp->r_head_ack_queue];
30055 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
30056 - (u64) atomic64_add_return(sdata, maddr) - sdata :
30057 + (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
30058 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
30059 be64_to_cpu(ateth->compare_data),
30060 sdata);
30061 diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
30062 index 1f95bba..9530f87 100644
30063 --- a/drivers/infiniband/hw/ipath/ipath_ruc.c
30064 +++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
30065 @@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ipath_qp *sqp)
30066 unsigned long flags;
30067 struct ib_wc wc;
30068 u64 sdata;
30069 - atomic64_t *maddr;
30070 + atomic64_unchecked_t *maddr;
30071 enum ib_wc_status send_status;
30072
30073 /*
30074 @@ -382,11 +382,11 @@ again:
30075 IB_ACCESS_REMOTE_ATOMIC)))
30076 goto acc_err;
30077 /* Perform atomic OP and save result. */
30078 - maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
30079 + maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
30080 sdata = wqe->wr.wr.atomic.compare_add;
30081 *(u64 *) sqp->s_sge.sge.vaddr =
30082 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
30083 - (u64) atomic64_add_return(sdata, maddr) - sdata :
30084 + (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
30085 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
30086 sdata, wqe->wr.wr.atomic.swap);
30087 goto send_comp;
30088 diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
30089 index 5965b3d..16817fb 100644
30090 --- a/drivers/infiniband/hw/nes/nes.c
30091 +++ b/drivers/infiniband/hw/nes/nes.c
30092 @@ -103,7 +103,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes");
30093 LIST_HEAD(nes_adapter_list);
30094 static LIST_HEAD(nes_dev_list);
30095
30096 -atomic_t qps_destroyed;
30097 +atomic_unchecked_t qps_destroyed;
30098
30099 static unsigned int ee_flsh_adapter;
30100 static unsigned int sysfs_nonidx_addr;
30101 @@ -272,7 +272,7 @@ static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_r
30102 struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
30103 struct nes_adapter *nesadapter = nesdev->nesadapter;
30104
30105 - atomic_inc(&qps_destroyed);
30106 + atomic_inc_unchecked(&qps_destroyed);
30107
30108 /* Free the control structures */
30109
30110 diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
30111 index 568b4f1..5ea3eff 100644
30112 --- a/drivers/infiniband/hw/nes/nes.h
30113 +++ b/drivers/infiniband/hw/nes/nes.h
30114 @@ -178,17 +178,17 @@ extern unsigned int nes_debug_level;
30115 extern unsigned int wqm_quanta;
30116 extern struct list_head nes_adapter_list;
30117
30118 -extern atomic_t cm_connects;
30119 -extern atomic_t cm_accepts;
30120 -extern atomic_t cm_disconnects;
30121 -extern atomic_t cm_closes;
30122 -extern atomic_t cm_connecteds;
30123 -extern atomic_t cm_connect_reqs;
30124 -extern atomic_t cm_rejects;
30125 -extern atomic_t mod_qp_timouts;
30126 -extern atomic_t qps_created;
30127 -extern atomic_t qps_destroyed;
30128 -extern atomic_t sw_qps_destroyed;
30129 +extern atomic_unchecked_t cm_connects;
30130 +extern atomic_unchecked_t cm_accepts;
30131 +extern atomic_unchecked_t cm_disconnects;
30132 +extern atomic_unchecked_t cm_closes;
30133 +extern atomic_unchecked_t cm_connecteds;
30134 +extern atomic_unchecked_t cm_connect_reqs;
30135 +extern atomic_unchecked_t cm_rejects;
30136 +extern atomic_unchecked_t mod_qp_timouts;
30137 +extern atomic_unchecked_t qps_created;
30138 +extern atomic_unchecked_t qps_destroyed;
30139 +extern atomic_unchecked_t sw_qps_destroyed;
30140 extern u32 mh_detected;
30141 extern u32 mh_pauses_sent;
30142 extern u32 cm_packets_sent;
30143 @@ -197,16 +197,16 @@ extern u32 cm_packets_created;
30144 extern u32 cm_packets_received;
30145 extern u32 cm_packets_dropped;
30146 extern u32 cm_packets_retrans;
30147 -extern atomic_t cm_listens_created;
30148 -extern atomic_t cm_listens_destroyed;
30149 +extern atomic_unchecked_t cm_listens_created;
30150 +extern atomic_unchecked_t cm_listens_destroyed;
30151 extern u32 cm_backlog_drops;
30152 -extern atomic_t cm_loopbacks;
30153 -extern atomic_t cm_nodes_created;
30154 -extern atomic_t cm_nodes_destroyed;
30155 -extern atomic_t cm_accel_dropped_pkts;
30156 -extern atomic_t cm_resets_recvd;
30157 -extern atomic_t pau_qps_created;
30158 -extern atomic_t pau_qps_destroyed;
30159 +extern atomic_unchecked_t cm_loopbacks;
30160 +extern atomic_unchecked_t cm_nodes_created;
30161 +extern atomic_unchecked_t cm_nodes_destroyed;
30162 +extern atomic_unchecked_t cm_accel_dropped_pkts;
30163 +extern atomic_unchecked_t cm_resets_recvd;
30164 +extern atomic_unchecked_t pau_qps_created;
30165 +extern atomic_unchecked_t pau_qps_destroyed;
30166
30167 extern u32 int_mod_timer_init;
30168 extern u32 int_mod_cq_depth_256;
30169 diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
30170 index 0a52d72..0642f36 100644
30171 --- a/drivers/infiniband/hw/nes/nes_cm.c
30172 +++ b/drivers/infiniband/hw/nes/nes_cm.c
30173 @@ -68,14 +68,14 @@ u32 cm_packets_dropped;
30174 u32 cm_packets_retrans;
30175 u32 cm_packets_created;
30176 u32 cm_packets_received;
30177 -atomic_t cm_listens_created;
30178 -atomic_t cm_listens_destroyed;
30179 +atomic_unchecked_t cm_listens_created;
30180 +atomic_unchecked_t cm_listens_destroyed;
30181 u32 cm_backlog_drops;
30182 -atomic_t cm_loopbacks;
30183 -atomic_t cm_nodes_created;
30184 -atomic_t cm_nodes_destroyed;
30185 -atomic_t cm_accel_dropped_pkts;
30186 -atomic_t cm_resets_recvd;
30187 +atomic_unchecked_t cm_loopbacks;
30188 +atomic_unchecked_t cm_nodes_created;
30189 +atomic_unchecked_t cm_nodes_destroyed;
30190 +atomic_unchecked_t cm_accel_dropped_pkts;
30191 +atomic_unchecked_t cm_resets_recvd;
30192
30193 static inline int mini_cm_accelerated(struct nes_cm_core *, struct nes_cm_node *);
30194 static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *, struct nes_vnic *, struct nes_cm_info *);
30195 @@ -148,13 +148,13 @@ static struct nes_cm_ops nes_cm_api = {
30196
30197 static struct nes_cm_core *g_cm_core;
30198
30199 -atomic_t cm_connects;
30200 -atomic_t cm_accepts;
30201 -atomic_t cm_disconnects;
30202 -atomic_t cm_closes;
30203 -atomic_t cm_connecteds;
30204 -atomic_t cm_connect_reqs;
30205 -atomic_t cm_rejects;
30206 +atomic_unchecked_t cm_connects;
30207 +atomic_unchecked_t cm_accepts;
30208 +atomic_unchecked_t cm_disconnects;
30209 +atomic_unchecked_t cm_closes;
30210 +atomic_unchecked_t cm_connecteds;
30211 +atomic_unchecked_t cm_connect_reqs;
30212 +atomic_unchecked_t cm_rejects;
30213
30214 int nes_add_ref_cm_node(struct nes_cm_node *cm_node)
30215 {
30216 @@ -1271,7 +1271,7 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
30217 kfree(listener);
30218 listener = NULL;
30219 ret = 0;
30220 - atomic_inc(&cm_listens_destroyed);
30221 + atomic_inc_unchecked(&cm_listens_destroyed);
30222 } else {
30223 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
30224 }
30225 @@ -1473,7 +1473,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
30226 cm_node->rem_mac);
30227
30228 add_hte_node(cm_core, cm_node);
30229 - atomic_inc(&cm_nodes_created);
30230 + atomic_inc_unchecked(&cm_nodes_created);
30231
30232 return cm_node;
30233 }
30234 @@ -1531,7 +1531,7 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
30235 }
30236
30237 atomic_dec(&cm_core->node_cnt);
30238 - atomic_inc(&cm_nodes_destroyed);
30239 + atomic_inc_unchecked(&cm_nodes_destroyed);
30240 nesqp = cm_node->nesqp;
30241 if (nesqp) {
30242 nesqp->cm_node = NULL;
30243 @@ -1595,7 +1595,7 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc,
30244
30245 static void drop_packet(struct sk_buff *skb)
30246 {
30247 - atomic_inc(&cm_accel_dropped_pkts);
30248 + atomic_inc_unchecked(&cm_accel_dropped_pkts);
30249 dev_kfree_skb_any(skb);
30250 }
30251
30252 @@ -1658,7 +1658,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
30253 {
30254
30255 int reset = 0; /* whether to send reset in case of err.. */
30256 - atomic_inc(&cm_resets_recvd);
30257 + atomic_inc_unchecked(&cm_resets_recvd);
30258 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
30259 " refcnt=%d\n", cm_node, cm_node->state,
30260 atomic_read(&cm_node->ref_count));
30261 @@ -2299,7 +2299,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
30262 rem_ref_cm_node(cm_node->cm_core, cm_node);
30263 return NULL;
30264 }
30265 - atomic_inc(&cm_loopbacks);
30266 + atomic_inc_unchecked(&cm_loopbacks);
30267 loopbackremotenode->loopbackpartner = cm_node;
30268 loopbackremotenode->tcp_cntxt.rcv_wscale =
30269 NES_CM_DEFAULT_RCV_WND_SCALE;
30270 @@ -2574,7 +2574,7 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
30271 nes_queue_mgt_skbs(skb, nesvnic, cm_node->nesqp);
30272 else {
30273 rem_ref_cm_node(cm_core, cm_node);
30274 - atomic_inc(&cm_accel_dropped_pkts);
30275 + atomic_inc_unchecked(&cm_accel_dropped_pkts);
30276 dev_kfree_skb_any(skb);
30277 }
30278 break;
30279 @@ -2880,7 +2880,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
30280
30281 if ((cm_id) && (cm_id->event_handler)) {
30282 if (issue_disconn) {
30283 - atomic_inc(&cm_disconnects);
30284 + atomic_inc_unchecked(&cm_disconnects);
30285 cm_event.event = IW_CM_EVENT_DISCONNECT;
30286 cm_event.status = disconn_status;
30287 cm_event.local_addr = cm_id->local_addr;
30288 @@ -2902,7 +2902,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
30289 }
30290
30291 if (issue_close) {
30292 - atomic_inc(&cm_closes);
30293 + atomic_inc_unchecked(&cm_closes);
30294 nes_disconnect(nesqp, 1);
30295
30296 cm_id->provider_data = nesqp;
30297 @@ -3038,7 +3038,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
30298
30299 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
30300 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
30301 - atomic_inc(&cm_accepts);
30302 + atomic_inc_unchecked(&cm_accepts);
30303
30304 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
30305 netdev_refcnt_read(nesvnic->netdev));
30306 @@ -3240,7 +3240,7 @@ int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
30307 struct nes_cm_core *cm_core;
30308 u8 *start_buff;
30309
30310 - atomic_inc(&cm_rejects);
30311 + atomic_inc_unchecked(&cm_rejects);
30312 cm_node = (struct nes_cm_node *)cm_id->provider_data;
30313 loopback = cm_node->loopbackpartner;
30314 cm_core = cm_node->cm_core;
30315 @@ -3300,7 +3300,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
30316 ntohl(cm_id->local_addr.sin_addr.s_addr),
30317 ntohs(cm_id->local_addr.sin_port));
30318
30319 - atomic_inc(&cm_connects);
30320 + atomic_inc_unchecked(&cm_connects);
30321 nesqp->active_conn = 1;
30322
30323 /* cache the cm_id in the qp */
30324 @@ -3406,7 +3406,7 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
30325 g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
30326 return err;
30327 }
30328 - atomic_inc(&cm_listens_created);
30329 + atomic_inc_unchecked(&cm_listens_created);
30330 }
30331
30332 cm_id->add_ref(cm_id);
30333 @@ -3507,7 +3507,7 @@ static void cm_event_connected(struct nes_cm_event *event)
30334
30335 if (nesqp->destroyed)
30336 return;
30337 - atomic_inc(&cm_connecteds);
30338 + atomic_inc_unchecked(&cm_connecteds);
30339 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
30340 " local port 0x%04X. jiffies = %lu.\n",
30341 nesqp->hwqp.qp_id,
30342 @@ -3694,7 +3694,7 @@ static void cm_event_reset(struct nes_cm_event *event)
30343
30344 cm_id->add_ref(cm_id);
30345 ret = cm_id->event_handler(cm_id, &cm_event);
30346 - atomic_inc(&cm_closes);
30347 + atomic_inc_unchecked(&cm_closes);
30348 cm_event.event = IW_CM_EVENT_CLOSE;
30349 cm_event.status = 0;
30350 cm_event.provider_data = cm_id->provider_data;
30351 @@ -3730,7 +3730,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event)
30352 return;
30353 cm_id = cm_node->cm_id;
30354
30355 - atomic_inc(&cm_connect_reqs);
30356 + atomic_inc_unchecked(&cm_connect_reqs);
30357 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
30358 cm_node, cm_id, jiffies);
30359
30360 @@ -3770,7 +3770,7 @@ static void cm_event_mpa_reject(struct nes_cm_event *event)
30361 return;
30362 cm_id = cm_node->cm_id;
30363
30364 - atomic_inc(&cm_connect_reqs);
30365 + atomic_inc_unchecked(&cm_connect_reqs);
30366 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
30367 cm_node, cm_id, jiffies);
30368
30369 diff --git a/drivers/infiniband/hw/nes/nes_mgt.c b/drivers/infiniband/hw/nes/nes_mgt.c
30370 index b3b2a24..7bfaf1e 100644
30371 --- a/drivers/infiniband/hw/nes/nes_mgt.c
30372 +++ b/drivers/infiniband/hw/nes/nes_mgt.c
30373 @@ -40,8 +40,8 @@
30374 #include "nes.h"
30375 #include "nes_mgt.h"
30376
30377 -atomic_t pau_qps_created;
30378 -atomic_t pau_qps_destroyed;
30379 +atomic_unchecked_t pau_qps_created;
30380 +atomic_unchecked_t pau_qps_destroyed;
30381
30382 static void nes_replenish_mgt_rq(struct nes_vnic_mgt *mgtvnic)
30383 {
30384 @@ -621,7 +621,7 @@ void nes_destroy_pau_qp(struct nes_device *nesdev, struct nes_qp *nesqp)
30385 {
30386 struct sk_buff *skb;
30387 unsigned long flags;
30388 - atomic_inc(&pau_qps_destroyed);
30389 + atomic_inc_unchecked(&pau_qps_destroyed);
30390
30391 /* Free packets that have not yet been forwarded */
30392 /* Lock is acquired by skb_dequeue when removing the skb */
30393 @@ -812,7 +812,7 @@ static void nes_mgt_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *
30394 cq->cq_vbase[head].cqe_words[NES_NIC_CQE_HASH_RCVNXT]);
30395 skb_queue_head_init(&nesqp->pau_list);
30396 spin_lock_init(&nesqp->pau_lock);
30397 - atomic_inc(&pau_qps_created);
30398 + atomic_inc_unchecked(&pau_qps_created);
30399 nes_change_quad_hash(nesdev, mgtvnic->nesvnic, nesqp);
30400 }
30401
30402 diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
30403 index c00d2f3..8834298 100644
30404 --- a/drivers/infiniband/hw/nes/nes_nic.c
30405 +++ b/drivers/infiniband/hw/nes/nes_nic.c
30406 @@ -1277,39 +1277,39 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
30407 target_stat_values[++index] = mh_detected;
30408 target_stat_values[++index] = mh_pauses_sent;
30409 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
30410 - target_stat_values[++index] = atomic_read(&cm_connects);
30411 - target_stat_values[++index] = atomic_read(&cm_accepts);
30412 - target_stat_values[++index] = atomic_read(&cm_disconnects);
30413 - target_stat_values[++index] = atomic_read(&cm_connecteds);
30414 - target_stat_values[++index] = atomic_read(&cm_connect_reqs);
30415 - target_stat_values[++index] = atomic_read(&cm_rejects);
30416 - target_stat_values[++index] = atomic_read(&mod_qp_timouts);
30417 - target_stat_values[++index] = atomic_read(&qps_created);
30418 - target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
30419 - target_stat_values[++index] = atomic_read(&qps_destroyed);
30420 - target_stat_values[++index] = atomic_read(&cm_closes);
30421 + target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
30422 + target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
30423 + target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
30424 + target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
30425 + target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
30426 + target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
30427 + target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
30428 + target_stat_values[++index] = atomic_read_unchecked(&qps_created);
30429 + target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
30430 + target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
30431 + target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
30432 target_stat_values[++index] = cm_packets_sent;
30433 target_stat_values[++index] = cm_packets_bounced;
30434 target_stat_values[++index] = cm_packets_created;
30435 target_stat_values[++index] = cm_packets_received;
30436 target_stat_values[++index] = cm_packets_dropped;
30437 target_stat_values[++index] = cm_packets_retrans;
30438 - target_stat_values[++index] = atomic_read(&cm_listens_created);
30439 - target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
30440 + target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created);
30441 + target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed);
30442 target_stat_values[++index] = cm_backlog_drops;
30443 - target_stat_values[++index] = atomic_read(&cm_loopbacks);
30444 - target_stat_values[++index] = atomic_read(&cm_nodes_created);
30445 - target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
30446 - target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
30447 - target_stat_values[++index] = atomic_read(&cm_resets_recvd);
30448 + target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
30449 + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
30450 + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
30451 + target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
30452 + target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
30453 target_stat_values[++index] = nesadapter->free_4kpbl;
30454 target_stat_values[++index] = nesadapter->free_256pbl;
30455 target_stat_values[++index] = int_mod_timer_init;
30456 target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated;
30457 target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed;
30458 target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc;
30459 - target_stat_values[++index] = atomic_read(&pau_qps_created);
30460 - target_stat_values[++index] = atomic_read(&pau_qps_destroyed);
30461 + target_stat_values[++index] = atomic_read_unchecked(&pau_qps_created);
30462 + target_stat_values[++index] = atomic_read_unchecked(&pau_qps_destroyed);
30463 }
30464
30465 /**
30466 diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
30467 index 5095bc4..41e8fff 100644
30468 --- a/drivers/infiniband/hw/nes/nes_verbs.c
30469 +++ b/drivers/infiniband/hw/nes/nes_verbs.c
30470 @@ -46,9 +46,9 @@
30471
30472 #include <rdma/ib_umem.h>
30473
30474 -atomic_t mod_qp_timouts;
30475 -atomic_t qps_created;
30476 -atomic_t sw_qps_destroyed;
30477 +atomic_unchecked_t mod_qp_timouts;
30478 +atomic_unchecked_t qps_created;
30479 +atomic_unchecked_t sw_qps_destroyed;
30480
30481 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
30482
30483 @@ -1131,7 +1131,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
30484 if (init_attr->create_flags)
30485 return ERR_PTR(-EINVAL);
30486
30487 - atomic_inc(&qps_created);
30488 + atomic_inc_unchecked(&qps_created);
30489 switch (init_attr->qp_type) {
30490 case IB_QPT_RC:
30491 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
30492 @@ -1460,7 +1460,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
30493 struct iw_cm_event cm_event;
30494 int ret = 0;
30495
30496 - atomic_inc(&sw_qps_destroyed);
30497 + atomic_inc_unchecked(&sw_qps_destroyed);
30498 nesqp->destroyed = 1;
30499
30500 /* Blow away the connection if it exists. */
30501 diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
30502 index b881bdc..c2e360c 100644
30503 --- a/drivers/infiniband/hw/qib/qib.h
30504 +++ b/drivers/infiniband/hw/qib/qib.h
30505 @@ -51,6 +51,7 @@
30506 #include <linux/completion.h>
30507 #include <linux/kref.h>
30508 #include <linux/sched.h>
30509 +#include <linux/slab.h>
30510
30511 #include "qib_common.h"
30512 #include "qib_verbs.h"
30513 diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
30514 index c351aa4..e6967c2 100644
30515 --- a/drivers/input/gameport/gameport.c
30516 +++ b/drivers/input/gameport/gameport.c
30517 @@ -488,14 +488,14 @@ EXPORT_SYMBOL(gameport_set_phys);
30518 */
30519 static void gameport_init_port(struct gameport *gameport)
30520 {
30521 - static atomic_t gameport_no = ATOMIC_INIT(0);
30522 + static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
30523
30524 __module_get(THIS_MODULE);
30525
30526 mutex_init(&gameport->drv_mutex);
30527 device_initialize(&gameport->dev);
30528 dev_set_name(&gameport->dev, "gameport%lu",
30529 - (unsigned long)atomic_inc_return(&gameport_no) - 1);
30530 + (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
30531 gameport->dev.bus = &gameport_bus;
30532 gameport->dev.release = gameport_release_port;
30533 if (gameport->parent)
30534 diff --git a/drivers/input/input.c b/drivers/input/input.c
30535 index da38d97..2aa0b79 100644
30536 --- a/drivers/input/input.c
30537 +++ b/drivers/input/input.c
30538 @@ -1814,7 +1814,7 @@ static void input_cleanse_bitmasks(struct input_dev *dev)
30539 */
30540 int input_register_device(struct input_dev *dev)
30541 {
30542 - static atomic_t input_no = ATOMIC_INIT(0);
30543 + static atomic_unchecked_t input_no = ATOMIC_INIT(0);
30544 struct input_handler *handler;
30545 const char *path;
30546 int error;
30547 @@ -1851,7 +1851,7 @@ int input_register_device(struct input_dev *dev)
30548 dev->setkeycode = input_default_setkeycode;
30549
30550 dev_set_name(&dev->dev, "input%ld",
30551 - (unsigned long) atomic_inc_return(&input_no) - 1);
30552 + (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
30553
30554 error = device_add(&dev->dev);
30555 if (error)
30556 diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
30557 index b8d8611..7a4a04b 100644
30558 --- a/drivers/input/joystick/sidewinder.c
30559 +++ b/drivers/input/joystick/sidewinder.c
30560 @@ -30,6 +30,7 @@
30561 #include <linux/kernel.h>
30562 #include <linux/module.h>
30563 #include <linux/slab.h>
30564 +#include <linux/sched.h>
30565 #include <linux/init.h>
30566 #include <linux/input.h>
30567 #include <linux/gameport.h>
30568 diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
30569 index d728875..844c89b 100644
30570 --- a/drivers/input/joystick/xpad.c
30571 +++ b/drivers/input/joystick/xpad.c
30572 @@ -710,7 +710,7 @@ static void xpad_led_set(struct led_classdev *led_cdev,
30573
30574 static int xpad_led_probe(struct usb_xpad *xpad)
30575 {
30576 - static atomic_t led_seq = ATOMIC_INIT(0);
30577 + static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
30578 long led_no;
30579 struct xpad_led *led;
30580 struct led_classdev *led_cdev;
30581 @@ -723,7 +723,7 @@ static int xpad_led_probe(struct usb_xpad *xpad)
30582 if (!led)
30583 return -ENOMEM;
30584
30585 - led_no = (long)atomic_inc_return(&led_seq) - 1;
30586 + led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
30587
30588 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
30589 led->xpad = xpad;
30590 diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
30591 index 0110b5a..d3ad144 100644
30592 --- a/drivers/input/mousedev.c
30593 +++ b/drivers/input/mousedev.c
30594 @@ -763,7 +763,7 @@ static ssize_t mousedev_read(struct file *file, char __user *buffer,
30595
30596 spin_unlock_irq(&client->packet_lock);
30597
30598 - if (copy_to_user(buffer, data, count))
30599 + if (count > sizeof(data) || copy_to_user(buffer, data, count))
30600 return -EFAULT;
30601
30602 return count;
30603 diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
30604 index ba70058..571d25d 100644
30605 --- a/drivers/input/serio/serio.c
30606 +++ b/drivers/input/serio/serio.c
30607 @@ -497,7 +497,7 @@ static void serio_release_port(struct device *dev)
30608 */
30609 static void serio_init_port(struct serio *serio)
30610 {
30611 - static atomic_t serio_no = ATOMIC_INIT(0);
30612 + static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
30613
30614 __module_get(THIS_MODULE);
30615
30616 @@ -508,7 +508,7 @@ static void serio_init_port(struct serio *serio)
30617 mutex_init(&serio->drv_mutex);
30618 device_initialize(&serio->dev);
30619 dev_set_name(&serio->dev, "serio%ld",
30620 - (long)atomic_inc_return(&serio_no) - 1);
30621 + (long)atomic_inc_return_unchecked(&serio_no) - 1);
30622 serio->dev.bus = &serio_bus;
30623 serio->dev.release = serio_release_port;
30624 serio->dev.groups = serio_device_attr_groups;
30625 diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
30626 index e44933d..9ba484a 100644
30627 --- a/drivers/isdn/capi/capi.c
30628 +++ b/drivers/isdn/capi/capi.c
30629 @@ -83,8 +83,8 @@ struct capiminor {
30630
30631 struct capi20_appl *ap;
30632 u32 ncci;
30633 - atomic_t datahandle;
30634 - atomic_t msgid;
30635 + atomic_unchecked_t datahandle;
30636 + atomic_unchecked_t msgid;
30637
30638 struct tty_port port;
30639 int ttyinstop;
30640 @@ -397,7 +397,7 @@ gen_data_b3_resp_for(struct capiminor *mp, struct sk_buff *skb)
30641 capimsg_setu16(s, 2, mp->ap->applid);
30642 capimsg_setu8 (s, 4, CAPI_DATA_B3);
30643 capimsg_setu8 (s, 5, CAPI_RESP);
30644 - capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
30645 + capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid));
30646 capimsg_setu32(s, 8, mp->ncci);
30647 capimsg_setu16(s, 12, datahandle);
30648 }
30649 @@ -518,14 +518,14 @@ static void handle_minor_send(struct capiminor *mp)
30650 mp->outbytes -= len;
30651 spin_unlock_bh(&mp->outlock);
30652
30653 - datahandle = atomic_inc_return(&mp->datahandle);
30654 + datahandle = atomic_inc_return_unchecked(&mp->datahandle);
30655 skb_push(skb, CAPI_DATA_B3_REQ_LEN);
30656 memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
30657 capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
30658 capimsg_setu16(skb->data, 2, mp->ap->applid);
30659 capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
30660 capimsg_setu8 (skb->data, 5, CAPI_REQ);
30661 - capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
30662 + capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid));
30663 capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
30664 capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
30665 capimsg_setu16(skb->data, 16, len); /* Data length */
30666 diff --git a/drivers/isdn/gigaset/common.c b/drivers/isdn/gigaset/common.c
30667 index db621db..825ea1a 100644
30668 --- a/drivers/isdn/gigaset/common.c
30669 +++ b/drivers/isdn/gigaset/common.c
30670 @@ -723,7 +723,7 @@ struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels,
30671 cs->commands_pending = 0;
30672 cs->cur_at_seq = 0;
30673 cs->gotfwver = -1;
30674 - cs->open_count = 0;
30675 + local_set(&cs->open_count, 0);
30676 cs->dev = NULL;
30677 cs->tty = NULL;
30678 cs->tty_dev = NULL;
30679 diff --git a/drivers/isdn/gigaset/gigaset.h b/drivers/isdn/gigaset/gigaset.h
30680 index 212efaf..f187c6b 100644
30681 --- a/drivers/isdn/gigaset/gigaset.h
30682 +++ b/drivers/isdn/gigaset/gigaset.h
30683 @@ -35,6 +35,7 @@
30684 #include <linux/tty_driver.h>
30685 #include <linux/list.h>
30686 #include <linux/atomic.h>
30687 +#include <asm/local.h>
30688
30689 #define GIG_VERSION {0, 5, 0, 0}
30690 #define GIG_COMPAT {0, 4, 0, 0}
30691 @@ -433,7 +434,7 @@ struct cardstate {
30692 spinlock_t cmdlock;
30693 unsigned curlen, cmdbytes;
30694
30695 - unsigned open_count;
30696 + local_t open_count;
30697 struct tty_struct *tty;
30698 struct tasklet_struct if_wake_tasklet;
30699 unsigned control_state;
30700 diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
30701 index ee0a549..a7c9798 100644
30702 --- a/drivers/isdn/gigaset/interface.c
30703 +++ b/drivers/isdn/gigaset/interface.c
30704 @@ -163,9 +163,7 @@ static int if_open(struct tty_struct *tty, struct file *filp)
30705 }
30706 tty->driver_data = cs;
30707
30708 - ++cs->open_count;
30709 -
30710 - if (cs->open_count == 1) {
30711 + if (local_inc_return(&cs->open_count) == 1) {
30712 spin_lock_irqsave(&cs->lock, flags);
30713 cs->tty = tty;
30714 spin_unlock_irqrestore(&cs->lock, flags);
30715 @@ -193,10 +191,10 @@ static void if_close(struct tty_struct *tty, struct file *filp)
30716
30717 if (!cs->connected)
30718 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
30719 - else if (!cs->open_count)
30720 + else if (!local_read(&cs->open_count))
30721 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30722 else {
30723 - if (!--cs->open_count) {
30724 + if (!local_dec_return(&cs->open_count)) {
30725 spin_lock_irqsave(&cs->lock, flags);
30726 cs->tty = NULL;
30727 spin_unlock_irqrestore(&cs->lock, flags);
30728 @@ -231,7 +229,7 @@ static int if_ioctl(struct tty_struct *tty,
30729 if (!cs->connected) {
30730 gig_dbg(DEBUG_IF, "not connected");
30731 retval = -ENODEV;
30732 - } else if (!cs->open_count)
30733 + } else if (!local_read(&cs->open_count))
30734 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30735 else {
30736 retval = 0;
30737 @@ -361,7 +359,7 @@ static int if_write(struct tty_struct *tty, const unsigned char *buf, int count)
30738 retval = -ENODEV;
30739 goto done;
30740 }
30741 - if (!cs->open_count) {
30742 + if (!local_read(&cs->open_count)) {
30743 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30744 retval = -ENODEV;
30745 goto done;
30746 @@ -414,7 +412,7 @@ static int if_write_room(struct tty_struct *tty)
30747 if (!cs->connected) {
30748 gig_dbg(DEBUG_IF, "not connected");
30749 retval = -ENODEV;
30750 - } else if (!cs->open_count)
30751 + } else if (!local_read(&cs->open_count))
30752 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30753 else if (cs->mstate != MS_LOCKED) {
30754 dev_warn(cs->dev, "can't write to unlocked device\n");
30755 @@ -444,7 +442,7 @@ static int if_chars_in_buffer(struct tty_struct *tty)
30756
30757 if (!cs->connected)
30758 gig_dbg(DEBUG_IF, "not connected");
30759 - else if (!cs->open_count)
30760 + else if (!local_read(&cs->open_count))
30761 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30762 else if (cs->mstate != MS_LOCKED)
30763 dev_warn(cs->dev, "can't write to unlocked device\n");
30764 @@ -472,7 +470,7 @@ static void if_throttle(struct tty_struct *tty)
30765
30766 if (!cs->connected)
30767 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
30768 - else if (!cs->open_count)
30769 + else if (!local_read(&cs->open_count))
30770 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30771 else
30772 gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
30773 @@ -496,7 +494,7 @@ static void if_unthrottle(struct tty_struct *tty)
30774
30775 if (!cs->connected)
30776 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
30777 - else if (!cs->open_count)
30778 + else if (!local_read(&cs->open_count))
30779 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30780 else
30781 gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
30782 @@ -527,7 +525,7 @@ static void if_set_termios(struct tty_struct *tty, struct ktermios *old)
30783 goto out;
30784 }
30785
30786 - if (!cs->open_count) {
30787 + if (!local_read(&cs->open_count)) {
30788 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30789 goto out;
30790 }
30791 diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
30792 index 2a57da59..e7a12ed 100644
30793 --- a/drivers/isdn/hardware/avm/b1.c
30794 +++ b/drivers/isdn/hardware/avm/b1.c
30795 @@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capiloaddatapart * t4file)
30796 }
30797 if (left) {
30798 if (t4file->user) {
30799 - if (copy_from_user(buf, dp, left))
30800 + if (left > sizeof buf || copy_from_user(buf, dp, left))
30801 return -EFAULT;
30802 } else {
30803 memcpy(buf, dp, left);
30804 @@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capiloaddatapart * config)
30805 }
30806 if (left) {
30807 if (config->user) {
30808 - if (copy_from_user(buf, dp, left))
30809 + if (left > sizeof buf || copy_from_user(buf, dp, left))
30810 return -EFAULT;
30811 } else {
30812 memcpy(buf, dp, left);
30813 diff --git a/drivers/isdn/hardware/eicon/divasync.h b/drivers/isdn/hardware/eicon/divasync.h
30814 index 85784a7..a19ca98 100644
30815 --- a/drivers/isdn/hardware/eicon/divasync.h
30816 +++ b/drivers/isdn/hardware/eicon/divasync.h
30817 @@ -146,7 +146,7 @@ typedef struct _diva_didd_add_adapter {
30818 } diva_didd_add_adapter_t;
30819 typedef struct _diva_didd_remove_adapter {
30820 IDI_CALL p_request;
30821 -} diva_didd_remove_adapter_t;
30822 +} __no_const diva_didd_remove_adapter_t;
30823 typedef struct _diva_didd_read_adapter_array {
30824 void * buffer;
30825 dword length;
30826 diff --git a/drivers/isdn/hardware/eicon/xdi_adapter.h b/drivers/isdn/hardware/eicon/xdi_adapter.h
30827 index a3bd163..8956575 100644
30828 --- a/drivers/isdn/hardware/eicon/xdi_adapter.h
30829 +++ b/drivers/isdn/hardware/eicon/xdi_adapter.h
30830 @@ -44,7 +44,7 @@ typedef struct _xdi_mbox_t {
30831 typedef struct _diva_os_idi_adapter_interface {
30832 diva_init_card_proc_t cleanup_adapter_proc;
30833 diva_cmd_card_proc_t cmd_proc;
30834 -} diva_os_idi_adapter_interface_t;
30835 +} __no_const diva_os_idi_adapter_interface_t;
30836
30837 typedef struct _diva_os_xdi_adapter {
30838 struct list_head link;
30839 diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
30840 index 1f355bb..43f1fea 100644
30841 --- a/drivers/isdn/icn/icn.c
30842 +++ b/drivers/isdn/icn/icn.c
30843 @@ -1045,7 +1045,7 @@ icn_writecmd(const u_char * buf, int len, int user, icn_card * card)
30844 if (count > len)
30845 count = len;
30846 if (user) {
30847 - if (copy_from_user(msg, buf, count))
30848 + if (count > sizeof msg || copy_from_user(msg, buf, count))
30849 return -EFAULT;
30850 } else
30851 memcpy(msg, buf, count);
30852 diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
30853 index b5fdcb7..5b6c59f 100644
30854 --- a/drivers/lguest/core.c
30855 +++ b/drivers/lguest/core.c
30856 @@ -92,9 +92,17 @@ static __init int map_switcher(void)
30857 * it's worked so far. The end address needs +1 because __get_vm_area
30858 * allocates an extra guard page, so we need space for that.
30859 */
30860 +
30861 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
30862 + switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
30863 + VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
30864 + + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
30865 +#else
30866 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
30867 VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
30868 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
30869 +#endif
30870 +
30871 if (!switcher_vma) {
30872 err = -ENOMEM;
30873 printk("lguest: could not map switcher pages high\n");
30874 @@ -119,7 +127,7 @@ static __init int map_switcher(void)
30875 * Now the Switcher is mapped at the right address, we can't fail!
30876 * Copy in the compiled-in Switcher code (from x86/switcher_32.S).
30877 */
30878 - memcpy(switcher_vma->addr, start_switcher_text,
30879 + memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
30880 end_switcher_text - start_switcher_text);
30881
30882 printk(KERN_INFO "lguest: mapped switcher at %p\n",
30883 diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
30884 index 65af42f..530c87a 100644
30885 --- a/drivers/lguest/x86/core.c
30886 +++ b/drivers/lguest/x86/core.c
30887 @@ -59,7 +59,7 @@ static struct {
30888 /* Offset from where switcher.S was compiled to where we've copied it */
30889 static unsigned long switcher_offset(void)
30890 {
30891 - return SWITCHER_ADDR - (unsigned long)start_switcher_text;
30892 + return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
30893 }
30894
30895 /* This cpu's struct lguest_pages. */
30896 @@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
30897 * These copies are pretty cheap, so we do them unconditionally: */
30898 /* Save the current Host top-level page directory.
30899 */
30900 +
30901 +#ifdef CONFIG_PAX_PER_CPU_PGD
30902 + pages->state.host_cr3 = read_cr3();
30903 +#else
30904 pages->state.host_cr3 = __pa(current->mm->pgd);
30905 +#endif
30906 +
30907 /*
30908 * Set up the Guest's page tables to see this CPU's pages (and no
30909 * other CPU's pages).
30910 @@ -472,7 +478,7 @@ void __init lguest_arch_host_init(void)
30911 * compiled-in switcher code and the high-mapped copy we just made.
30912 */
30913 for (i = 0; i < IDT_ENTRIES; i++)
30914 - default_idt_entries[i] += switcher_offset();
30915 + default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
30916
30917 /*
30918 * Set up the Switcher's per-cpu areas.
30919 @@ -555,7 +561,7 @@ void __init lguest_arch_host_init(void)
30920 * it will be undisturbed when we switch. To change %cs and jump we
30921 * need this structure to feed to Intel's "lcall" instruction.
30922 */
30923 - lguest_entry.offset = (long)switch_to_guest + switcher_offset();
30924 + lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
30925 lguest_entry.segment = LGUEST_CS;
30926
30927 /*
30928 diff --git a/drivers/lguest/x86/switcher_32.S b/drivers/lguest/x86/switcher_32.S
30929 index 40634b0..4f5855e 100644
30930 --- a/drivers/lguest/x86/switcher_32.S
30931 +++ b/drivers/lguest/x86/switcher_32.S
30932 @@ -87,6 +87,7 @@
30933 #include <asm/page.h>
30934 #include <asm/segment.h>
30935 #include <asm/lguest.h>
30936 +#include <asm/processor-flags.h>
30937
30938 // We mark the start of the code to copy
30939 // It's placed in .text tho it's never run here
30940 @@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
30941 // Changes type when we load it: damn Intel!
30942 // For after we switch over our page tables
30943 // That entry will be read-only: we'd crash.
30944 +
30945 +#ifdef CONFIG_PAX_KERNEXEC
30946 + mov %cr0, %edx
30947 + xor $X86_CR0_WP, %edx
30948 + mov %edx, %cr0
30949 +#endif
30950 +
30951 movl $(GDT_ENTRY_TSS*8), %edx
30952 ltr %dx
30953
30954 @@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
30955 // Let's clear it again for our return.
30956 // The GDT descriptor of the Host
30957 // Points to the table after two "size" bytes
30958 - movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
30959 + movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
30960 // Clear "used" from type field (byte 5, bit 2)
30961 - andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
30962 + andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
30963 +
30964 +#ifdef CONFIG_PAX_KERNEXEC
30965 + mov %cr0, %eax
30966 + xor $X86_CR0_WP, %eax
30967 + mov %eax, %cr0
30968 +#endif
30969
30970 // Once our page table's switched, the Guest is live!
30971 // The Host fades as we run this final step.
30972 @@ -295,13 +309,12 @@ deliver_to_host:
30973 // I consulted gcc, and it gave
30974 // These instructions, which I gladly credit:
30975 leal (%edx,%ebx,8), %eax
30976 - movzwl (%eax),%edx
30977 - movl 4(%eax), %eax
30978 - xorw %ax, %ax
30979 - orl %eax, %edx
30980 + movl 4(%eax), %edx
30981 + movw (%eax), %dx
30982 // Now the address of the handler's in %edx
30983 // We call it now: its "iret" drops us home.
30984 - jmp *%edx
30985 + ljmp $__KERNEL_CS, $1f
30986 +1: jmp *%edx
30987
30988 // Every interrupt can come to us here
30989 // But we must truly tell each apart.
30990 diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c
30991 index 4daf9e5..b8d1d0f 100644
30992 --- a/drivers/macintosh/macio_asic.c
30993 +++ b/drivers/macintosh/macio_asic.c
30994 @@ -748,7 +748,7 @@ static void __devexit macio_pci_remove(struct pci_dev* pdev)
30995 * MacIO is matched against any Apple ID, it's probe() function
30996 * will then decide wether it applies or not
30997 */
30998 -static const struct pci_device_id __devinitdata pci_ids [] = { {
30999 +static const struct pci_device_id __devinitconst pci_ids [] = { {
31000 .vendor = PCI_VENDOR_ID_APPLE,
31001 .device = PCI_ANY_ID,
31002 .subvendor = PCI_ANY_ID,
31003 diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
31004 index 31c2dc2..a2de7a6 100644
31005 --- a/drivers/md/dm-ioctl.c
31006 +++ b/drivers/md/dm-ioctl.c
31007 @@ -1589,7 +1589,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
31008 cmd == DM_LIST_VERSIONS_CMD)
31009 return 0;
31010
31011 - if ((cmd == DM_DEV_CREATE_CMD)) {
31012 + if (cmd == DM_DEV_CREATE_CMD) {
31013 if (!*param->name) {
31014 DMWARN("name not supplied when creating device");
31015 return -EINVAL;
31016 diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
31017 index 9bfd057..01180bc 100644
31018 --- a/drivers/md/dm-raid1.c
31019 +++ b/drivers/md/dm-raid1.c
31020 @@ -40,7 +40,7 @@ enum dm_raid1_error {
31021
31022 struct mirror {
31023 struct mirror_set *ms;
31024 - atomic_t error_count;
31025 + atomic_unchecked_t error_count;
31026 unsigned long error_type;
31027 struct dm_dev *dev;
31028 sector_t offset;
31029 @@ -185,7 +185,7 @@ static struct mirror *get_valid_mirror(struct mirror_set *ms)
31030 struct mirror *m;
31031
31032 for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
31033 - if (!atomic_read(&m->error_count))
31034 + if (!atomic_read_unchecked(&m->error_count))
31035 return m;
31036
31037 return NULL;
31038 @@ -217,7 +217,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
31039 * simple way to tell if a device has encountered
31040 * errors.
31041 */
31042 - atomic_inc(&m->error_count);
31043 + atomic_inc_unchecked(&m->error_count);
31044
31045 if (test_and_set_bit(error_type, &m->error_type))
31046 return;
31047 @@ -408,7 +408,7 @@ static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
31048 struct mirror *m = get_default_mirror(ms);
31049
31050 do {
31051 - if (likely(!atomic_read(&m->error_count)))
31052 + if (likely(!atomic_read_unchecked(&m->error_count)))
31053 return m;
31054
31055 if (m-- == ms->mirror)
31056 @@ -422,7 +422,7 @@ static int default_ok(struct mirror *m)
31057 {
31058 struct mirror *default_mirror = get_default_mirror(m->ms);
31059
31060 - return !atomic_read(&default_mirror->error_count);
31061 + return !atomic_read_unchecked(&default_mirror->error_count);
31062 }
31063
31064 static int mirror_available(struct mirror_set *ms, struct bio *bio)
31065 @@ -559,7 +559,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
31066 */
31067 if (likely(region_in_sync(ms, region, 1)))
31068 m = choose_mirror(ms, bio->bi_sector);
31069 - else if (m && atomic_read(&m->error_count))
31070 + else if (m && atomic_read_unchecked(&m->error_count))
31071 m = NULL;
31072
31073 if (likely(m))
31074 @@ -937,7 +937,7 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
31075 }
31076
31077 ms->mirror[mirror].ms = ms;
31078 - atomic_set(&(ms->mirror[mirror].error_count), 0);
31079 + atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
31080 ms->mirror[mirror].error_type = 0;
31081 ms->mirror[mirror].offset = offset;
31082
31083 @@ -1347,7 +1347,7 @@ static void mirror_resume(struct dm_target *ti)
31084 */
31085 static char device_status_char(struct mirror *m)
31086 {
31087 - if (!atomic_read(&(m->error_count)))
31088 + if (!atomic_read_unchecked(&(m->error_count)))
31089 return 'A';
31090
31091 return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
31092 diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
31093 index 3d80cf0..b77cc47 100644
31094 --- a/drivers/md/dm-stripe.c
31095 +++ b/drivers/md/dm-stripe.c
31096 @@ -20,7 +20,7 @@ struct stripe {
31097 struct dm_dev *dev;
31098 sector_t physical_start;
31099
31100 - atomic_t error_count;
31101 + atomic_unchecked_t error_count;
31102 };
31103
31104 struct stripe_c {
31105 @@ -192,7 +192,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
31106 kfree(sc);
31107 return r;
31108 }
31109 - atomic_set(&(sc->stripe[i].error_count), 0);
31110 + atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
31111 }
31112
31113 ti->private = sc;
31114 @@ -314,7 +314,7 @@ static int stripe_status(struct dm_target *ti,
31115 DMEMIT("%d ", sc->stripes);
31116 for (i = 0; i < sc->stripes; i++) {
31117 DMEMIT("%s ", sc->stripe[i].dev->name);
31118 - buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
31119 + buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
31120 'D' : 'A';
31121 }
31122 buffer[i] = '\0';
31123 @@ -361,8 +361,8 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio,
31124 */
31125 for (i = 0; i < sc->stripes; i++)
31126 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
31127 - atomic_inc(&(sc->stripe[i].error_count));
31128 - if (atomic_read(&(sc->stripe[i].error_count)) <
31129 + atomic_inc_unchecked(&(sc->stripe[i].error_count));
31130 + if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
31131 DM_IO_ERROR_THRESHOLD)
31132 schedule_work(&sc->trigger_event);
31133 }
31134 diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
31135 index 8e91321..fd17aef 100644
31136 --- a/drivers/md/dm-table.c
31137 +++ b/drivers/md/dm-table.c
31138 @@ -391,7 +391,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
31139 if (!dev_size)
31140 return 0;
31141
31142 - if ((start >= dev_size) || (start + len > dev_size)) {
31143 + if ((start >= dev_size) || (len > dev_size - start)) {
31144 DMWARN("%s: %s too small for target: "
31145 "start=%llu, len=%llu, dev_size=%llu",
31146 dm_device_name(ti->table->md), bdevname(bdev, b),
31147 diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
31148 index 59c4f04..4c7b661 100644
31149 --- a/drivers/md/dm-thin-metadata.c
31150 +++ b/drivers/md/dm-thin-metadata.c
31151 @@ -431,7 +431,7 @@ static int init_pmd(struct dm_pool_metadata *pmd,
31152
31153 pmd->info.tm = tm;
31154 pmd->info.levels = 2;
31155 - pmd->info.value_type.context = pmd->data_sm;
31156 + pmd->info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
31157 pmd->info.value_type.size = sizeof(__le64);
31158 pmd->info.value_type.inc = data_block_inc;
31159 pmd->info.value_type.dec = data_block_dec;
31160 @@ -450,7 +450,7 @@ static int init_pmd(struct dm_pool_metadata *pmd,
31161
31162 pmd->bl_info.tm = tm;
31163 pmd->bl_info.levels = 1;
31164 - pmd->bl_info.value_type.context = pmd->data_sm;
31165 + pmd->bl_info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
31166 pmd->bl_info.value_type.size = sizeof(__le64);
31167 pmd->bl_info.value_type.inc = data_block_inc;
31168 pmd->bl_info.value_type.dec = data_block_dec;
31169 diff --git a/drivers/md/dm.c b/drivers/md/dm.c
31170 index 4720f68..78d1df7 100644
31171 --- a/drivers/md/dm.c
31172 +++ b/drivers/md/dm.c
31173 @@ -177,9 +177,9 @@ struct mapped_device {
31174 /*
31175 * Event handling.
31176 */
31177 - atomic_t event_nr;
31178 + atomic_unchecked_t event_nr;
31179 wait_queue_head_t eventq;
31180 - atomic_t uevent_seq;
31181 + atomic_unchecked_t uevent_seq;
31182 struct list_head uevent_list;
31183 spinlock_t uevent_lock; /* Protect access to uevent_list */
31184
31185 @@ -1845,8 +1845,8 @@ static struct mapped_device *alloc_dev(int minor)
31186 rwlock_init(&md->map_lock);
31187 atomic_set(&md->holders, 1);
31188 atomic_set(&md->open_count, 0);
31189 - atomic_set(&md->event_nr, 0);
31190 - atomic_set(&md->uevent_seq, 0);
31191 + atomic_set_unchecked(&md->event_nr, 0);
31192 + atomic_set_unchecked(&md->uevent_seq, 0);
31193 INIT_LIST_HEAD(&md->uevent_list);
31194 spin_lock_init(&md->uevent_lock);
31195
31196 @@ -1980,7 +1980,7 @@ static void event_callback(void *context)
31197
31198 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
31199
31200 - atomic_inc(&md->event_nr);
31201 + atomic_inc_unchecked(&md->event_nr);
31202 wake_up(&md->eventq);
31203 }
31204
31205 @@ -2622,18 +2622,18 @@ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
31206
31207 uint32_t dm_next_uevent_seq(struct mapped_device *md)
31208 {
31209 - return atomic_add_return(1, &md->uevent_seq);
31210 + return atomic_add_return_unchecked(1, &md->uevent_seq);
31211 }
31212
31213 uint32_t dm_get_event_nr(struct mapped_device *md)
31214 {
31215 - return atomic_read(&md->event_nr);
31216 + return atomic_read_unchecked(&md->event_nr);
31217 }
31218
31219 int dm_wait_event(struct mapped_device *md, int event_nr)
31220 {
31221 return wait_event_interruptible(md->eventq,
31222 - (event_nr != atomic_read(&md->event_nr)));
31223 + (event_nr != atomic_read_unchecked(&md->event_nr)));
31224 }
31225
31226 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
31227 diff --git a/drivers/md/md.c b/drivers/md/md.c
31228 index f47f1f8..b7f559e 100644
31229 --- a/drivers/md/md.c
31230 +++ b/drivers/md/md.c
31231 @@ -278,10 +278,10 @@ EXPORT_SYMBOL_GPL(md_trim_bio);
31232 * start build, activate spare
31233 */
31234 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
31235 -static atomic_t md_event_count;
31236 +static atomic_unchecked_t md_event_count;
31237 void md_new_event(struct mddev *mddev)
31238 {
31239 - atomic_inc(&md_event_count);
31240 + atomic_inc_unchecked(&md_event_count);
31241 wake_up(&md_event_waiters);
31242 }
31243 EXPORT_SYMBOL_GPL(md_new_event);
31244 @@ -291,7 +291,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
31245 */
31246 static void md_new_event_inintr(struct mddev *mddev)
31247 {
31248 - atomic_inc(&md_event_count);
31249 + atomic_inc_unchecked(&md_event_count);
31250 wake_up(&md_event_waiters);
31251 }
31252
31253 @@ -1525,7 +1525,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
31254
31255 rdev->preferred_minor = 0xffff;
31256 rdev->data_offset = le64_to_cpu(sb->data_offset);
31257 - atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
31258 + atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
31259
31260 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
31261 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
31262 @@ -1742,7 +1742,7 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
31263 else
31264 sb->resync_offset = cpu_to_le64(0);
31265
31266 - sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
31267 + sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
31268
31269 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
31270 sb->size = cpu_to_le64(mddev->dev_sectors);
31271 @@ -2639,7 +2639,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
31272 static ssize_t
31273 errors_show(struct md_rdev *rdev, char *page)
31274 {
31275 - return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
31276 + return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
31277 }
31278
31279 static ssize_t
31280 @@ -2648,7 +2648,7 @@ errors_store(struct md_rdev *rdev, const char *buf, size_t len)
31281 char *e;
31282 unsigned long n = simple_strtoul(buf, &e, 10);
31283 if (*buf && (*e == 0 || *e == '\n')) {
31284 - atomic_set(&rdev->corrected_errors, n);
31285 + atomic_set_unchecked(&rdev->corrected_errors, n);
31286 return len;
31287 }
31288 return -EINVAL;
31289 @@ -3039,8 +3039,8 @@ int md_rdev_init(struct md_rdev *rdev)
31290 rdev->sb_loaded = 0;
31291 rdev->bb_page = NULL;
31292 atomic_set(&rdev->nr_pending, 0);
31293 - atomic_set(&rdev->read_errors, 0);
31294 - atomic_set(&rdev->corrected_errors, 0);
31295 + atomic_set_unchecked(&rdev->read_errors, 0);
31296 + atomic_set_unchecked(&rdev->corrected_errors, 0);
31297
31298 INIT_LIST_HEAD(&rdev->same_set);
31299 init_waitqueue_head(&rdev->blocked_wait);
31300 @@ -6683,7 +6683,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
31301
31302 spin_unlock(&pers_lock);
31303 seq_printf(seq, "\n");
31304 - seq->poll_event = atomic_read(&md_event_count);
31305 + seq->poll_event = atomic_read_unchecked(&md_event_count);
31306 return 0;
31307 }
31308 if (v == (void*)2) {
31309 @@ -6772,7 +6772,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
31310 chunk_kb ? "KB" : "B");
31311 if (bitmap->file) {
31312 seq_printf(seq, ", file: ");
31313 - seq_path(seq, &bitmap->file->f_path, " \t\n");
31314 + seq_path(seq, &bitmap->file->f_path, " \t\n\\");
31315 }
31316
31317 seq_printf(seq, "\n");
31318 @@ -6803,7 +6803,7 @@ static int md_seq_open(struct inode *inode, struct file *file)
31319 return error;
31320
31321 seq = file->private_data;
31322 - seq->poll_event = atomic_read(&md_event_count);
31323 + seq->poll_event = atomic_read_unchecked(&md_event_count);
31324 return error;
31325 }
31326
31327 @@ -6817,7 +6817,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
31328 /* always allow read */
31329 mask = POLLIN | POLLRDNORM;
31330
31331 - if (seq->poll_event != atomic_read(&md_event_count))
31332 + if (seq->poll_event != atomic_read_unchecked(&md_event_count))
31333 mask |= POLLERR | POLLPRI;
31334 return mask;
31335 }
31336 @@ -6861,7 +6861,7 @@ static int is_mddev_idle(struct mddev *mddev, int init)
31337 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
31338 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
31339 (int)part_stat_read(&disk->part0, sectors[1]) -
31340 - atomic_read(&disk->sync_io);
31341 + atomic_read_unchecked(&disk->sync_io);
31342 /* sync IO will cause sync_io to increase before the disk_stats
31343 * as sync_io is counted when a request starts, and
31344 * disk_stats is counted when it completes.
31345 diff --git a/drivers/md/md.h b/drivers/md/md.h
31346 index cf742d9..7c7c745 100644
31347 --- a/drivers/md/md.h
31348 +++ b/drivers/md/md.h
31349 @@ -120,13 +120,13 @@ struct md_rdev {
31350 * only maintained for arrays that
31351 * support hot removal
31352 */
31353 - atomic_t read_errors; /* number of consecutive read errors that
31354 + atomic_unchecked_t read_errors; /* number of consecutive read errors that
31355 * we have tried to ignore.
31356 */
31357 struct timespec last_read_error; /* monotonic time since our
31358 * last read error
31359 */
31360 - atomic_t corrected_errors; /* number of corrected read errors,
31361 + atomic_unchecked_t corrected_errors; /* number of corrected read errors,
31362 * for reporting to userspace and storing
31363 * in superblock.
31364 */
31365 @@ -410,7 +410,7 @@ static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
31366
31367 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
31368 {
31369 - atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
31370 + atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
31371 }
31372
31373 struct md_personality
31374 diff --git a/drivers/md/persistent-data/dm-space-map-checker.c b/drivers/md/persistent-data/dm-space-map-checker.c
31375 index 50ed53b..4f29d7d 100644
31376 --- a/drivers/md/persistent-data/dm-space-map-checker.c
31377 +++ b/drivers/md/persistent-data/dm-space-map-checker.c
31378 @@ -159,7 +159,7 @@ static void ca_destroy(struct count_array *ca)
31379 /*----------------------------------------------------------------*/
31380
31381 struct sm_checker {
31382 - struct dm_space_map sm;
31383 + dm_space_map_no_const sm;
31384
31385 struct count_array old_counts;
31386 struct count_array counts;
31387 diff --git a/drivers/md/persistent-data/dm-space-map-disk.c b/drivers/md/persistent-data/dm-space-map-disk.c
31388 index fc469ba..2d91555 100644
31389 --- a/drivers/md/persistent-data/dm-space-map-disk.c
31390 +++ b/drivers/md/persistent-data/dm-space-map-disk.c
31391 @@ -23,7 +23,7 @@
31392 * Space map interface.
31393 */
31394 struct sm_disk {
31395 - struct dm_space_map sm;
31396 + dm_space_map_no_const sm;
31397
31398 struct ll_disk ll;
31399 struct ll_disk old_ll;
31400 diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
31401 index e89ae5e..062e4c2 100644
31402 --- a/drivers/md/persistent-data/dm-space-map-metadata.c
31403 +++ b/drivers/md/persistent-data/dm-space-map-metadata.c
31404 @@ -43,7 +43,7 @@ struct block_op {
31405 };
31406
31407 struct sm_metadata {
31408 - struct dm_space_map sm;
31409 + dm_space_map_no_const sm;
31410
31411 struct ll_disk ll;
31412 struct ll_disk old_ll;
31413 diff --git a/drivers/md/persistent-data/dm-space-map.h b/drivers/md/persistent-data/dm-space-map.h
31414 index 1cbfc6b..56e1dbb 100644
31415 --- a/drivers/md/persistent-data/dm-space-map.h
31416 +++ b/drivers/md/persistent-data/dm-space-map.h
31417 @@ -60,6 +60,7 @@ struct dm_space_map {
31418 int (*root_size)(struct dm_space_map *sm, size_t *result);
31419 int (*copy_root)(struct dm_space_map *sm, void *copy_to_here_le, size_t len);
31420 };
31421 +typedef struct dm_space_map __no_const dm_space_map_no_const;
31422
31423 /*----------------------------------------------------------------*/
31424
31425 diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
31426 index 7d9e071..015b1d5 100644
31427 --- a/drivers/md/raid1.c
31428 +++ b/drivers/md/raid1.c
31429 @@ -1568,7 +1568,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
31430 if (r1_sync_page_io(rdev, sect, s,
31431 bio->bi_io_vec[idx].bv_page,
31432 READ) != 0)
31433 - atomic_add(s, &rdev->corrected_errors);
31434 + atomic_add_unchecked(s, &rdev->corrected_errors);
31435 }
31436 sectors -= s;
31437 sect += s;
31438 @@ -1781,7 +1781,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
31439 test_bit(In_sync, &rdev->flags)) {
31440 if (r1_sync_page_io(rdev, sect, s,
31441 conf->tmppage, READ)) {
31442 - atomic_add(s, &rdev->corrected_errors);
31443 + atomic_add_unchecked(s, &rdev->corrected_errors);
31444 printk(KERN_INFO
31445 "md/raid1:%s: read error corrected "
31446 "(%d sectors at %llu on %s)\n",
31447 diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
31448 index 685ddf3..955b087 100644
31449 --- a/drivers/md/raid10.c
31450 +++ b/drivers/md/raid10.c
31451 @@ -1440,7 +1440,7 @@ static void end_sync_read(struct bio *bio, int error)
31452 /* The write handler will notice the lack of
31453 * R10BIO_Uptodate and record any errors etc
31454 */
31455 - atomic_add(r10_bio->sectors,
31456 + atomic_add_unchecked(r10_bio->sectors,
31457 &conf->mirrors[d].rdev->corrected_errors);
31458
31459 /* for reconstruct, we always reschedule after a read.
31460 @@ -1740,7 +1740,7 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
31461 {
31462 struct timespec cur_time_mon;
31463 unsigned long hours_since_last;
31464 - unsigned int read_errors = atomic_read(&rdev->read_errors);
31465 + unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors);
31466
31467 ktime_get_ts(&cur_time_mon);
31468
31469 @@ -1762,9 +1762,9 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
31470 * overflowing the shift of read_errors by hours_since_last.
31471 */
31472 if (hours_since_last >= 8 * sizeof(read_errors))
31473 - atomic_set(&rdev->read_errors, 0);
31474 + atomic_set_unchecked(&rdev->read_errors, 0);
31475 else
31476 - atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
31477 + atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last);
31478 }
31479
31480 static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
31481 @@ -1814,8 +1814,8 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
31482 return;
31483
31484 check_decay_read_errors(mddev, rdev);
31485 - atomic_inc(&rdev->read_errors);
31486 - if (atomic_read(&rdev->read_errors) > max_read_errors) {
31487 + atomic_inc_unchecked(&rdev->read_errors);
31488 + if (atomic_read_unchecked(&rdev->read_errors) > max_read_errors) {
31489 char b[BDEVNAME_SIZE];
31490 bdevname(rdev->bdev, b);
31491
31492 @@ -1823,7 +1823,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
31493 "md/raid10:%s: %s: Raid device exceeded "
31494 "read_error threshold [cur %d:max %d]\n",
31495 mdname(mddev), b,
31496 - atomic_read(&rdev->read_errors), max_read_errors);
31497 + atomic_read_unchecked(&rdev->read_errors), max_read_errors);
31498 printk(KERN_NOTICE
31499 "md/raid10:%s: %s: Failing raid device\n",
31500 mdname(mddev), b);
31501 @@ -1968,7 +1968,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
31502 (unsigned long long)(
31503 sect + rdev->data_offset),
31504 bdevname(rdev->bdev, b));
31505 - atomic_add(s, &rdev->corrected_errors);
31506 + atomic_add_unchecked(s, &rdev->corrected_errors);
31507 }
31508
31509 rdev_dec_pending(rdev, mddev);
31510 diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
31511 index 858fdbb..b2dac95 100644
31512 --- a/drivers/md/raid5.c
31513 +++ b/drivers/md/raid5.c
31514 @@ -1610,19 +1610,19 @@ static void raid5_end_read_request(struct bio * bi, int error)
31515 (unsigned long long)(sh->sector
31516 + rdev->data_offset),
31517 bdevname(rdev->bdev, b));
31518 - atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
31519 + atomic_add_unchecked(STRIPE_SECTORS, &rdev->corrected_errors);
31520 clear_bit(R5_ReadError, &sh->dev[i].flags);
31521 clear_bit(R5_ReWrite, &sh->dev[i].flags);
31522 }
31523 - if (atomic_read(&conf->disks[i].rdev->read_errors))
31524 - atomic_set(&conf->disks[i].rdev->read_errors, 0);
31525 + if (atomic_read_unchecked(&conf->disks[i].rdev->read_errors))
31526 + atomic_set_unchecked(&conf->disks[i].rdev->read_errors, 0);
31527 } else {
31528 const char *bdn = bdevname(conf->disks[i].rdev->bdev, b);
31529 int retry = 0;
31530 rdev = conf->disks[i].rdev;
31531
31532 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
31533 - atomic_inc(&rdev->read_errors);
31534 + atomic_inc_unchecked(&rdev->read_errors);
31535 if (conf->mddev->degraded >= conf->max_degraded)
31536 printk_ratelimited(
31537 KERN_WARNING
31538 @@ -1642,7 +1642,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
31539 (unsigned long long)(sh->sector
31540 + rdev->data_offset),
31541 bdn);
31542 - else if (atomic_read(&rdev->read_errors)
31543 + else if (atomic_read_unchecked(&rdev->read_errors)
31544 > conf->max_nr_stripes)
31545 printk(KERN_WARNING
31546 "md/raid:%s: Too many read errors, failing device %s.\n",
31547 diff --git a/drivers/media/dvb/ddbridge/ddbridge-core.c b/drivers/media/dvb/ddbridge/ddbridge-core.c
31548 index ba9a643..e474ab5 100644
31549 --- a/drivers/media/dvb/ddbridge/ddbridge-core.c
31550 +++ b/drivers/media/dvb/ddbridge/ddbridge-core.c
31551 @@ -1678,7 +1678,7 @@ static struct ddb_info ddb_v6 = {
31552 .subvendor = _subvend, .subdevice = _subdev, \
31553 .driver_data = (unsigned long)&_driverdata }
31554
31555 -static const struct pci_device_id ddb_id_tbl[] __devinitdata = {
31556 +static const struct pci_device_id ddb_id_tbl[] __devinitconst = {
31557 DDB_ID(DDVID, 0x0002, DDVID, 0x0001, ddb_octopus),
31558 DDB_ID(DDVID, 0x0003, DDVID, 0x0001, ddb_octopus),
31559 DDB_ID(DDVID, 0x0003, DDVID, 0x0002, ddb_octopus_le),
31560 diff --git a/drivers/media/dvb/dvb-core/dvb_demux.h b/drivers/media/dvb/dvb-core/dvb_demux.h
31561 index a7d876f..8c21b61 100644
31562 --- a/drivers/media/dvb/dvb-core/dvb_demux.h
31563 +++ b/drivers/media/dvb/dvb-core/dvb_demux.h
31564 @@ -73,7 +73,7 @@ struct dvb_demux_feed {
31565 union {
31566 dmx_ts_cb ts;
31567 dmx_section_cb sec;
31568 - } cb;
31569 + } __no_const cb;
31570
31571 struct dvb_demux *demux;
31572 void *priv;
31573 diff --git a/drivers/media/dvb/dvb-core/dvbdev.c b/drivers/media/dvb/dvb-core/dvbdev.c
31574 index f732877..d38c35a 100644
31575 --- a/drivers/media/dvb/dvb-core/dvbdev.c
31576 +++ b/drivers/media/dvb/dvb-core/dvbdev.c
31577 @@ -192,7 +192,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
31578 const struct dvb_device *template, void *priv, int type)
31579 {
31580 struct dvb_device *dvbdev;
31581 - struct file_operations *dvbdevfops;
31582 + file_operations_no_const *dvbdevfops;
31583 struct device *clsdev;
31584 int minor;
31585 int id;
31586 diff --git a/drivers/media/dvb/dvb-usb/cxusb.c b/drivers/media/dvb/dvb-usb/cxusb.c
31587 index 9f2a02c..5920f88 100644
31588 --- a/drivers/media/dvb/dvb-usb/cxusb.c
31589 +++ b/drivers/media/dvb/dvb-usb/cxusb.c
31590 @@ -1069,7 +1069,7 @@ static struct dib0070_config dib7070p_dib0070_config = {
31591 struct dib0700_adapter_state {
31592 int (*set_param_save) (struct dvb_frontend *,
31593 struct dvb_frontend_parameters *);
31594 -};
31595 +} __no_const;
31596
31597 static int dib7070_set_param_override(struct dvb_frontend *fe,
31598 struct dvb_frontend_parameters *fep)
31599 diff --git a/drivers/media/dvb/dvb-usb/dw2102.c b/drivers/media/dvb/dvb-usb/dw2102.c
31600 index f103ec1..5e8968b 100644
31601 --- a/drivers/media/dvb/dvb-usb/dw2102.c
31602 +++ b/drivers/media/dvb/dvb-usb/dw2102.c
31603 @@ -95,7 +95,7 @@ struct su3000_state {
31604
31605 struct s6x0_state {
31606 int (*old_set_voltage)(struct dvb_frontend *f, fe_sec_voltage_t v);
31607 -};
31608 +} __no_const;
31609
31610 /* debug */
31611 static int dvb_usb_dw2102_debug;
31612 diff --git a/drivers/media/dvb/frontends/dib3000.h b/drivers/media/dvb/frontends/dib3000.h
31613 index 404f63a..4796533 100644
31614 --- a/drivers/media/dvb/frontends/dib3000.h
31615 +++ b/drivers/media/dvb/frontends/dib3000.h
31616 @@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
31617 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
31618 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
31619 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
31620 -};
31621 +} __no_const;
31622
31623 #if defined(CONFIG_DVB_DIB3000MB) || (defined(CONFIG_DVB_DIB3000MB_MODULE) && defined(MODULE))
31624 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
31625 diff --git a/drivers/media/dvb/frontends/ds3000.c b/drivers/media/dvb/frontends/ds3000.c
31626 index 90bf573..e8463da 100644
31627 --- a/drivers/media/dvb/frontends/ds3000.c
31628 +++ b/drivers/media/dvb/frontends/ds3000.c
31629 @@ -1210,7 +1210,7 @@ static int ds3000_set_frontend(struct dvb_frontend *fe,
31630
31631 for (i = 0; i < 30 ; i++) {
31632 ds3000_read_status(fe, &status);
31633 - if (status && FE_HAS_LOCK)
31634 + if (status & FE_HAS_LOCK)
31635 break;
31636
31637 msleep(10);
31638 diff --git a/drivers/media/dvb/ngene/ngene-cards.c b/drivers/media/dvb/ngene/ngene-cards.c
31639 index 0564192..75b16f5 100644
31640 --- a/drivers/media/dvb/ngene/ngene-cards.c
31641 +++ b/drivers/media/dvb/ngene/ngene-cards.c
31642 @@ -477,7 +477,7 @@ static struct ngene_info ngene_info_m780 = {
31643
31644 /****************************************************************************/
31645
31646 -static const struct pci_device_id ngene_id_tbl[] __devinitdata = {
31647 +static const struct pci_device_id ngene_id_tbl[] __devinitconst = {
31648 NGENE_ID(0x18c3, 0xabc3, ngene_info_cineS2),
31649 NGENE_ID(0x18c3, 0xabc4, ngene_info_cineS2),
31650 NGENE_ID(0x18c3, 0xdb01, ngene_info_satixS2),
31651 diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
31652 index 16a089f..ab1667d 100644
31653 --- a/drivers/media/radio/radio-cadet.c
31654 +++ b/drivers/media/radio/radio-cadet.c
31655 @@ -326,6 +326,8 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
31656 unsigned char readbuf[RDS_BUFFER];
31657 int i = 0;
31658
31659 + if (count > RDS_BUFFER)
31660 + return -EFAULT;
31661 mutex_lock(&dev->lock);
31662 if (dev->rdsstat == 0) {
31663 dev->rdsstat = 1;
31664 diff --git a/drivers/media/rc/redrat3.c b/drivers/media/rc/redrat3.c
31665 index 61287fc..8b08712 100644
31666 --- a/drivers/media/rc/redrat3.c
31667 +++ b/drivers/media/rc/redrat3.c
31668 @@ -905,7 +905,7 @@ static int redrat3_set_tx_carrier(struct rc_dev *dev, u32 carrier)
31669 return carrier;
31670 }
31671
31672 -static int redrat3_transmit_ir(struct rc_dev *rcdev, int *txbuf, u32 n)
31673 +static int redrat3_transmit_ir(struct rc_dev *rcdev, unsigned *txbuf, u32 n)
31674 {
31675 struct redrat3_dev *rr3 = rcdev->priv;
31676 struct device *dev = rr3->dev;
31677 diff --git a/drivers/media/video/au0828/au0828.h b/drivers/media/video/au0828/au0828.h
31678 index 9cde353..8c6a1c3 100644
31679 --- a/drivers/media/video/au0828/au0828.h
31680 +++ b/drivers/media/video/au0828/au0828.h
31681 @@ -191,7 +191,7 @@ struct au0828_dev {
31682
31683 /* I2C */
31684 struct i2c_adapter i2c_adap;
31685 - struct i2c_algorithm i2c_algo;
31686 + i2c_algorithm_no_const i2c_algo;
31687 struct i2c_client i2c_client;
31688 u32 i2c_rc;
31689
31690 diff --git a/drivers/media/video/cx88/cx88-alsa.c b/drivers/media/video/cx88/cx88-alsa.c
31691 index 68d1240..46b32eb 100644
31692 --- a/drivers/media/video/cx88/cx88-alsa.c
31693 +++ b/drivers/media/video/cx88/cx88-alsa.c
31694 @@ -766,7 +766,7 @@ static struct snd_kcontrol_new snd_cx88_alc_switch = {
31695 * Only boards with eeprom and byte 1 at eeprom=1 have it
31696 */
31697
31698 -static const struct pci_device_id const cx88_audio_pci_tbl[] __devinitdata = {
31699 +static const struct pci_device_id const cx88_audio_pci_tbl[] __devinitconst = {
31700 {0x14f1,0x8801,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
31701 {0x14f1,0x8811,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
31702 {0, }
31703 diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
31704 index 305e6aa..0143317 100644
31705 --- a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
31706 +++ b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
31707 @@ -196,7 +196,7 @@ struct pvr2_hdw {
31708
31709 /* I2C stuff */
31710 struct i2c_adapter i2c_adap;
31711 - struct i2c_algorithm i2c_algo;
31712 + i2c_algorithm_no_const i2c_algo;
31713 pvr2_i2c_func i2c_func[PVR2_I2C_FUNC_CNT];
31714 int i2c_cx25840_hack_state;
31715 int i2c_linked;
31716 diff --git a/drivers/media/video/timblogiw.c b/drivers/media/video/timblogiw.c
31717 index a0895bf..b7ebb1b 100644
31718 --- a/drivers/media/video/timblogiw.c
31719 +++ b/drivers/media/video/timblogiw.c
31720 @@ -745,7 +745,7 @@ static int timblogiw_mmap(struct file *file, struct vm_area_struct *vma)
31721
31722 /* Platform device functions */
31723
31724 -static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
31725 +static __devinitconst v4l2_ioctl_ops_no_const timblogiw_ioctl_ops = {
31726 .vidioc_querycap = timblogiw_querycap,
31727 .vidioc_enum_fmt_vid_cap = timblogiw_enum_fmt,
31728 .vidioc_g_fmt_vid_cap = timblogiw_g_fmt,
31729 @@ -767,7 +767,7 @@ static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
31730 .vidioc_enum_framesizes = timblogiw_enum_framesizes,
31731 };
31732
31733 -static __devinitconst struct v4l2_file_operations timblogiw_fops = {
31734 +static __devinitconst v4l2_file_operations_no_const timblogiw_fops = {
31735 .owner = THIS_MODULE,
31736 .open = timblogiw_open,
31737 .release = timblogiw_close,
31738 diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
31739 index e9c6a60..daf6a33 100644
31740 --- a/drivers/message/fusion/mptbase.c
31741 +++ b/drivers/message/fusion/mptbase.c
31742 @@ -6753,8 +6753,13 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
31743 seq_printf(m, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
31744 seq_printf(m, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
31745
31746 +#ifdef CONFIG_GRKERNSEC_HIDESYM
31747 + seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL);
31748 +#else
31749 seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
31750 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
31751 +#endif
31752 +
31753 /*
31754 * Rounding UP to nearest 4-kB boundary here...
31755 */
31756 diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
31757 index 9d95042..b808101 100644
31758 --- a/drivers/message/fusion/mptsas.c
31759 +++ b/drivers/message/fusion/mptsas.c
31760 @@ -446,6 +446,23 @@ mptsas_is_end_device(struct mptsas_devinfo * attached)
31761 return 0;
31762 }
31763
31764 +static inline void
31765 +mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
31766 +{
31767 + if (phy_info->port_details) {
31768 + phy_info->port_details->rphy = rphy;
31769 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
31770 + ioc->name, rphy));
31771 + }
31772 +
31773 + if (rphy) {
31774 + dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
31775 + &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
31776 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
31777 + ioc->name, rphy, rphy->dev.release));
31778 + }
31779 +}
31780 +
31781 /* no mutex */
31782 static void
31783 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
31784 @@ -484,23 +501,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *phy_info)
31785 return NULL;
31786 }
31787
31788 -static inline void
31789 -mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
31790 -{
31791 - if (phy_info->port_details) {
31792 - phy_info->port_details->rphy = rphy;
31793 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
31794 - ioc->name, rphy));
31795 - }
31796 -
31797 - if (rphy) {
31798 - dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
31799 - &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
31800 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
31801 - ioc->name, rphy, rphy->dev.release));
31802 - }
31803 -}
31804 -
31805 static inline struct sas_port *
31806 mptsas_get_port(struct mptsas_phyinfo *phy_info)
31807 {
31808 diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
31809 index 0c3ced7..1fe34ec 100644
31810 --- a/drivers/message/fusion/mptscsih.c
31811 +++ b/drivers/message/fusion/mptscsih.c
31812 @@ -1270,15 +1270,16 @@ mptscsih_info(struct Scsi_Host *SChost)
31813
31814 h = shost_priv(SChost);
31815
31816 - if (h) {
31817 - if (h->info_kbuf == NULL)
31818 - if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
31819 - return h->info_kbuf;
31820 - h->info_kbuf[0] = '\0';
31821 + if (!h)
31822 + return NULL;
31823
31824 - mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
31825 - h->info_kbuf[size-1] = '\0';
31826 - }
31827 + if (h->info_kbuf == NULL)
31828 + if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
31829 + return h->info_kbuf;
31830 + h->info_kbuf[0] = '\0';
31831 +
31832 + mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
31833 + h->info_kbuf[size-1] = '\0';
31834
31835 return h->info_kbuf;
31836 }
31837 diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
31838 index 07dbeaf..5533142 100644
31839 --- a/drivers/message/i2o/i2o_proc.c
31840 +++ b/drivers/message/i2o/i2o_proc.c
31841 @@ -255,13 +255,6 @@ static char *scsi_devices[] = {
31842 "Array Controller Device"
31843 };
31844
31845 -static char *chtostr(u8 * chars, int n)
31846 -{
31847 - char tmp[256];
31848 - tmp[0] = 0;
31849 - return strncat(tmp, (char *)chars, n);
31850 -}
31851 -
31852 static int i2o_report_query_status(struct seq_file *seq, int block_status,
31853 char *group)
31854 {
31855 @@ -838,8 +831,7 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
31856
31857 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
31858 seq_printf(seq, "%-#8x", ddm_table.module_id);
31859 - seq_printf(seq, "%-29s",
31860 - chtostr(ddm_table.module_name_version, 28));
31861 + seq_printf(seq, "%-.28s", ddm_table.module_name_version);
31862 seq_printf(seq, "%9d ", ddm_table.data_size);
31863 seq_printf(seq, "%8d", ddm_table.code_size);
31864
31865 @@ -940,8 +932,8 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
31866
31867 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
31868 seq_printf(seq, "%-#8x", dst->module_id);
31869 - seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28));
31870 - seq_printf(seq, "%-9s", chtostr(dst->date, 8));
31871 + seq_printf(seq, "%-.28s", dst->module_name_version);
31872 + seq_printf(seq, "%-.8s", dst->date);
31873 seq_printf(seq, "%8d ", dst->module_size);
31874 seq_printf(seq, "%8d ", dst->mpb_size);
31875 seq_printf(seq, "0x%04x", dst->module_flags);
31876 @@ -1272,14 +1264,10 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
31877 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
31878 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
31879 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
31880 - seq_printf(seq, "Vendor info : %s\n",
31881 - chtostr((u8 *) (work32 + 2), 16));
31882 - seq_printf(seq, "Product info : %s\n",
31883 - chtostr((u8 *) (work32 + 6), 16));
31884 - seq_printf(seq, "Description : %s\n",
31885 - chtostr((u8 *) (work32 + 10), 16));
31886 - seq_printf(seq, "Product rev. : %s\n",
31887 - chtostr((u8 *) (work32 + 14), 8));
31888 + seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
31889 + seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
31890 + seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
31891 + seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
31892
31893 seq_printf(seq, "Serial number : ");
31894 print_serial_number(seq, (u8 *) (work32 + 16),
31895 @@ -1324,10 +1312,8 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
31896 }
31897
31898 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
31899 - seq_printf(seq, "Module name : %s\n",
31900 - chtostr(result.module_name, 24));
31901 - seq_printf(seq, "Module revision : %s\n",
31902 - chtostr(result.module_rev, 8));
31903 + seq_printf(seq, "Module name : %.24s\n", result.module_name);
31904 + seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
31905
31906 seq_printf(seq, "Serial number : ");
31907 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
31908 @@ -1358,14 +1344,10 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
31909 return 0;
31910 }
31911
31912 - seq_printf(seq, "Device name : %s\n",
31913 - chtostr(result.device_name, 64));
31914 - seq_printf(seq, "Service name : %s\n",
31915 - chtostr(result.service_name, 64));
31916 - seq_printf(seq, "Physical name : %s\n",
31917 - chtostr(result.physical_location, 64));
31918 - seq_printf(seq, "Instance number : %s\n",
31919 - chtostr(result.instance_number, 4));
31920 + seq_printf(seq, "Device name : %.64s\n", result.device_name);
31921 + seq_printf(seq, "Service name : %.64s\n", result.service_name);
31922 + seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
31923 + seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
31924
31925 return 0;
31926 }
31927 diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
31928 index a8c08f3..155fe3d 100644
31929 --- a/drivers/message/i2o/iop.c
31930 +++ b/drivers/message/i2o/iop.c
31931 @@ -111,10 +111,10 @@ u32 i2o_cntxt_list_add(struct i2o_controller * c, void *ptr)
31932
31933 spin_lock_irqsave(&c->context_list_lock, flags);
31934
31935 - if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
31936 - atomic_inc(&c->context_list_counter);
31937 + if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
31938 + atomic_inc_unchecked(&c->context_list_counter);
31939
31940 - entry->context = atomic_read(&c->context_list_counter);
31941 + entry->context = atomic_read_unchecked(&c->context_list_counter);
31942
31943 list_add(&entry->list, &c->context_list);
31944
31945 @@ -1077,7 +1077,7 @@ struct i2o_controller *i2o_iop_alloc(void)
31946
31947 #if BITS_PER_LONG == 64
31948 spin_lock_init(&c->context_list_lock);
31949 - atomic_set(&c->context_list_counter, 0);
31950 + atomic_set_unchecked(&c->context_list_counter, 0);
31951 INIT_LIST_HEAD(&c->context_list);
31952 #endif
31953
31954 diff --git a/drivers/mfd/abx500-core.c b/drivers/mfd/abx500-core.c
31955 index 7ce65f4..e66e9bc 100644
31956 --- a/drivers/mfd/abx500-core.c
31957 +++ b/drivers/mfd/abx500-core.c
31958 @@ -15,7 +15,7 @@ static LIST_HEAD(abx500_list);
31959
31960 struct abx500_device_entry {
31961 struct list_head list;
31962 - struct abx500_ops ops;
31963 + abx500_ops_no_const ops;
31964 struct device *dev;
31965 };
31966
31967 diff --git a/drivers/mfd/janz-cmodio.c b/drivers/mfd/janz-cmodio.c
31968 index 5c2a06a..8fa077c 100644
31969 --- a/drivers/mfd/janz-cmodio.c
31970 +++ b/drivers/mfd/janz-cmodio.c
31971 @@ -13,6 +13,7 @@
31972
31973 #include <linux/kernel.h>
31974 #include <linux/module.h>
31975 +#include <linux/slab.h>
31976 #include <linux/init.h>
31977 #include <linux/pci.h>
31978 #include <linux/interrupt.h>
31979 diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
31980 index 29d12a7..f900ba4 100644
31981 --- a/drivers/misc/lis3lv02d/lis3lv02d.c
31982 +++ b/drivers/misc/lis3lv02d/lis3lv02d.c
31983 @@ -464,7 +464,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *data)
31984 * the lid is closed. This leads to interrupts as soon as a little move
31985 * is done.
31986 */
31987 - atomic_inc(&lis3->count);
31988 + atomic_inc_unchecked(&lis3->count);
31989
31990 wake_up_interruptible(&lis3->misc_wait);
31991 kill_fasync(&lis3->async_queue, SIGIO, POLL_IN);
31992 @@ -550,7 +550,7 @@ static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
31993 if (lis3->pm_dev)
31994 pm_runtime_get_sync(lis3->pm_dev);
31995
31996 - atomic_set(&lis3->count, 0);
31997 + atomic_set_unchecked(&lis3->count, 0);
31998 return 0;
31999 }
32000
32001 @@ -583,7 +583,7 @@ static ssize_t lis3lv02d_misc_read(struct file *file, char __user *buf,
32002 add_wait_queue(&lis3->misc_wait, &wait);
32003 while (true) {
32004 set_current_state(TASK_INTERRUPTIBLE);
32005 - data = atomic_xchg(&lis3->count, 0);
32006 + data = atomic_xchg_unchecked(&lis3->count, 0);
32007 if (data)
32008 break;
32009
32010 @@ -624,7 +624,7 @@ static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
32011 struct lis3lv02d, miscdev);
32012
32013 poll_wait(file, &lis3->misc_wait, wait);
32014 - if (atomic_read(&lis3->count))
32015 + if (atomic_read_unchecked(&lis3->count))
32016 return POLLIN | POLLRDNORM;
32017 return 0;
32018 }
32019 diff --git a/drivers/misc/lis3lv02d/lis3lv02d.h b/drivers/misc/lis3lv02d/lis3lv02d.h
32020 index 2b1482a..5d33616 100644
32021 --- a/drivers/misc/lis3lv02d/lis3lv02d.h
32022 +++ b/drivers/misc/lis3lv02d/lis3lv02d.h
32023 @@ -266,7 +266,7 @@ struct lis3lv02d {
32024 struct input_polled_dev *idev; /* input device */
32025 struct platform_device *pdev; /* platform device */
32026 struct regulator_bulk_data regulators[2];
32027 - atomic_t count; /* interrupt count after last read */
32028 + atomic_unchecked_t count; /* interrupt count after last read */
32029 union axis_conversion ac; /* hw -> logical axis */
32030 int mapped_btns[3];
32031
32032 diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c
32033 index 2f30bad..c4c13d0 100644
32034 --- a/drivers/misc/sgi-gru/gruhandles.c
32035 +++ b/drivers/misc/sgi-gru/gruhandles.c
32036 @@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op op, unsigned long clks)
32037 unsigned long nsec;
32038
32039 nsec = CLKS2NSEC(clks);
32040 - atomic_long_inc(&mcs_op_statistics[op].count);
32041 - atomic_long_add(nsec, &mcs_op_statistics[op].total);
32042 + atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
32043 + atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total);
32044 if (mcs_op_statistics[op].max < nsec)
32045 mcs_op_statistics[op].max = nsec;
32046 }
32047 diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
32048 index 7768b87..f8aac38 100644
32049 --- a/drivers/misc/sgi-gru/gruprocfs.c
32050 +++ b/drivers/misc/sgi-gru/gruprocfs.c
32051 @@ -32,9 +32,9 @@
32052
32053 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
32054
32055 -static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
32056 +static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
32057 {
32058 - unsigned long val = atomic_long_read(v);
32059 + unsigned long val = atomic_long_read_unchecked(v);
32060
32061 seq_printf(s, "%16lu %s\n", val, id);
32062 }
32063 @@ -134,8 +134,8 @@ static int mcs_statistics_show(struct seq_file *s, void *p)
32064
32065 seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
32066 for (op = 0; op < mcsop_last; op++) {
32067 - count = atomic_long_read(&mcs_op_statistics[op].count);
32068 - total = atomic_long_read(&mcs_op_statistics[op].total);
32069 + count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
32070 + total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
32071 max = mcs_op_statistics[op].max;
32072 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
32073 count ? total / count : 0, max);
32074 diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
32075 index 5c3ce24..4915ccb 100644
32076 --- a/drivers/misc/sgi-gru/grutables.h
32077 +++ b/drivers/misc/sgi-gru/grutables.h
32078 @@ -167,82 +167,82 @@ extern unsigned int gru_max_gids;
32079 * GRU statistics.
32080 */
32081 struct gru_stats_s {
32082 - atomic_long_t vdata_alloc;
32083 - atomic_long_t vdata_free;
32084 - atomic_long_t gts_alloc;
32085 - atomic_long_t gts_free;
32086 - atomic_long_t gms_alloc;
32087 - atomic_long_t gms_free;
32088 - atomic_long_t gts_double_allocate;
32089 - atomic_long_t assign_context;
32090 - atomic_long_t assign_context_failed;
32091 - atomic_long_t free_context;
32092 - atomic_long_t load_user_context;
32093 - atomic_long_t load_kernel_context;
32094 - atomic_long_t lock_kernel_context;
32095 - atomic_long_t unlock_kernel_context;
32096 - atomic_long_t steal_user_context;
32097 - atomic_long_t steal_kernel_context;
32098 - atomic_long_t steal_context_failed;
32099 - atomic_long_t nopfn;
32100 - atomic_long_t asid_new;
32101 - atomic_long_t asid_next;
32102 - atomic_long_t asid_wrap;
32103 - atomic_long_t asid_reuse;
32104 - atomic_long_t intr;
32105 - atomic_long_t intr_cbr;
32106 - atomic_long_t intr_tfh;
32107 - atomic_long_t intr_spurious;
32108 - atomic_long_t intr_mm_lock_failed;
32109 - atomic_long_t call_os;
32110 - atomic_long_t call_os_wait_queue;
32111 - atomic_long_t user_flush_tlb;
32112 - atomic_long_t user_unload_context;
32113 - atomic_long_t user_exception;
32114 - atomic_long_t set_context_option;
32115 - atomic_long_t check_context_retarget_intr;
32116 - atomic_long_t check_context_unload;
32117 - atomic_long_t tlb_dropin;
32118 - atomic_long_t tlb_preload_page;
32119 - atomic_long_t tlb_dropin_fail_no_asid;
32120 - atomic_long_t tlb_dropin_fail_upm;
32121 - atomic_long_t tlb_dropin_fail_invalid;
32122 - atomic_long_t tlb_dropin_fail_range_active;
32123 - atomic_long_t tlb_dropin_fail_idle;
32124 - atomic_long_t tlb_dropin_fail_fmm;
32125 - atomic_long_t tlb_dropin_fail_no_exception;
32126 - atomic_long_t tfh_stale_on_fault;
32127 - atomic_long_t mmu_invalidate_range;
32128 - atomic_long_t mmu_invalidate_page;
32129 - atomic_long_t flush_tlb;
32130 - atomic_long_t flush_tlb_gru;
32131 - atomic_long_t flush_tlb_gru_tgh;
32132 - atomic_long_t flush_tlb_gru_zero_asid;
32133 + atomic_long_unchecked_t vdata_alloc;
32134 + atomic_long_unchecked_t vdata_free;
32135 + atomic_long_unchecked_t gts_alloc;
32136 + atomic_long_unchecked_t gts_free;
32137 + atomic_long_unchecked_t gms_alloc;
32138 + atomic_long_unchecked_t gms_free;
32139 + atomic_long_unchecked_t gts_double_allocate;
32140 + atomic_long_unchecked_t assign_context;
32141 + atomic_long_unchecked_t assign_context_failed;
32142 + atomic_long_unchecked_t free_context;
32143 + atomic_long_unchecked_t load_user_context;
32144 + atomic_long_unchecked_t load_kernel_context;
32145 + atomic_long_unchecked_t lock_kernel_context;
32146 + atomic_long_unchecked_t unlock_kernel_context;
32147 + atomic_long_unchecked_t steal_user_context;
32148 + atomic_long_unchecked_t steal_kernel_context;
32149 + atomic_long_unchecked_t steal_context_failed;
32150 + atomic_long_unchecked_t nopfn;
32151 + atomic_long_unchecked_t asid_new;
32152 + atomic_long_unchecked_t asid_next;
32153 + atomic_long_unchecked_t asid_wrap;
32154 + atomic_long_unchecked_t asid_reuse;
32155 + atomic_long_unchecked_t intr;
32156 + atomic_long_unchecked_t intr_cbr;
32157 + atomic_long_unchecked_t intr_tfh;
32158 + atomic_long_unchecked_t intr_spurious;
32159 + atomic_long_unchecked_t intr_mm_lock_failed;
32160 + atomic_long_unchecked_t call_os;
32161 + atomic_long_unchecked_t call_os_wait_queue;
32162 + atomic_long_unchecked_t user_flush_tlb;
32163 + atomic_long_unchecked_t user_unload_context;
32164 + atomic_long_unchecked_t user_exception;
32165 + atomic_long_unchecked_t set_context_option;
32166 + atomic_long_unchecked_t check_context_retarget_intr;
32167 + atomic_long_unchecked_t check_context_unload;
32168 + atomic_long_unchecked_t tlb_dropin;
32169 + atomic_long_unchecked_t tlb_preload_page;
32170 + atomic_long_unchecked_t tlb_dropin_fail_no_asid;
32171 + atomic_long_unchecked_t tlb_dropin_fail_upm;
32172 + atomic_long_unchecked_t tlb_dropin_fail_invalid;
32173 + atomic_long_unchecked_t tlb_dropin_fail_range_active;
32174 + atomic_long_unchecked_t tlb_dropin_fail_idle;
32175 + atomic_long_unchecked_t tlb_dropin_fail_fmm;
32176 + atomic_long_unchecked_t tlb_dropin_fail_no_exception;
32177 + atomic_long_unchecked_t tfh_stale_on_fault;
32178 + atomic_long_unchecked_t mmu_invalidate_range;
32179 + atomic_long_unchecked_t mmu_invalidate_page;
32180 + atomic_long_unchecked_t flush_tlb;
32181 + atomic_long_unchecked_t flush_tlb_gru;
32182 + atomic_long_unchecked_t flush_tlb_gru_tgh;
32183 + atomic_long_unchecked_t flush_tlb_gru_zero_asid;
32184
32185 - atomic_long_t copy_gpa;
32186 - atomic_long_t read_gpa;
32187 + atomic_long_unchecked_t copy_gpa;
32188 + atomic_long_unchecked_t read_gpa;
32189
32190 - atomic_long_t mesq_receive;
32191 - atomic_long_t mesq_receive_none;
32192 - atomic_long_t mesq_send;
32193 - atomic_long_t mesq_send_failed;
32194 - atomic_long_t mesq_noop;
32195 - atomic_long_t mesq_send_unexpected_error;
32196 - atomic_long_t mesq_send_lb_overflow;
32197 - atomic_long_t mesq_send_qlimit_reached;
32198 - atomic_long_t mesq_send_amo_nacked;
32199 - atomic_long_t mesq_send_put_nacked;
32200 - atomic_long_t mesq_page_overflow;
32201 - atomic_long_t mesq_qf_locked;
32202 - atomic_long_t mesq_qf_noop_not_full;
32203 - atomic_long_t mesq_qf_switch_head_failed;
32204 - atomic_long_t mesq_qf_unexpected_error;
32205 - atomic_long_t mesq_noop_unexpected_error;
32206 - atomic_long_t mesq_noop_lb_overflow;
32207 - atomic_long_t mesq_noop_qlimit_reached;
32208 - atomic_long_t mesq_noop_amo_nacked;
32209 - atomic_long_t mesq_noop_put_nacked;
32210 - atomic_long_t mesq_noop_page_overflow;
32211 + atomic_long_unchecked_t mesq_receive;
32212 + atomic_long_unchecked_t mesq_receive_none;
32213 + atomic_long_unchecked_t mesq_send;
32214 + atomic_long_unchecked_t mesq_send_failed;
32215 + atomic_long_unchecked_t mesq_noop;
32216 + atomic_long_unchecked_t mesq_send_unexpected_error;
32217 + atomic_long_unchecked_t mesq_send_lb_overflow;
32218 + atomic_long_unchecked_t mesq_send_qlimit_reached;
32219 + atomic_long_unchecked_t mesq_send_amo_nacked;
32220 + atomic_long_unchecked_t mesq_send_put_nacked;
32221 + atomic_long_unchecked_t mesq_page_overflow;
32222 + atomic_long_unchecked_t mesq_qf_locked;
32223 + atomic_long_unchecked_t mesq_qf_noop_not_full;
32224 + atomic_long_unchecked_t mesq_qf_switch_head_failed;
32225 + atomic_long_unchecked_t mesq_qf_unexpected_error;
32226 + atomic_long_unchecked_t mesq_noop_unexpected_error;
32227 + atomic_long_unchecked_t mesq_noop_lb_overflow;
32228 + atomic_long_unchecked_t mesq_noop_qlimit_reached;
32229 + atomic_long_unchecked_t mesq_noop_amo_nacked;
32230 + atomic_long_unchecked_t mesq_noop_put_nacked;
32231 + atomic_long_unchecked_t mesq_noop_page_overflow;
32232
32233 };
32234
32235 @@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync,
32236 tghop_invalidate, mcsop_last};
32237
32238 struct mcs_op_statistic {
32239 - atomic_long_t count;
32240 - atomic_long_t total;
32241 + atomic_long_unchecked_t count;
32242 + atomic_long_unchecked_t total;
32243 unsigned long max;
32244 };
32245
32246 @@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
32247
32248 #define STAT(id) do { \
32249 if (gru_options & OPT_STATS) \
32250 - atomic_long_inc(&gru_stats.id); \
32251 + atomic_long_inc_unchecked(&gru_stats.id); \
32252 } while (0)
32253
32254 #ifdef CONFIG_SGI_GRU_DEBUG
32255 diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
32256 index 851b2f2..a4ec097 100644
32257 --- a/drivers/misc/sgi-xp/xp.h
32258 +++ b/drivers/misc/sgi-xp/xp.h
32259 @@ -289,7 +289,7 @@ struct xpc_interface {
32260 xpc_notify_func, void *);
32261 void (*received) (short, int, void *);
32262 enum xp_retval (*partid_to_nasids) (short, void *);
32263 -};
32264 +} __no_const;
32265
32266 extern struct xpc_interface xpc_interface;
32267
32268 diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
32269 index b94d5f7..7f494c5 100644
32270 --- a/drivers/misc/sgi-xp/xpc.h
32271 +++ b/drivers/misc/sgi-xp/xpc.h
32272 @@ -835,6 +835,7 @@ struct xpc_arch_operations {
32273 void (*received_payload) (struct xpc_channel *, void *);
32274 void (*notify_senders_of_disconnect) (struct xpc_channel *);
32275 };
32276 +typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const;
32277
32278 /* struct xpc_partition act_state values (for XPC HB) */
32279
32280 @@ -876,7 +877,7 @@ extern struct xpc_registration xpc_registrations[];
32281 /* found in xpc_main.c */
32282 extern struct device *xpc_part;
32283 extern struct device *xpc_chan;
32284 -extern struct xpc_arch_operations xpc_arch_ops;
32285 +extern xpc_arch_operations_no_const xpc_arch_ops;
32286 extern int xpc_disengage_timelimit;
32287 extern int xpc_disengage_timedout;
32288 extern int xpc_activate_IRQ_rcvd;
32289 diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
32290 index 8d082b4..aa749ae 100644
32291 --- a/drivers/misc/sgi-xp/xpc_main.c
32292 +++ b/drivers/misc/sgi-xp/xpc_main.c
32293 @@ -162,7 +162,7 @@ static struct notifier_block xpc_die_notifier = {
32294 .notifier_call = xpc_system_die,
32295 };
32296
32297 -struct xpc_arch_operations xpc_arch_ops;
32298 +xpc_arch_operations_no_const xpc_arch_ops;
32299
32300 /*
32301 * Timer function to enforce the timelimit on the partition disengage.
32302 diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
32303 index 6878a94..fe5c5f1 100644
32304 --- a/drivers/mmc/host/sdhci-pci.c
32305 +++ b/drivers/mmc/host/sdhci-pci.c
32306 @@ -673,7 +673,7 @@ static const struct sdhci_pci_fixes sdhci_via = {
32307 .probe = via_probe,
32308 };
32309
32310 -static const struct pci_device_id pci_ids[] __devinitdata = {
32311 +static const struct pci_device_id pci_ids[] __devinitconst = {
32312 {
32313 .vendor = PCI_VENDOR_ID_RICOH,
32314 .device = PCI_DEVICE_ID_RICOH_R5C822,
32315 diff --git a/drivers/mtd/devices/doc2000.c b/drivers/mtd/devices/doc2000.c
32316 index e9fad91..0a7a16a 100644
32317 --- a/drivers/mtd/devices/doc2000.c
32318 +++ b/drivers/mtd/devices/doc2000.c
32319 @@ -773,7 +773,7 @@ static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
32320
32321 /* The ECC will not be calculated correctly if less than 512 is written */
32322 /* DBB-
32323 - if (len != 0x200 && eccbuf)
32324 + if (len != 0x200)
32325 printk(KERN_WARNING
32326 "ECC needs a full sector write (adr: %lx size %lx)\n",
32327 (long) to, (long) len);
32328 diff --git a/drivers/mtd/devices/doc2001.c b/drivers/mtd/devices/doc2001.c
32329 index a3f7a27..234016e 100644
32330 --- a/drivers/mtd/devices/doc2001.c
32331 +++ b/drivers/mtd/devices/doc2001.c
32332 @@ -392,7 +392,7 @@ static int doc_read (struct mtd_info *mtd, loff_t from, size_t len,
32333 struct Nand *mychip = &this->chips[from >> (this->chipshift)];
32334
32335 /* Don't allow read past end of device */
32336 - if (from >= this->totlen)
32337 + if (from >= this->totlen || !len)
32338 return -EINVAL;
32339
32340 /* Don't allow a single read to cross a 512-byte block boundary */
32341 diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
32342 index 3984d48..28aa897 100644
32343 --- a/drivers/mtd/nand/denali.c
32344 +++ b/drivers/mtd/nand/denali.c
32345 @@ -26,6 +26,7 @@
32346 #include <linux/pci.h>
32347 #include <linux/mtd/mtd.h>
32348 #include <linux/module.h>
32349 +#include <linux/slab.h>
32350
32351 #include "denali.h"
32352
32353 diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
32354 index ac40925..483b753 100644
32355 --- a/drivers/mtd/nftlmount.c
32356 +++ b/drivers/mtd/nftlmount.c
32357 @@ -24,6 +24,7 @@
32358 #include <asm/errno.h>
32359 #include <linux/delay.h>
32360 #include <linux/slab.h>
32361 +#include <linux/sched.h>
32362 #include <linux/mtd/mtd.h>
32363 #include <linux/mtd/nand.h>
32364 #include <linux/mtd/nftl.h>
32365 diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
32366 index 6c3fb5a..c542a81 100644
32367 --- a/drivers/mtd/ubi/build.c
32368 +++ b/drivers/mtd/ubi/build.c
32369 @@ -1311,7 +1311,7 @@ module_exit(ubi_exit);
32370 static int __init bytes_str_to_int(const char *str)
32371 {
32372 char *endp;
32373 - unsigned long result;
32374 + unsigned long result, scale = 1;
32375
32376 result = simple_strtoul(str, &endp, 0);
32377 if (str == endp || result >= INT_MAX) {
32378 @@ -1322,11 +1322,11 @@ static int __init bytes_str_to_int(const char *str)
32379
32380 switch (*endp) {
32381 case 'G':
32382 - result *= 1024;
32383 + scale *= 1024;
32384 case 'M':
32385 - result *= 1024;
32386 + scale *= 1024;
32387 case 'K':
32388 - result *= 1024;
32389 + scale *= 1024;
32390 if (endp[1] == 'i' && endp[2] == 'B')
32391 endp += 2;
32392 case '\0':
32393 @@ -1337,7 +1337,13 @@ static int __init bytes_str_to_int(const char *str)
32394 return -EINVAL;
32395 }
32396
32397 - return result;
32398 + if ((intoverflow_t)result*scale >= INT_MAX) {
32399 + printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
32400 + str);
32401 + return -EINVAL;
32402 + }
32403 +
32404 + return result*scale;
32405 }
32406
32407 /**
32408 diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
32409 index 1feae59..c2a61d2 100644
32410 --- a/drivers/net/ethernet/atheros/atlx/atl2.c
32411 +++ b/drivers/net/ethernet/atheros/atlx/atl2.c
32412 @@ -2857,7 +2857,7 @@ static void atl2_force_ps(struct atl2_hw *hw)
32413 */
32414
32415 #define ATL2_PARAM(X, desc) \
32416 - static const int __devinitdata X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
32417 + static const int __devinitconst X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
32418 MODULE_PARM(X, "1-" __MODULE_STRING(ATL2_MAX_NIC) "i"); \
32419 MODULE_PARM_DESC(X, desc);
32420 #else
32421 diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
32422 index 9a517c2..a50cfcb 100644
32423 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
32424 +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
32425 @@ -449,7 +449,7 @@ struct bnx2x_rx_mode_obj {
32426
32427 int (*wait_comp)(struct bnx2x *bp,
32428 struct bnx2x_rx_mode_ramrod_params *p);
32429 -};
32430 +} __no_const;
32431
32432 /********************** Set multicast group ***********************************/
32433
32434 diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
32435 index 94b4bd0..73c02de 100644
32436 --- a/drivers/net/ethernet/broadcom/tg3.h
32437 +++ b/drivers/net/ethernet/broadcom/tg3.h
32438 @@ -134,6 +134,7 @@
32439 #define CHIPREV_ID_5750_A0 0x4000
32440 #define CHIPREV_ID_5750_A1 0x4001
32441 #define CHIPREV_ID_5750_A3 0x4003
32442 +#define CHIPREV_ID_5750_C1 0x4201
32443 #define CHIPREV_ID_5750_C2 0x4202
32444 #define CHIPREV_ID_5752_A0_HW 0x5000
32445 #define CHIPREV_ID_5752_A0 0x6000
32446 diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
32447 index c5f5479..2e8c260 100644
32448 --- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h
32449 +++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
32450 @@ -87,7 +87,7 @@ typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
32451 */
32452 struct l2t_skb_cb {
32453 arp_failure_handler_func arp_failure_handler;
32454 -};
32455 +} __no_const;
32456
32457 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
32458
32459 diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
32460 index 871bcaa..4043505 100644
32461 --- a/drivers/net/ethernet/dec/tulip/de4x5.c
32462 +++ b/drivers/net/ethernet/dec/tulip/de4x5.c
32463 @@ -5397,7 +5397,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
32464 for (i=0; i<ETH_ALEN; i++) {
32465 tmp.addr[i] = dev->dev_addr[i];
32466 }
32467 - if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
32468 + if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
32469 break;
32470
32471 case DE4X5_SET_HWADDR: /* Set the hardware address */
32472 @@ -5437,7 +5437,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
32473 spin_lock_irqsave(&lp->lock, flags);
32474 memcpy(&statbuf, &lp->pktStats, ioc->len);
32475 spin_unlock_irqrestore(&lp->lock, flags);
32476 - if (copy_to_user(ioc->data, &statbuf, ioc->len))
32477 + if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
32478 return -EFAULT;
32479 break;
32480 }
32481 diff --git a/drivers/net/ethernet/dec/tulip/eeprom.c b/drivers/net/ethernet/dec/tulip/eeprom.c
32482 index 14d5b61..1398636 100644
32483 --- a/drivers/net/ethernet/dec/tulip/eeprom.c
32484 +++ b/drivers/net/ethernet/dec/tulip/eeprom.c
32485 @@ -79,7 +79,7 @@ static struct eeprom_fixup eeprom_fixups[] __devinitdata = {
32486 {NULL}};
32487
32488
32489 -static const char *block_name[] __devinitdata = {
32490 +static const char *block_name[] __devinitconst = {
32491 "21140 non-MII",
32492 "21140 MII PHY",
32493 "21142 Serial PHY",
32494 diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c
32495 index 4d01219..b58d26d 100644
32496 --- a/drivers/net/ethernet/dec/tulip/winbond-840.c
32497 +++ b/drivers/net/ethernet/dec/tulip/winbond-840.c
32498 @@ -236,7 +236,7 @@ struct pci_id_info {
32499 int drv_flags; /* Driver use, intended as capability flags. */
32500 };
32501
32502 -static const struct pci_id_info pci_id_tbl[] __devinitdata = {
32503 +static const struct pci_id_info pci_id_tbl[] __devinitconst = {
32504 { /* Sometime a Level-One switch card. */
32505 "Winbond W89c840", CanHaveMII | HasBrokenTx | FDXOnNoMII},
32506 { "Winbond W89c840", CanHaveMII | HasBrokenTx},
32507 diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c
32508 index dcd7f7a..ecb7fb3 100644
32509 --- a/drivers/net/ethernet/dlink/sundance.c
32510 +++ b/drivers/net/ethernet/dlink/sundance.c
32511 @@ -218,7 +218,7 @@ enum {
32512 struct pci_id_info {
32513 const char *name;
32514 };
32515 -static const struct pci_id_info pci_id_tbl[] __devinitdata = {
32516 +static const struct pci_id_info pci_id_tbl[] __devinitconst = {
32517 {"D-Link DFE-550TX FAST Ethernet Adapter"},
32518 {"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
32519 {"D-Link DFE-580TX 4 port Server Adapter"},
32520 diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
32521 index bf266a0..e024af7 100644
32522 --- a/drivers/net/ethernet/emulex/benet/be_main.c
32523 +++ b/drivers/net/ethernet/emulex/benet/be_main.c
32524 @@ -397,7 +397,7 @@ static void accumulate_16bit_val(u32 *acc, u16 val)
32525
32526 if (wrapped)
32527 newacc += 65536;
32528 - ACCESS_ONCE(*acc) = newacc;
32529 + ACCESS_ONCE_RW(*acc) = newacc;
32530 }
32531
32532 void be_parse_stats(struct be_adapter *adapter)
32533 diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c
32534 index 61d2bdd..7f1154a 100644
32535 --- a/drivers/net/ethernet/fealnx.c
32536 +++ b/drivers/net/ethernet/fealnx.c
32537 @@ -150,7 +150,7 @@ struct chip_info {
32538 int flags;
32539 };
32540
32541 -static const struct chip_info skel_netdrv_tbl[] __devinitdata = {
32542 +static const struct chip_info skel_netdrv_tbl[] __devinitconst = {
32543 { "100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
32544 { "100/10M Ethernet PCI Adapter", HAS_CHIP_XCVR },
32545 { "1000/100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
32546 diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.c b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
32547 index e1159e5..e18684d 100644
32548 --- a/drivers/net/ethernet/intel/e1000e/80003es2lan.c
32549 +++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
32550 @@ -205,7 +205,7 @@ static s32 e1000_init_mac_params_80003es2lan(struct e1000_adapter *adapter)
32551 {
32552 struct e1000_hw *hw = &adapter->hw;
32553 struct e1000_mac_info *mac = &hw->mac;
32554 - struct e1000_mac_operations *func = &mac->ops;
32555 + e1000_mac_operations_no_const *func = &mac->ops;
32556
32557 /* Set media type */
32558 switch (adapter->pdev->device) {
32559 diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c
32560 index a3e65fd..f451444 100644
32561 --- a/drivers/net/ethernet/intel/e1000e/82571.c
32562 +++ b/drivers/net/ethernet/intel/e1000e/82571.c
32563 @@ -239,7 +239,7 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
32564 {
32565 struct e1000_hw *hw = &adapter->hw;
32566 struct e1000_mac_info *mac = &hw->mac;
32567 - struct e1000_mac_operations *func = &mac->ops;
32568 + e1000_mac_operations_no_const *func = &mac->ops;
32569 u32 swsm = 0;
32570 u32 swsm2 = 0;
32571 bool force_clear_smbi = false;
32572 diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h
32573 index 2967039..ca8c40c 100644
32574 --- a/drivers/net/ethernet/intel/e1000e/hw.h
32575 +++ b/drivers/net/ethernet/intel/e1000e/hw.h
32576 @@ -778,6 +778,7 @@ struct e1000_mac_operations {
32577 void (*write_vfta)(struct e1000_hw *, u32, u32);
32578 s32 (*read_mac_addr)(struct e1000_hw *);
32579 };
32580 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
32581
32582 /*
32583 * When to use various PHY register access functions:
32584 @@ -818,6 +819,7 @@ struct e1000_phy_operations {
32585 void (*power_up)(struct e1000_hw *);
32586 void (*power_down)(struct e1000_hw *);
32587 };
32588 +typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
32589
32590 /* Function pointers for the NVM. */
32591 struct e1000_nvm_operations {
32592 @@ -829,9 +831,10 @@ struct e1000_nvm_operations {
32593 s32 (*validate)(struct e1000_hw *);
32594 s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
32595 };
32596 +typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
32597
32598 struct e1000_mac_info {
32599 - struct e1000_mac_operations ops;
32600 + e1000_mac_operations_no_const ops;
32601 u8 addr[ETH_ALEN];
32602 u8 perm_addr[ETH_ALEN];
32603
32604 @@ -872,7 +875,7 @@ struct e1000_mac_info {
32605 };
32606
32607 struct e1000_phy_info {
32608 - struct e1000_phy_operations ops;
32609 + e1000_phy_operations_no_const ops;
32610
32611 enum e1000_phy_type type;
32612
32613 @@ -906,7 +909,7 @@ struct e1000_phy_info {
32614 };
32615
32616 struct e1000_nvm_info {
32617 - struct e1000_nvm_operations ops;
32618 + e1000_nvm_operations_no_const ops;
32619
32620 enum e1000_nvm_type type;
32621 enum e1000_nvm_override override;
32622 diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h
32623 index 4519a13..f97fcd0 100644
32624 --- a/drivers/net/ethernet/intel/igb/e1000_hw.h
32625 +++ b/drivers/net/ethernet/intel/igb/e1000_hw.h
32626 @@ -314,6 +314,7 @@ struct e1000_mac_operations {
32627 s32 (*read_mac_addr)(struct e1000_hw *);
32628 s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
32629 };
32630 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
32631
32632 struct e1000_phy_operations {
32633 s32 (*acquire)(struct e1000_hw *);
32634 @@ -330,6 +331,7 @@ struct e1000_phy_operations {
32635 s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
32636 s32 (*write_reg)(struct e1000_hw *, u32, u16);
32637 };
32638 +typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
32639
32640 struct e1000_nvm_operations {
32641 s32 (*acquire)(struct e1000_hw *);
32642 @@ -339,6 +341,7 @@ struct e1000_nvm_operations {
32643 s32 (*update)(struct e1000_hw *);
32644 s32 (*validate)(struct e1000_hw *);
32645 };
32646 +typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
32647
32648 struct e1000_info {
32649 s32 (*get_invariants)(struct e1000_hw *);
32650 @@ -350,7 +353,7 @@ struct e1000_info {
32651 extern const struct e1000_info e1000_82575_info;
32652
32653 struct e1000_mac_info {
32654 - struct e1000_mac_operations ops;
32655 + e1000_mac_operations_no_const ops;
32656
32657 u8 addr[6];
32658 u8 perm_addr[6];
32659 @@ -388,7 +391,7 @@ struct e1000_mac_info {
32660 };
32661
32662 struct e1000_phy_info {
32663 - struct e1000_phy_operations ops;
32664 + e1000_phy_operations_no_const ops;
32665
32666 enum e1000_phy_type type;
32667
32668 @@ -423,7 +426,7 @@ struct e1000_phy_info {
32669 };
32670
32671 struct e1000_nvm_info {
32672 - struct e1000_nvm_operations ops;
32673 + e1000_nvm_operations_no_const ops;
32674 enum e1000_nvm_type type;
32675 enum e1000_nvm_override override;
32676
32677 @@ -468,6 +471,7 @@ struct e1000_mbx_operations {
32678 s32 (*check_for_ack)(struct e1000_hw *, u16);
32679 s32 (*check_for_rst)(struct e1000_hw *, u16);
32680 };
32681 +typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
32682
32683 struct e1000_mbx_stats {
32684 u32 msgs_tx;
32685 @@ -479,7 +483,7 @@ struct e1000_mbx_stats {
32686 };
32687
32688 struct e1000_mbx_info {
32689 - struct e1000_mbx_operations ops;
32690 + e1000_mbx_operations_no_const ops;
32691 struct e1000_mbx_stats stats;
32692 u32 timeout;
32693 u32 usec_delay;
32694 diff --git a/drivers/net/ethernet/intel/igbvf/vf.h b/drivers/net/ethernet/intel/igbvf/vf.h
32695 index d7ed58f..64cde36 100644
32696 --- a/drivers/net/ethernet/intel/igbvf/vf.h
32697 +++ b/drivers/net/ethernet/intel/igbvf/vf.h
32698 @@ -189,9 +189,10 @@ struct e1000_mac_operations {
32699 s32 (*read_mac_addr)(struct e1000_hw *);
32700 s32 (*set_vfta)(struct e1000_hw *, u16, bool);
32701 };
32702 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
32703
32704 struct e1000_mac_info {
32705 - struct e1000_mac_operations ops;
32706 + e1000_mac_operations_no_const ops;
32707 u8 addr[6];
32708 u8 perm_addr[6];
32709
32710 @@ -213,6 +214,7 @@ struct e1000_mbx_operations {
32711 s32 (*check_for_ack)(struct e1000_hw *);
32712 s32 (*check_for_rst)(struct e1000_hw *);
32713 };
32714 +typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
32715
32716 struct e1000_mbx_stats {
32717 u32 msgs_tx;
32718 @@ -224,7 +226,7 @@ struct e1000_mbx_stats {
32719 };
32720
32721 struct e1000_mbx_info {
32722 - struct e1000_mbx_operations ops;
32723 + e1000_mbx_operations_no_const ops;
32724 struct e1000_mbx_stats stats;
32725 u32 timeout;
32726 u32 usec_delay;
32727 diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
32728 index 6c5cca8..de8ef63 100644
32729 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
32730 +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
32731 @@ -2708,6 +2708,7 @@ struct ixgbe_eeprom_operations {
32732 s32 (*update_checksum)(struct ixgbe_hw *);
32733 u16 (*calc_checksum)(struct ixgbe_hw *);
32734 };
32735 +typedef struct ixgbe_eeprom_operations __no_const ixgbe_eeprom_operations_no_const;
32736
32737 struct ixgbe_mac_operations {
32738 s32 (*init_hw)(struct ixgbe_hw *);
32739 @@ -2769,6 +2770,7 @@ struct ixgbe_mac_operations {
32740 /* Manageability interface */
32741 s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8);
32742 };
32743 +typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
32744
32745 struct ixgbe_phy_operations {
32746 s32 (*identify)(struct ixgbe_hw *);
32747 @@ -2788,9 +2790,10 @@ struct ixgbe_phy_operations {
32748 s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
32749 s32 (*check_overtemp)(struct ixgbe_hw *);
32750 };
32751 +typedef struct ixgbe_phy_operations __no_const ixgbe_phy_operations_no_const;
32752
32753 struct ixgbe_eeprom_info {
32754 - struct ixgbe_eeprom_operations ops;
32755 + ixgbe_eeprom_operations_no_const ops;
32756 enum ixgbe_eeprom_type type;
32757 u32 semaphore_delay;
32758 u16 word_size;
32759 @@ -2800,7 +2803,7 @@ struct ixgbe_eeprom_info {
32760
32761 #define IXGBE_FLAGS_DOUBLE_RESET_REQUIRED 0x01
32762 struct ixgbe_mac_info {
32763 - struct ixgbe_mac_operations ops;
32764 + ixgbe_mac_operations_no_const ops;
32765 enum ixgbe_mac_type type;
32766 u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
32767 u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
32768 @@ -2828,7 +2831,7 @@ struct ixgbe_mac_info {
32769 };
32770
32771 struct ixgbe_phy_info {
32772 - struct ixgbe_phy_operations ops;
32773 + ixgbe_phy_operations_no_const ops;
32774 struct mdio_if_info mdio;
32775 enum ixgbe_phy_type type;
32776 u32 id;
32777 @@ -2856,6 +2859,7 @@ struct ixgbe_mbx_operations {
32778 s32 (*check_for_ack)(struct ixgbe_hw *, u16);
32779 s32 (*check_for_rst)(struct ixgbe_hw *, u16);
32780 };
32781 +typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
32782
32783 struct ixgbe_mbx_stats {
32784 u32 msgs_tx;
32785 @@ -2867,7 +2871,7 @@ struct ixgbe_mbx_stats {
32786 };
32787
32788 struct ixgbe_mbx_info {
32789 - struct ixgbe_mbx_operations ops;
32790 + ixgbe_mbx_operations_no_const ops;
32791 struct ixgbe_mbx_stats stats;
32792 u32 timeout;
32793 u32 usec_delay;
32794 diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h
32795 index 10306b4..28df758 100644
32796 --- a/drivers/net/ethernet/intel/ixgbevf/vf.h
32797 +++ b/drivers/net/ethernet/intel/ixgbevf/vf.h
32798 @@ -70,6 +70,7 @@ struct ixgbe_mac_operations {
32799 s32 (*clear_vfta)(struct ixgbe_hw *);
32800 s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool);
32801 };
32802 +typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
32803
32804 enum ixgbe_mac_type {
32805 ixgbe_mac_unknown = 0,
32806 @@ -79,7 +80,7 @@ enum ixgbe_mac_type {
32807 };
32808
32809 struct ixgbe_mac_info {
32810 - struct ixgbe_mac_operations ops;
32811 + ixgbe_mac_operations_no_const ops;
32812 u8 addr[6];
32813 u8 perm_addr[6];
32814
32815 @@ -103,6 +104,7 @@ struct ixgbe_mbx_operations {
32816 s32 (*check_for_ack)(struct ixgbe_hw *);
32817 s32 (*check_for_rst)(struct ixgbe_hw *);
32818 };
32819 +typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
32820
32821 struct ixgbe_mbx_stats {
32822 u32 msgs_tx;
32823 @@ -114,7 +116,7 @@ struct ixgbe_mbx_stats {
32824 };
32825
32826 struct ixgbe_mbx_info {
32827 - struct ixgbe_mbx_operations ops;
32828 + ixgbe_mbx_operations_no_const ops;
32829 struct ixgbe_mbx_stats stats;
32830 u32 timeout;
32831 u32 udelay;
32832 diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
32833 index 94bbc85..78c12e6 100644
32834 --- a/drivers/net/ethernet/mellanox/mlx4/main.c
32835 +++ b/drivers/net/ethernet/mellanox/mlx4/main.c
32836 @@ -40,6 +40,7 @@
32837 #include <linux/dma-mapping.h>
32838 #include <linux/slab.h>
32839 #include <linux/io-mapping.h>
32840 +#include <linux/sched.h>
32841
32842 #include <linux/mlx4/device.h>
32843 #include <linux/mlx4/doorbell.h>
32844 diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.h b/drivers/net/ethernet/neterion/vxge/vxge-config.h
32845 index 5046a64..71ca936 100644
32846 --- a/drivers/net/ethernet/neterion/vxge/vxge-config.h
32847 +++ b/drivers/net/ethernet/neterion/vxge/vxge-config.h
32848 @@ -514,7 +514,7 @@ struct vxge_hw_uld_cbs {
32849 void (*link_down)(struct __vxge_hw_device *devh);
32850 void (*crit_err)(struct __vxge_hw_device *devh,
32851 enum vxge_hw_event type, u64 ext_data);
32852 -};
32853 +} __no_const;
32854
32855 /*
32856 * struct __vxge_hw_blockpool_entry - Block private data structure
32857 diff --git a/drivers/net/ethernet/neterion/vxge/vxge-traffic.h b/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
32858 index 4a518a3..936b334 100644
32859 --- a/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
32860 +++ b/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
32861 @@ -2088,7 +2088,7 @@ struct vxge_hw_mempool_cbs {
32862 struct vxge_hw_mempool_dma *dma_object,
32863 u32 index,
32864 u32 is_last);
32865 -};
32866 +} __no_const;
32867
32868 #define VXGE_HW_VIRTUAL_PATH_HANDLE(vpath) \
32869 ((struct __vxge_hw_vpath_handle *)(vpath)->vpath_handles.next)
32870 diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
32871 index c8f47f1..5da9840 100644
32872 --- a/drivers/net/ethernet/realtek/r8169.c
32873 +++ b/drivers/net/ethernet/realtek/r8169.c
32874 @@ -698,17 +698,17 @@ struct rtl8169_private {
32875 struct mdio_ops {
32876 void (*write)(void __iomem *, int, int);
32877 int (*read)(void __iomem *, int);
32878 - } mdio_ops;
32879 + } __no_const mdio_ops;
32880
32881 struct pll_power_ops {
32882 void (*down)(struct rtl8169_private *);
32883 void (*up)(struct rtl8169_private *);
32884 - } pll_power_ops;
32885 + } __no_const pll_power_ops;
32886
32887 struct jumbo_ops {
32888 void (*enable)(struct rtl8169_private *);
32889 void (*disable)(struct rtl8169_private *);
32890 - } jumbo_ops;
32891 + } __no_const jumbo_ops;
32892
32893 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
32894 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
32895 diff --git a/drivers/net/ethernet/sis/sis190.c b/drivers/net/ethernet/sis/sis190.c
32896 index 1b4658c..a30dabb 100644
32897 --- a/drivers/net/ethernet/sis/sis190.c
32898 +++ b/drivers/net/ethernet/sis/sis190.c
32899 @@ -1624,7 +1624,7 @@ static int __devinit sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev,
32900 static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
32901 struct net_device *dev)
32902 {
32903 - static const u16 __devinitdata ids[] = { 0x0965, 0x0966, 0x0968 };
32904 + static const u16 __devinitconst ids[] = { 0x0965, 0x0966, 0x0968 };
32905 struct sis190_private *tp = netdev_priv(dev);
32906 struct pci_dev *isa_bridge;
32907 u8 reg, tmp8;
32908 diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
32909 index edfa15d..002bfa9 100644
32910 --- a/drivers/net/ppp/ppp_generic.c
32911 +++ b/drivers/net/ppp/ppp_generic.c
32912 @@ -987,7 +987,6 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
32913 void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
32914 struct ppp_stats stats;
32915 struct ppp_comp_stats cstats;
32916 - char *vers;
32917
32918 switch (cmd) {
32919 case SIOCGPPPSTATS:
32920 @@ -1009,8 +1008,7 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
32921 break;
32922
32923 case SIOCGPPPVER:
32924 - vers = PPP_VERSION;
32925 - if (copy_to_user(addr, vers, strlen(vers) + 1))
32926 + if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION)))
32927 break;
32928 err = 0;
32929 break;
32930 diff --git a/drivers/net/tokenring/abyss.c b/drivers/net/tokenring/abyss.c
32931 index 515f122..41dd273 100644
32932 --- a/drivers/net/tokenring/abyss.c
32933 +++ b/drivers/net/tokenring/abyss.c
32934 @@ -451,10 +451,12 @@ static struct pci_driver abyss_driver = {
32935
32936 static int __init abyss_init (void)
32937 {
32938 - abyss_netdev_ops = tms380tr_netdev_ops;
32939 + pax_open_kernel();
32940 + memcpy((void *)&abyss_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
32941
32942 - abyss_netdev_ops.ndo_open = abyss_open;
32943 - abyss_netdev_ops.ndo_stop = abyss_close;
32944 + *(void **)&abyss_netdev_ops.ndo_open = abyss_open;
32945 + *(void **)&abyss_netdev_ops.ndo_stop = abyss_close;
32946 + pax_close_kernel();
32947
32948 return pci_register_driver(&abyss_driver);
32949 }
32950 diff --git a/drivers/net/tokenring/madgemc.c b/drivers/net/tokenring/madgemc.c
32951 index 6153cfd..cf69c1c 100644
32952 --- a/drivers/net/tokenring/madgemc.c
32953 +++ b/drivers/net/tokenring/madgemc.c
32954 @@ -744,9 +744,11 @@ static struct mca_driver madgemc_driver = {
32955
32956 static int __init madgemc_init (void)
32957 {
32958 - madgemc_netdev_ops = tms380tr_netdev_ops;
32959 - madgemc_netdev_ops.ndo_open = madgemc_open;
32960 - madgemc_netdev_ops.ndo_stop = madgemc_close;
32961 + pax_open_kernel();
32962 + memcpy((void *)&madgemc_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
32963 + *(void **)&madgemc_netdev_ops.ndo_open = madgemc_open;
32964 + *(void **)&madgemc_netdev_ops.ndo_stop = madgemc_close;
32965 + pax_close_kernel();
32966
32967 return mca_register_driver (&madgemc_driver);
32968 }
32969 diff --git a/drivers/net/tokenring/proteon.c b/drivers/net/tokenring/proteon.c
32970 index 8d362e6..f91cc52 100644
32971 --- a/drivers/net/tokenring/proteon.c
32972 +++ b/drivers/net/tokenring/proteon.c
32973 @@ -353,9 +353,11 @@ static int __init proteon_init(void)
32974 struct platform_device *pdev;
32975 int i, num = 0, err = 0;
32976
32977 - proteon_netdev_ops = tms380tr_netdev_ops;
32978 - proteon_netdev_ops.ndo_open = proteon_open;
32979 - proteon_netdev_ops.ndo_stop = tms380tr_close;
32980 + pax_open_kernel();
32981 + memcpy((void *)&proteon_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
32982 + *(void **)&proteon_netdev_ops.ndo_open = proteon_open;
32983 + *(void **)&proteon_netdev_ops.ndo_stop = tms380tr_close;
32984 + pax_close_kernel();
32985
32986 err = platform_driver_register(&proteon_driver);
32987 if (err)
32988 diff --git a/drivers/net/tokenring/skisa.c b/drivers/net/tokenring/skisa.c
32989 index 46db5c5..37c1536 100644
32990 --- a/drivers/net/tokenring/skisa.c
32991 +++ b/drivers/net/tokenring/skisa.c
32992 @@ -363,9 +363,11 @@ static int __init sk_isa_init(void)
32993 struct platform_device *pdev;
32994 int i, num = 0, err = 0;
32995
32996 - sk_isa_netdev_ops = tms380tr_netdev_ops;
32997 - sk_isa_netdev_ops.ndo_open = sk_isa_open;
32998 - sk_isa_netdev_ops.ndo_stop = tms380tr_close;
32999 + pax_open_kernel();
33000 + memcpy((void *)&sk_isa_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
33001 + *(void **)&sk_isa_netdev_ops.ndo_open = sk_isa_open;
33002 + *(void **)&sk_isa_netdev_ops.ndo_stop = tms380tr_close;
33003 + pax_close_kernel();
33004
33005 err = platform_driver_register(&sk_isa_driver);
33006 if (err)
33007 diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
33008 index 304fe78..db112fa 100644
33009 --- a/drivers/net/usb/hso.c
33010 +++ b/drivers/net/usb/hso.c
33011 @@ -71,7 +71,7 @@
33012 #include <asm/byteorder.h>
33013 #include <linux/serial_core.h>
33014 #include <linux/serial.h>
33015 -
33016 +#include <asm/local.h>
33017
33018 #define MOD_AUTHOR "Option Wireless"
33019 #define MOD_DESCRIPTION "USB High Speed Option driver"
33020 @@ -257,7 +257,7 @@ struct hso_serial {
33021
33022 /* from usb_serial_port */
33023 struct tty_struct *tty;
33024 - int open_count;
33025 + local_t open_count;
33026 spinlock_t serial_lock;
33027
33028 int (*write_data) (struct hso_serial *serial);
33029 @@ -1190,7 +1190,7 @@ static void put_rxbuf_data_and_resubmit_ctrl_urb(struct hso_serial *serial)
33030 struct urb *urb;
33031
33032 urb = serial->rx_urb[0];
33033 - if (serial->open_count > 0) {
33034 + if (local_read(&serial->open_count) > 0) {
33035 count = put_rxbuf_data(urb, serial);
33036 if (count == -1)
33037 return;
33038 @@ -1226,7 +1226,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
33039 DUMP1(urb->transfer_buffer, urb->actual_length);
33040
33041 /* Anyone listening? */
33042 - if (serial->open_count == 0)
33043 + if (local_read(&serial->open_count) == 0)
33044 return;
33045
33046 if (status == 0) {
33047 @@ -1311,8 +1311,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
33048 spin_unlock_irq(&serial->serial_lock);
33049
33050 /* check for port already opened, if not set the termios */
33051 - serial->open_count++;
33052 - if (serial->open_count == 1) {
33053 + if (local_inc_return(&serial->open_count) == 1) {
33054 serial->rx_state = RX_IDLE;
33055 /* Force default termio settings */
33056 _hso_serial_set_termios(tty, NULL);
33057 @@ -1324,7 +1323,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
33058 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
33059 if (result) {
33060 hso_stop_serial_device(serial->parent);
33061 - serial->open_count--;
33062 + local_dec(&serial->open_count);
33063 kref_put(&serial->parent->ref, hso_serial_ref_free);
33064 }
33065 } else {
33066 @@ -1361,10 +1360,10 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
33067
33068 /* reset the rts and dtr */
33069 /* do the actual close */
33070 - serial->open_count--;
33071 + local_dec(&serial->open_count);
33072
33073 - if (serial->open_count <= 0) {
33074 - serial->open_count = 0;
33075 + if (local_read(&serial->open_count) <= 0) {
33076 + local_set(&serial->open_count, 0);
33077 spin_lock_irq(&serial->serial_lock);
33078 if (serial->tty == tty) {
33079 serial->tty->driver_data = NULL;
33080 @@ -1446,7 +1445,7 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
33081
33082 /* the actual setup */
33083 spin_lock_irqsave(&serial->serial_lock, flags);
33084 - if (serial->open_count)
33085 + if (local_read(&serial->open_count))
33086 _hso_serial_set_termios(tty, old);
33087 else
33088 tty->termios = old;
33089 @@ -1905,7 +1904,7 @@ static void intr_callback(struct urb *urb)
33090 D1("Pending read interrupt on port %d\n", i);
33091 spin_lock(&serial->serial_lock);
33092 if (serial->rx_state == RX_IDLE &&
33093 - serial->open_count > 0) {
33094 + local_read(&serial->open_count) > 0) {
33095 /* Setup and send a ctrl req read on
33096 * port i */
33097 if (!serial->rx_urb_filled[0]) {
33098 @@ -3098,7 +3097,7 @@ static int hso_resume(struct usb_interface *iface)
33099 /* Start all serial ports */
33100 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
33101 if (serial_table[i] && (serial_table[i]->interface == iface)) {
33102 - if (dev2ser(serial_table[i])->open_count) {
33103 + if (local_read(&dev2ser(serial_table[i])->open_count)) {
33104 result =
33105 hso_start_serial_device(serial_table[i], GFP_NOIO);
33106 hso_kick_transmit(dev2ser(serial_table[i]));
33107 diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
33108 index e662cbc..8d4a102 100644
33109 --- a/drivers/net/vmxnet3/vmxnet3_ethtool.c
33110 +++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
33111 @@ -601,8 +601,7 @@ vmxnet3_set_rss_indir(struct net_device *netdev,
33112 * Return with error code if any of the queue indices
33113 * is out of range
33114 */
33115 - if (p->ring_index[i] < 0 ||
33116 - p->ring_index[i] >= adapter->num_rx_queues)
33117 + if (p->ring_index[i] >= adapter->num_rx_queues)
33118 return -EINVAL;
33119 }
33120
33121 diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
33122 index 0f9ee46..e2d6e65 100644
33123 --- a/drivers/net/wireless/ath/ath.h
33124 +++ b/drivers/net/wireless/ath/ath.h
33125 @@ -119,6 +119,7 @@ struct ath_ops {
33126 void (*write_flush) (void *);
33127 u32 (*rmw)(void *, u32 reg_offset, u32 set, u32 clr);
33128 };
33129 +typedef struct ath_ops __no_const ath_ops_no_const;
33130
33131 struct ath_common;
33132 struct ath_bus_ops;
33133 diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
33134 index b592016..fe47870 100644
33135 --- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
33136 +++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
33137 @@ -183,8 +183,8 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
33138 ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
33139 ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
33140
33141 - ACCESS_ONCE(ads->ds_link) = i->link;
33142 - ACCESS_ONCE(ads->ds_data) = i->buf_addr[0];
33143 + ACCESS_ONCE_RW(ads->ds_link) = i->link;
33144 + ACCESS_ONCE_RW(ads->ds_data) = i->buf_addr[0];
33145
33146 ctl1 = i->buf_len[0] | (i->is_last ? 0 : AR_TxMore);
33147 ctl6 = SM(i->keytype, AR_EncrType);
33148 @@ -198,26 +198,26 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
33149
33150 if ((i->is_first || i->is_last) &&
33151 i->aggr != AGGR_BUF_MIDDLE && i->aggr != AGGR_BUF_LAST) {
33152 - ACCESS_ONCE(ads->ds_ctl2) = set11nTries(i->rates, 0)
33153 + ACCESS_ONCE_RW(ads->ds_ctl2) = set11nTries(i->rates, 0)
33154 | set11nTries(i->rates, 1)
33155 | set11nTries(i->rates, 2)
33156 | set11nTries(i->rates, 3)
33157 | (i->dur_update ? AR_DurUpdateEna : 0)
33158 | SM(0, AR_BurstDur);
33159
33160 - ACCESS_ONCE(ads->ds_ctl3) = set11nRate(i->rates, 0)
33161 + ACCESS_ONCE_RW(ads->ds_ctl3) = set11nRate(i->rates, 0)
33162 | set11nRate(i->rates, 1)
33163 | set11nRate(i->rates, 2)
33164 | set11nRate(i->rates, 3);
33165 } else {
33166 - ACCESS_ONCE(ads->ds_ctl2) = 0;
33167 - ACCESS_ONCE(ads->ds_ctl3) = 0;
33168 + ACCESS_ONCE_RW(ads->ds_ctl2) = 0;
33169 + ACCESS_ONCE_RW(ads->ds_ctl3) = 0;
33170 }
33171
33172 if (!i->is_first) {
33173 - ACCESS_ONCE(ads->ds_ctl0) = 0;
33174 - ACCESS_ONCE(ads->ds_ctl1) = ctl1;
33175 - ACCESS_ONCE(ads->ds_ctl6) = ctl6;
33176 + ACCESS_ONCE_RW(ads->ds_ctl0) = 0;
33177 + ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
33178 + ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
33179 return;
33180 }
33181
33182 @@ -242,7 +242,7 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
33183 break;
33184 }
33185
33186 - ACCESS_ONCE(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
33187 + ACCESS_ONCE_RW(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
33188 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
33189 | SM(i->txpower, AR_XmitPower)
33190 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
33191 @@ -252,19 +252,19 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
33192 | (i->flags & ATH9K_TXDESC_RTSENA ? AR_RTSEnable :
33193 (i->flags & ATH9K_TXDESC_CTSENA ? AR_CTSEnable : 0));
33194
33195 - ACCESS_ONCE(ads->ds_ctl1) = ctl1;
33196 - ACCESS_ONCE(ads->ds_ctl6) = ctl6;
33197 + ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
33198 + ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
33199
33200 if (i->aggr == AGGR_BUF_MIDDLE || i->aggr == AGGR_BUF_LAST)
33201 return;
33202
33203 - ACCESS_ONCE(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
33204 + ACCESS_ONCE_RW(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
33205 | set11nPktDurRTSCTS(i->rates, 1);
33206
33207 - ACCESS_ONCE(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
33208 + ACCESS_ONCE_RW(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
33209 | set11nPktDurRTSCTS(i->rates, 3);
33210
33211 - ACCESS_ONCE(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
33212 + ACCESS_ONCE_RW(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
33213 | set11nRateFlags(i->rates, 1)
33214 | set11nRateFlags(i->rates, 2)
33215 | set11nRateFlags(i->rates, 3)
33216 diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
33217 index f5ae3c6..7936af3 100644
33218 --- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
33219 +++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
33220 @@ -35,47 +35,47 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
33221 (i->qcu << AR_TxQcuNum_S) | 0x17;
33222
33223 checksum += val;
33224 - ACCESS_ONCE(ads->info) = val;
33225 + ACCESS_ONCE_RW(ads->info) = val;
33226
33227 checksum += i->link;
33228 - ACCESS_ONCE(ads->link) = i->link;
33229 + ACCESS_ONCE_RW(ads->link) = i->link;
33230
33231 checksum += i->buf_addr[0];
33232 - ACCESS_ONCE(ads->data0) = i->buf_addr[0];
33233 + ACCESS_ONCE_RW(ads->data0) = i->buf_addr[0];
33234 checksum += i->buf_addr[1];
33235 - ACCESS_ONCE(ads->data1) = i->buf_addr[1];
33236 + ACCESS_ONCE_RW(ads->data1) = i->buf_addr[1];
33237 checksum += i->buf_addr[2];
33238 - ACCESS_ONCE(ads->data2) = i->buf_addr[2];
33239 + ACCESS_ONCE_RW(ads->data2) = i->buf_addr[2];
33240 checksum += i->buf_addr[3];
33241 - ACCESS_ONCE(ads->data3) = i->buf_addr[3];
33242 + ACCESS_ONCE_RW(ads->data3) = i->buf_addr[3];
33243
33244 checksum += (val = (i->buf_len[0] << AR_BufLen_S) & AR_BufLen);
33245 - ACCESS_ONCE(ads->ctl3) = val;
33246 + ACCESS_ONCE_RW(ads->ctl3) = val;
33247 checksum += (val = (i->buf_len[1] << AR_BufLen_S) & AR_BufLen);
33248 - ACCESS_ONCE(ads->ctl5) = val;
33249 + ACCESS_ONCE_RW(ads->ctl5) = val;
33250 checksum += (val = (i->buf_len[2] << AR_BufLen_S) & AR_BufLen);
33251 - ACCESS_ONCE(ads->ctl7) = val;
33252 + ACCESS_ONCE_RW(ads->ctl7) = val;
33253 checksum += (val = (i->buf_len[3] << AR_BufLen_S) & AR_BufLen);
33254 - ACCESS_ONCE(ads->ctl9) = val;
33255 + ACCESS_ONCE_RW(ads->ctl9) = val;
33256
33257 checksum = (u16) (((checksum & 0xffff) + (checksum >> 16)) & 0xffff);
33258 - ACCESS_ONCE(ads->ctl10) = checksum;
33259 + ACCESS_ONCE_RW(ads->ctl10) = checksum;
33260
33261 if (i->is_first || i->is_last) {
33262 - ACCESS_ONCE(ads->ctl13) = set11nTries(i->rates, 0)
33263 + ACCESS_ONCE_RW(ads->ctl13) = set11nTries(i->rates, 0)
33264 | set11nTries(i->rates, 1)
33265 | set11nTries(i->rates, 2)
33266 | set11nTries(i->rates, 3)
33267 | (i->dur_update ? AR_DurUpdateEna : 0)
33268 | SM(0, AR_BurstDur);
33269
33270 - ACCESS_ONCE(ads->ctl14) = set11nRate(i->rates, 0)
33271 + ACCESS_ONCE_RW(ads->ctl14) = set11nRate(i->rates, 0)
33272 | set11nRate(i->rates, 1)
33273 | set11nRate(i->rates, 2)
33274 | set11nRate(i->rates, 3);
33275 } else {
33276 - ACCESS_ONCE(ads->ctl13) = 0;
33277 - ACCESS_ONCE(ads->ctl14) = 0;
33278 + ACCESS_ONCE_RW(ads->ctl13) = 0;
33279 + ACCESS_ONCE_RW(ads->ctl14) = 0;
33280 }
33281
33282 ads->ctl20 = 0;
33283 @@ -84,17 +84,17 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
33284
33285 ctl17 = SM(i->keytype, AR_EncrType);
33286 if (!i->is_first) {
33287 - ACCESS_ONCE(ads->ctl11) = 0;
33288 - ACCESS_ONCE(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
33289 - ACCESS_ONCE(ads->ctl15) = 0;
33290 - ACCESS_ONCE(ads->ctl16) = 0;
33291 - ACCESS_ONCE(ads->ctl17) = ctl17;
33292 - ACCESS_ONCE(ads->ctl18) = 0;
33293 - ACCESS_ONCE(ads->ctl19) = 0;
33294 + ACCESS_ONCE_RW(ads->ctl11) = 0;
33295 + ACCESS_ONCE_RW(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
33296 + ACCESS_ONCE_RW(ads->ctl15) = 0;
33297 + ACCESS_ONCE_RW(ads->ctl16) = 0;
33298 + ACCESS_ONCE_RW(ads->ctl17) = ctl17;
33299 + ACCESS_ONCE_RW(ads->ctl18) = 0;
33300 + ACCESS_ONCE_RW(ads->ctl19) = 0;
33301 return;
33302 }
33303
33304 - ACCESS_ONCE(ads->ctl11) = (i->pkt_len & AR_FrameLen)
33305 + ACCESS_ONCE_RW(ads->ctl11) = (i->pkt_len & AR_FrameLen)
33306 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
33307 | SM(i->txpower, AR_XmitPower)
33308 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
33309 @@ -130,22 +130,22 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
33310 val = (i->flags & ATH9K_TXDESC_PAPRD) >> ATH9K_TXDESC_PAPRD_S;
33311 ctl12 |= SM(val, AR_PAPRDChainMask);
33312
33313 - ACCESS_ONCE(ads->ctl12) = ctl12;
33314 - ACCESS_ONCE(ads->ctl17) = ctl17;
33315 + ACCESS_ONCE_RW(ads->ctl12) = ctl12;
33316 + ACCESS_ONCE_RW(ads->ctl17) = ctl17;
33317
33318 - ACCESS_ONCE(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
33319 + ACCESS_ONCE_RW(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
33320 | set11nPktDurRTSCTS(i->rates, 1);
33321
33322 - ACCESS_ONCE(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
33323 + ACCESS_ONCE_RW(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
33324 | set11nPktDurRTSCTS(i->rates, 3);
33325
33326 - ACCESS_ONCE(ads->ctl18) = set11nRateFlags(i->rates, 0)
33327 + ACCESS_ONCE_RW(ads->ctl18) = set11nRateFlags(i->rates, 0)
33328 | set11nRateFlags(i->rates, 1)
33329 | set11nRateFlags(i->rates, 2)
33330 | set11nRateFlags(i->rates, 3)
33331 | SM(i->rtscts_rate, AR_RTSCTSRate);
33332
33333 - ACCESS_ONCE(ads->ctl19) = AR_Not_Sounding;
33334 + ACCESS_ONCE_RW(ads->ctl19) = AR_Not_Sounding;
33335 }
33336
33337 static u16 ar9003_calc_ptr_chksum(struct ar9003_txc *ads)
33338 diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
33339 index f389b3c..7359e18 100644
33340 --- a/drivers/net/wireless/ath/ath9k/hw.h
33341 +++ b/drivers/net/wireless/ath/ath9k/hw.h
33342 @@ -605,7 +605,7 @@ struct ath_hw_private_ops {
33343
33344 /* ANI */
33345 void (*ani_cache_ini_regs)(struct ath_hw *ah);
33346 -};
33347 +} __no_const;
33348
33349 /**
33350 * struct ath_hw_ops - callbacks used by hardware code and driver code
33351 @@ -635,7 +635,7 @@ struct ath_hw_ops {
33352 void (*antdiv_comb_conf_set)(struct ath_hw *ah,
33353 struct ath_hw_antcomb_conf *antconf);
33354
33355 -};
33356 +} __no_const;
33357
33358 struct ath_nf_limits {
33359 s16 max;
33360 @@ -655,7 +655,7 @@ enum ath_cal_list {
33361 #define AH_FASTCC 0x4
33362
33363 struct ath_hw {
33364 - struct ath_ops reg_ops;
33365 + ath_ops_no_const reg_ops;
33366
33367 struct ieee80211_hw *hw;
33368 struct ath_common common;
33369 diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
33370 index bea8524..c677c06 100644
33371 --- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
33372 +++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
33373 @@ -547,7 +547,7 @@ struct phy_func_ptr {
33374 void (*carrsuppr)(struct brcms_phy *);
33375 s32 (*rxsigpwr)(struct brcms_phy *, s32);
33376 void (*detach)(struct brcms_phy *);
33377 -};
33378 +} __no_const;
33379
33380 struct brcms_phy {
33381 struct brcms_phy_pub pubpi_ro;
33382 diff --git a/drivers/net/wireless/iwlegacy/iwl3945-base.c b/drivers/net/wireless/iwlegacy/iwl3945-base.c
33383 index 05f2ad1..ae00eea 100644
33384 --- a/drivers/net/wireless/iwlegacy/iwl3945-base.c
33385 +++ b/drivers/net/wireless/iwlegacy/iwl3945-base.c
33386 @@ -3685,7 +3685,9 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
33387 */
33388 if (iwl3945_mod_params.disable_hw_scan) {
33389 IWL_DEBUG_INFO(priv, "Disabling hw_scan\n");
33390 - iwl3945_hw_ops.hw_scan = NULL;
33391 + pax_open_kernel();
33392 + *(void **)&iwl3945_hw_ops.hw_scan = NULL;
33393 + pax_close_kernel();
33394 }
33395
33396 IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n");
33397 diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h
33398 index 69a77e2..552b42c 100644
33399 --- a/drivers/net/wireless/iwlwifi/iwl-debug.h
33400 +++ b/drivers/net/wireless/iwlwifi/iwl-debug.h
33401 @@ -71,8 +71,8 @@ do { \
33402 } while (0)
33403
33404 #else
33405 -#define IWL_DEBUG(m, level, fmt, args...)
33406 -#define IWL_DEBUG_LIMIT(m, level, fmt, args...)
33407 +#define IWL_DEBUG(m, level, fmt, args...) do {} while (0)
33408 +#define IWL_DEBUG_LIMIT(m, level, fmt, args...) do {} while (0)
33409 #define iwl_print_hex_dump(m, level, p, len)
33410 #endif /* CONFIG_IWLWIFI_DEBUG */
33411
33412 diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
33413 index 523ad55..f8c5dc5 100644
33414 --- a/drivers/net/wireless/mac80211_hwsim.c
33415 +++ b/drivers/net/wireless/mac80211_hwsim.c
33416 @@ -1678,9 +1678,11 @@ static int __init init_mac80211_hwsim(void)
33417 return -EINVAL;
33418
33419 if (fake_hw_scan) {
33420 - mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
33421 - mac80211_hwsim_ops.sw_scan_start = NULL;
33422 - mac80211_hwsim_ops.sw_scan_complete = NULL;
33423 + pax_open_kernel();
33424 + *(void **)&mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
33425 + *(void **)&mac80211_hwsim_ops.sw_scan_start = NULL;
33426 + *(void **)&mac80211_hwsim_ops.sw_scan_complete = NULL;
33427 + pax_close_kernel();
33428 }
33429
33430 spin_lock_init(&hwsim_radio_lock);
33431 diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h
33432 index 30f138b..c904585 100644
33433 --- a/drivers/net/wireless/mwifiex/main.h
33434 +++ b/drivers/net/wireless/mwifiex/main.h
33435 @@ -543,7 +543,7 @@ struct mwifiex_if_ops {
33436 void (*cleanup_mpa_buf) (struct mwifiex_adapter *);
33437 int (*cmdrsp_complete) (struct mwifiex_adapter *, struct sk_buff *);
33438 int (*event_complete) (struct mwifiex_adapter *, struct sk_buff *);
33439 -};
33440 +} __no_const;
33441
33442 struct mwifiex_adapter {
33443 u8 iface_type;
33444 diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
33445 index 0c13840..a5c3ed6 100644
33446 --- a/drivers/net/wireless/rndis_wlan.c
33447 +++ b/drivers/net/wireless/rndis_wlan.c
33448 @@ -1275,7 +1275,7 @@ static int set_rts_threshold(struct usbnet *usbdev, u32 rts_threshold)
33449
33450 netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
33451
33452 - if (rts_threshold < 0 || rts_threshold > 2347)
33453 + if (rts_threshold > 2347)
33454 rts_threshold = 2347;
33455
33456 tmp = cpu_to_le32(rts_threshold);
33457 diff --git a/drivers/net/wireless/wl1251/wl1251.h b/drivers/net/wireless/wl1251/wl1251.h
33458 index a77f1bb..c608b2b 100644
33459 --- a/drivers/net/wireless/wl1251/wl1251.h
33460 +++ b/drivers/net/wireless/wl1251/wl1251.h
33461 @@ -266,7 +266,7 @@ struct wl1251_if_operations {
33462 void (*reset)(struct wl1251 *wl);
33463 void (*enable_irq)(struct wl1251 *wl);
33464 void (*disable_irq)(struct wl1251 *wl);
33465 -};
33466 +} __no_const;
33467
33468 struct wl1251 {
33469 struct ieee80211_hw *hw;
33470 diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
33471 index f34b5b2..b5abb9f 100644
33472 --- a/drivers/oprofile/buffer_sync.c
33473 +++ b/drivers/oprofile/buffer_sync.c
33474 @@ -343,7 +343,7 @@ static void add_data(struct op_entry *entry, struct mm_struct *mm)
33475 if (cookie == NO_COOKIE)
33476 offset = pc;
33477 if (cookie == INVALID_COOKIE) {
33478 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
33479 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
33480 offset = pc;
33481 }
33482 if (cookie != last_cookie) {
33483 @@ -387,14 +387,14 @@ add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
33484 /* add userspace sample */
33485
33486 if (!mm) {
33487 - atomic_inc(&oprofile_stats.sample_lost_no_mm);
33488 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
33489 return 0;
33490 }
33491
33492 cookie = lookup_dcookie(mm, s->eip, &offset);
33493
33494 if (cookie == INVALID_COOKIE) {
33495 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
33496 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
33497 return 0;
33498 }
33499
33500 @@ -563,7 +563,7 @@ void sync_buffer(int cpu)
33501 /* ignore backtraces if failed to add a sample */
33502 if (state == sb_bt_start) {
33503 state = sb_bt_ignore;
33504 - atomic_inc(&oprofile_stats.bt_lost_no_mapping);
33505 + atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
33506 }
33507 }
33508 release_mm(mm);
33509 diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
33510 index c0cc4e7..44d4e54 100644
33511 --- a/drivers/oprofile/event_buffer.c
33512 +++ b/drivers/oprofile/event_buffer.c
33513 @@ -53,7 +53,7 @@ void add_event_entry(unsigned long value)
33514 }
33515
33516 if (buffer_pos == buffer_size) {
33517 - atomic_inc(&oprofile_stats.event_lost_overflow);
33518 + atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
33519 return;
33520 }
33521
33522 diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
33523 index f8c752e..28bf4fc 100644
33524 --- a/drivers/oprofile/oprof.c
33525 +++ b/drivers/oprofile/oprof.c
33526 @@ -110,7 +110,7 @@ static void switch_worker(struct work_struct *work)
33527 if (oprofile_ops.switch_events())
33528 return;
33529
33530 - atomic_inc(&oprofile_stats.multiplex_counter);
33531 + atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
33532 start_switch_worker();
33533 }
33534
33535 diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
33536 index 917d28e..d62d981 100644
33537 --- a/drivers/oprofile/oprofile_stats.c
33538 +++ b/drivers/oprofile/oprofile_stats.c
33539 @@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
33540 cpu_buf->sample_invalid_eip = 0;
33541 }
33542
33543 - atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
33544 - atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
33545 - atomic_set(&oprofile_stats.event_lost_overflow, 0);
33546 - atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
33547 - atomic_set(&oprofile_stats.multiplex_counter, 0);
33548 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
33549 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
33550 + atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
33551 + atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
33552 + atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
33553 }
33554
33555
33556 diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
33557 index 38b6fc0..b5cbfce 100644
33558 --- a/drivers/oprofile/oprofile_stats.h
33559 +++ b/drivers/oprofile/oprofile_stats.h
33560 @@ -13,11 +13,11 @@
33561 #include <linux/atomic.h>
33562
33563 struct oprofile_stat_struct {
33564 - atomic_t sample_lost_no_mm;
33565 - atomic_t sample_lost_no_mapping;
33566 - atomic_t bt_lost_no_mapping;
33567 - atomic_t event_lost_overflow;
33568 - atomic_t multiplex_counter;
33569 + atomic_unchecked_t sample_lost_no_mm;
33570 + atomic_unchecked_t sample_lost_no_mapping;
33571 + atomic_unchecked_t bt_lost_no_mapping;
33572 + atomic_unchecked_t event_lost_overflow;
33573 + atomic_unchecked_t multiplex_counter;
33574 };
33575
33576 extern struct oprofile_stat_struct oprofile_stats;
33577 diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
33578 index 2f0aa0f..90fab02 100644
33579 --- a/drivers/oprofile/oprofilefs.c
33580 +++ b/drivers/oprofile/oprofilefs.c
33581 @@ -193,7 +193,7 @@ static const struct file_operations atomic_ro_fops = {
33582
33583
33584 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
33585 - char const *name, atomic_t *val)
33586 + char const *name, atomic_unchecked_t *val)
33587 {
33588 return __oprofilefs_create_file(sb, root, name,
33589 &atomic_ro_fops, 0444, val);
33590 diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
33591 index 3f56bc0..707d642 100644
33592 --- a/drivers/parport/procfs.c
33593 +++ b/drivers/parport/procfs.c
33594 @@ -64,7 +64,7 @@ static int do_active_device(ctl_table *table, int write,
33595
33596 *ppos += len;
33597
33598 - return copy_to_user(result, buffer, len) ? -EFAULT : 0;
33599 + return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
33600 }
33601
33602 #ifdef CONFIG_PARPORT_1284
33603 @@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table, int write,
33604
33605 *ppos += len;
33606
33607 - return copy_to_user (result, buffer, len) ? -EFAULT : 0;
33608 + return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
33609 }
33610 #endif /* IEEE1284.3 support. */
33611
33612 diff --git a/drivers/pci/hotplug/cpci_hotplug.h b/drivers/pci/hotplug/cpci_hotplug.h
33613 index 9fff878..ad0ad53 100644
33614 --- a/drivers/pci/hotplug/cpci_hotplug.h
33615 +++ b/drivers/pci/hotplug/cpci_hotplug.h
33616 @@ -59,7 +59,7 @@ struct cpci_hp_controller_ops {
33617 int (*hardware_test) (struct slot* slot, u32 value);
33618 u8 (*get_power) (struct slot* slot);
33619 int (*set_power) (struct slot* slot, int value);
33620 -};
33621 +} __no_const;
33622
33623 struct cpci_hp_controller {
33624 unsigned int irq;
33625 diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
33626 index 76ba8a1..20ca857 100644
33627 --- a/drivers/pci/hotplug/cpqphp_nvram.c
33628 +++ b/drivers/pci/hotplug/cpqphp_nvram.c
33629 @@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_start)
33630
33631 void compaq_nvram_init (void __iomem *rom_start)
33632 {
33633 +
33634 +#ifndef CONFIG_PAX_KERNEXEC
33635 if (rom_start) {
33636 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
33637 }
33638 +#endif
33639 +
33640 dbg("int15 entry = %p\n", compaq_int15_entry_point);
33641
33642 /* initialize our int15 lock */
33643 diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
33644 index cbfbab1..6a9fced 100644
33645 --- a/drivers/pci/pcie/aspm.c
33646 +++ b/drivers/pci/pcie/aspm.c
33647 @@ -27,9 +27,9 @@
33648 #define MODULE_PARAM_PREFIX "pcie_aspm."
33649
33650 /* Note: those are not register definitions */
33651 -#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
33652 -#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
33653 -#define ASPM_STATE_L1 (4) /* L1 state */
33654 +#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
33655 +#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
33656 +#define ASPM_STATE_L1 (4U) /* L1 state */
33657 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
33658 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
33659
33660 diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
33661 index 04e74f4..a960176 100644
33662 --- a/drivers/pci/probe.c
33663 +++ b/drivers/pci/probe.c
33664 @@ -136,7 +136,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
33665 u32 l, sz, mask;
33666 u16 orig_cmd;
33667
33668 - mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
33669 + mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0;
33670
33671 if (!dev->mmio_always_on) {
33672 pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
33673 diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
33674 index 27911b5..5b6db88 100644
33675 --- a/drivers/pci/proc.c
33676 +++ b/drivers/pci/proc.c
33677 @@ -476,7 +476,16 @@ static const struct file_operations proc_bus_pci_dev_operations = {
33678 static int __init pci_proc_init(void)
33679 {
33680 struct pci_dev *dev = NULL;
33681 +
33682 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
33683 +#ifdef CONFIG_GRKERNSEC_PROC_USER
33684 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
33685 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
33686 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
33687 +#endif
33688 +#else
33689 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
33690 +#endif
33691 proc_create("devices", 0, proc_bus_pci_dir,
33692 &proc_bus_pci_dev_operations);
33693 proc_initialized = 1;
33694 diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
33695 index 7b82868..b9344c9 100644
33696 --- a/drivers/platform/x86/thinkpad_acpi.c
33697 +++ b/drivers/platform/x86/thinkpad_acpi.c
33698 @@ -2094,7 +2094,7 @@ static int hotkey_mask_get(void)
33699 return 0;
33700 }
33701
33702 -void static hotkey_mask_warn_incomplete_mask(void)
33703 +static void hotkey_mask_warn_incomplete_mask(void)
33704 {
33705 /* log only what the user can fix... */
33706 const u32 wantedmask = hotkey_driver_mask &
33707 @@ -2325,11 +2325,6 @@ static void hotkey_read_nvram(struct tp_nvram_state *n, const u32 m)
33708 }
33709 }
33710
33711 -static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
33712 - struct tp_nvram_state *newn,
33713 - const u32 event_mask)
33714 -{
33715 -
33716 #define TPACPI_COMPARE_KEY(__scancode, __member) \
33717 do { \
33718 if ((event_mask & (1 << __scancode)) && \
33719 @@ -2343,36 +2338,42 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
33720 tpacpi_hotkey_send_key(__scancode); \
33721 } while (0)
33722
33723 - void issue_volchange(const unsigned int oldvol,
33724 - const unsigned int newvol)
33725 - {
33726 - unsigned int i = oldvol;
33727 +static void issue_volchange(const unsigned int oldvol,
33728 + const unsigned int newvol,
33729 + const u32 event_mask)
33730 +{
33731 + unsigned int i = oldvol;
33732
33733 - while (i > newvol) {
33734 - TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
33735 - i--;
33736 - }
33737 - while (i < newvol) {
33738 - TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
33739 - i++;
33740 - }
33741 + while (i > newvol) {
33742 + TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
33743 + i--;
33744 }
33745 + while (i < newvol) {
33746 + TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
33747 + i++;
33748 + }
33749 +}
33750
33751 - void issue_brightnesschange(const unsigned int oldbrt,
33752 - const unsigned int newbrt)
33753 - {
33754 - unsigned int i = oldbrt;
33755 +static void issue_brightnesschange(const unsigned int oldbrt,
33756 + const unsigned int newbrt,
33757 + const u32 event_mask)
33758 +{
33759 + unsigned int i = oldbrt;
33760
33761 - while (i > newbrt) {
33762 - TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
33763 - i--;
33764 - }
33765 - while (i < newbrt) {
33766 - TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
33767 - i++;
33768 - }
33769 + while (i > newbrt) {
33770 + TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
33771 + i--;
33772 + }
33773 + while (i < newbrt) {
33774 + TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
33775 + i++;
33776 }
33777 +}
33778
33779 +static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
33780 + struct tp_nvram_state *newn,
33781 + const u32 event_mask)
33782 +{
33783 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_THINKPAD, thinkpad_toggle);
33784 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNSPACE, zoom_toggle);
33785 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNF7, display_toggle);
33786 @@ -2406,7 +2407,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
33787 oldn->volume_level != newn->volume_level) {
33788 /* recently muted, or repeated mute keypress, or
33789 * multiple presses ending in mute */
33790 - issue_volchange(oldn->volume_level, newn->volume_level);
33791 + issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
33792 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_MUTE);
33793 }
33794 } else {
33795 @@ -2416,7 +2417,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
33796 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
33797 }
33798 if (oldn->volume_level != newn->volume_level) {
33799 - issue_volchange(oldn->volume_level, newn->volume_level);
33800 + issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
33801 } else if (oldn->volume_toggle != newn->volume_toggle) {
33802 /* repeated vol up/down keypress at end of scale ? */
33803 if (newn->volume_level == 0)
33804 @@ -2429,7 +2430,8 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
33805 /* handle brightness */
33806 if (oldn->brightness_level != newn->brightness_level) {
33807 issue_brightnesschange(oldn->brightness_level,
33808 - newn->brightness_level);
33809 + newn->brightness_level,
33810 + event_mask);
33811 } else if (oldn->brightness_toggle != newn->brightness_toggle) {
33812 /* repeated key presses that didn't change state */
33813 if (newn->brightness_level == 0)
33814 @@ -2438,10 +2440,10 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
33815 && !tp_features.bright_unkfw)
33816 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
33817 }
33818 +}
33819
33820 #undef TPACPI_COMPARE_KEY
33821 #undef TPACPI_MAY_SEND_KEY
33822 -}
33823
33824 /*
33825 * Polling driver
33826 diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
33827 index b859d16..5cc6b1a 100644
33828 --- a/drivers/pnp/pnpbios/bioscalls.c
33829 +++ b/drivers/pnp/pnpbios/bioscalls.c
33830 @@ -59,7 +59,7 @@ do { \
33831 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
33832 } while(0)
33833
33834 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
33835 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
33836 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
33837
33838 /*
33839 @@ -96,7 +96,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
33840
33841 cpu = get_cpu();
33842 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
33843 +
33844 + pax_open_kernel();
33845 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
33846 + pax_close_kernel();
33847
33848 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
33849 spin_lock_irqsave(&pnp_bios_lock, flags);
33850 @@ -134,7 +137,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
33851 :"memory");
33852 spin_unlock_irqrestore(&pnp_bios_lock, flags);
33853
33854 + pax_open_kernel();
33855 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
33856 + pax_close_kernel();
33857 +
33858 put_cpu();
33859
33860 /* If we get here and this is set then the PnP BIOS faulted on us. */
33861 @@ -468,7 +474,7 @@ int pnp_bios_read_escd(char *data, u32 nvram_base)
33862 return status;
33863 }
33864
33865 -void pnpbios_calls_init(union pnp_bios_install_struct *header)
33866 +void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
33867 {
33868 int i;
33869
33870 @@ -476,6 +482,8 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
33871 pnp_bios_callpoint.offset = header->fields.pm16offset;
33872 pnp_bios_callpoint.segment = PNP_CS16;
33873
33874 + pax_open_kernel();
33875 +
33876 for_each_possible_cpu(i) {
33877 struct desc_struct *gdt = get_cpu_gdt_table(i);
33878 if (!gdt)
33879 @@ -487,4 +495,6 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
33880 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
33881 (unsigned long)__va(header->fields.pm16dseg));
33882 }
33883 +
33884 + pax_close_kernel();
33885 }
33886 diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c
33887 index b0ecacb..7c9da2e 100644
33888 --- a/drivers/pnp/resource.c
33889 +++ b/drivers/pnp/resource.c
33890 @@ -360,7 +360,7 @@ int pnp_check_irq(struct pnp_dev *dev, struct resource *res)
33891 return 1;
33892
33893 /* check if the resource is valid */
33894 - if (*irq < 0 || *irq > 15)
33895 + if (*irq > 15)
33896 return 0;
33897
33898 /* check if the resource is reserved */
33899 @@ -424,7 +424,7 @@ int pnp_check_dma(struct pnp_dev *dev, struct resource *res)
33900 return 1;
33901
33902 /* check if the resource is valid */
33903 - if (*dma < 0 || *dma == 4 || *dma > 7)
33904 + if (*dma == 4 || *dma > 7)
33905 return 0;
33906
33907 /* check if the resource is reserved */
33908 diff --git a/drivers/power/bq27x00_battery.c b/drivers/power/bq27x00_battery.c
33909 index bb16f5b..c751eef 100644
33910 --- a/drivers/power/bq27x00_battery.c
33911 +++ b/drivers/power/bq27x00_battery.c
33912 @@ -67,7 +67,7 @@
33913 struct bq27x00_device_info;
33914 struct bq27x00_access_methods {
33915 int (*read)(struct bq27x00_device_info *di, u8 reg, bool single);
33916 -};
33917 +} __no_const;
33918
33919 enum bq27x00_chip { BQ27000, BQ27500 };
33920
33921 diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c
33922 index 33f5d9a..d957d3f 100644
33923 --- a/drivers/regulator/max8660.c
33924 +++ b/drivers/regulator/max8660.c
33925 @@ -383,8 +383,10 @@ static int __devinit max8660_probe(struct i2c_client *client,
33926 max8660->shadow_regs[MAX8660_OVER1] = 5;
33927 } else {
33928 /* Otherwise devices can be toggled via software */
33929 - max8660_dcdc_ops.enable = max8660_dcdc_enable;
33930 - max8660_dcdc_ops.disable = max8660_dcdc_disable;
33931 + pax_open_kernel();
33932 + *(void **)&max8660_dcdc_ops.enable = max8660_dcdc_enable;
33933 + *(void **)&max8660_dcdc_ops.disable = max8660_dcdc_disable;
33934 + pax_close_kernel();
33935 }
33936
33937 /*
33938 diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c
33939 index 023d17d..74ef35b 100644
33940 --- a/drivers/regulator/mc13892-regulator.c
33941 +++ b/drivers/regulator/mc13892-regulator.c
33942 @@ -565,10 +565,12 @@ static int __devinit mc13892_regulator_probe(struct platform_device *pdev)
33943 }
33944 mc13xxx_unlock(mc13892);
33945
33946 - mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
33947 + pax_open_kernel();
33948 + *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
33949 = mc13892_vcam_set_mode;
33950 - mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
33951 + *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
33952 = mc13892_vcam_get_mode;
33953 + pax_close_kernel();
33954 for (i = 0; i < pdata->num_regulators; i++) {
33955 init_data = &pdata->regulators[i];
33956 priv->regulators[i] = regulator_register(
33957 diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
33958 index cace6d3..f623fda 100644
33959 --- a/drivers/rtc/rtc-dev.c
33960 +++ b/drivers/rtc/rtc-dev.c
33961 @@ -14,6 +14,7 @@
33962 #include <linux/module.h>
33963 #include <linux/rtc.h>
33964 #include <linux/sched.h>
33965 +#include <linux/grsecurity.h>
33966 #include "rtc-core.h"
33967
33968 static dev_t rtc_devt;
33969 @@ -345,6 +346,8 @@ static long rtc_dev_ioctl(struct file *file,
33970 if (copy_from_user(&tm, uarg, sizeof(tm)))
33971 return -EFAULT;
33972
33973 + gr_log_timechange();
33974 +
33975 return rtc_set_time(rtc, &tm);
33976
33977 case RTC_PIE_ON:
33978 diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
33979 index ffb5878..e6d785c 100644
33980 --- a/drivers/scsi/aacraid/aacraid.h
33981 +++ b/drivers/scsi/aacraid/aacraid.h
33982 @@ -492,7 +492,7 @@ struct adapter_ops
33983 int (*adapter_scsi)(struct fib * fib, struct scsi_cmnd * cmd);
33984 /* Administrative operations */
33985 int (*adapter_comm)(struct aac_dev * dev, int comm);
33986 -};
33987 +} __no_const;
33988
33989 /*
33990 * Define which interrupt handler needs to be installed
33991 diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
33992 index 705e13e..91c873c 100644
33993 --- a/drivers/scsi/aacraid/linit.c
33994 +++ b/drivers/scsi/aacraid/linit.c
33995 @@ -93,7 +93,7 @@ static DECLARE_PCI_DEVICE_TABLE(aac_pci_tbl) = {
33996 #elif defined(__devinitconst)
33997 static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
33998 #else
33999 -static const struct pci_device_id aac_pci_tbl[] __devinitdata = {
34000 +static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
34001 #endif
34002 { 0x1028, 0x0001, 0x1028, 0x0001, 0, 0, 0 }, /* PERC 2/Si (Iguana/PERC2Si) */
34003 { 0x1028, 0x0002, 0x1028, 0x0002, 0, 0, 1 }, /* PERC 3/Di (Opal/PERC3Di) */
34004 diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
34005 index d5ff142..49c0ebb 100644
34006 --- a/drivers/scsi/aic94xx/aic94xx_init.c
34007 +++ b/drivers/scsi/aic94xx/aic94xx_init.c
34008 @@ -1012,7 +1012,7 @@ static struct sas_domain_function_template aic94xx_transport_functions = {
34009 .lldd_control_phy = asd_control_phy,
34010 };
34011
34012 -static const struct pci_device_id aic94xx_pci_table[] __devinitdata = {
34013 +static const struct pci_device_id aic94xx_pci_table[] __devinitconst = {
34014 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x410),0, 0, 1},
34015 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x412),0, 0, 1},
34016 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x416),0, 0, 1},
34017 diff --git a/drivers/scsi/bfa/bfa.h b/drivers/scsi/bfa/bfa.h
34018 index a796de9..1ef20e1 100644
34019 --- a/drivers/scsi/bfa/bfa.h
34020 +++ b/drivers/scsi/bfa/bfa.h
34021 @@ -196,7 +196,7 @@ struct bfa_hwif_s {
34022 u32 *end);
34023 int cpe_vec_q0;
34024 int rme_vec_q0;
34025 -};
34026 +} __no_const;
34027 typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status);
34028
34029 struct bfa_faa_cbfn_s {
34030 diff --git a/drivers/scsi/bfa/bfa_fcpim.c b/drivers/scsi/bfa/bfa_fcpim.c
34031 index e07bd47..cd1bbbb 100644
34032 --- a/drivers/scsi/bfa/bfa_fcpim.c
34033 +++ b/drivers/scsi/bfa/bfa_fcpim.c
34034 @@ -4121,7 +4121,7 @@ bfa_fcp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
34035
34036 bfa_iotag_attach(fcp);
34037
34038 - fcp->itn_arr = (struct bfa_itn_s *) bfa_mem_kva_curp(fcp);
34039 + fcp->itn_arr = (bfa_itn_s_no_const *) bfa_mem_kva_curp(fcp);
34040 bfa_mem_kva_curp(fcp) = (u8 *)fcp->itn_arr +
34041 (fcp->num_itns * sizeof(struct bfa_itn_s));
34042 memset(fcp->itn_arr, 0,
34043 @@ -4179,7 +4179,7 @@ bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
34044 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m))
34045 {
34046 struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
34047 - struct bfa_itn_s *itn;
34048 + bfa_itn_s_no_const *itn;
34049
34050 itn = BFA_ITN_FROM_TAG(fcp, rport->rport_tag);
34051 itn->isr = isr;
34052 diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h
34053 index 1080bcb..a3b39e3 100644
34054 --- a/drivers/scsi/bfa/bfa_fcpim.h
34055 +++ b/drivers/scsi/bfa/bfa_fcpim.h
34056 @@ -37,6 +37,7 @@ struct bfa_iotag_s {
34057 struct bfa_itn_s {
34058 bfa_isr_func_t isr;
34059 };
34060 +typedef struct bfa_itn_s __no_const bfa_itn_s_no_const;
34061
34062 void bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
34063 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m));
34064 @@ -149,7 +150,7 @@ struct bfa_fcp_mod_s {
34065 struct list_head iotag_tio_free_q; /* free IO resources */
34066 struct list_head iotag_unused_q; /* unused IO resources*/
34067 struct bfa_iotag_s *iotag_arr;
34068 - struct bfa_itn_s *itn_arr;
34069 + bfa_itn_s_no_const *itn_arr;
34070 int num_ioim_reqs;
34071 int num_fwtio_reqs;
34072 int num_itns;
34073 diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
34074 index 546d46b..642fa5b 100644
34075 --- a/drivers/scsi/bfa/bfa_ioc.h
34076 +++ b/drivers/scsi/bfa/bfa_ioc.h
34077 @@ -258,7 +258,7 @@ struct bfa_ioc_cbfn_s {
34078 bfa_ioc_disable_cbfn_t disable_cbfn;
34079 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
34080 bfa_ioc_reset_cbfn_t reset_cbfn;
34081 -};
34082 +} __no_const;
34083
34084 /*
34085 * IOC event notification mechanism.
34086 @@ -346,7 +346,7 @@ struct bfa_ioc_hwif_s {
34087 void (*ioc_sync_ack) (struct bfa_ioc_s *ioc);
34088 bfa_boolean_t (*ioc_sync_complete) (struct bfa_ioc_s *ioc);
34089 bfa_boolean_t (*ioc_lpu_read_stat) (struct bfa_ioc_s *ioc);
34090 -};
34091 +} __no_const;
34092
34093 /*
34094 * Queue element to wait for room in request queue. FIFO order is
34095 diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
34096 index 351dc0b..951dc32 100644
34097 --- a/drivers/scsi/hosts.c
34098 +++ b/drivers/scsi/hosts.c
34099 @@ -42,7 +42,7 @@
34100 #include "scsi_logging.h"
34101
34102
34103 -static atomic_t scsi_host_next_hn; /* host_no for next new host */
34104 +static atomic_unchecked_t scsi_host_next_hn; /* host_no for next new host */
34105
34106
34107 static void scsi_host_cls_release(struct device *dev)
34108 @@ -357,7 +357,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
34109 * subtract one because we increment first then return, but we need to
34110 * know what the next host number was before increment
34111 */
34112 - shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
34113 + shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
34114 shost->dma_channel = 0xff;
34115
34116 /* These three are default values which can be overridden */
34117 diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
34118 index 865d452..e9b7fa7 100644
34119 --- a/drivers/scsi/hpsa.c
34120 +++ b/drivers/scsi/hpsa.c
34121 @@ -505,7 +505,7 @@ static inline u32 next_command(struct ctlr_info *h)
34122 u32 a;
34123
34124 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
34125 - return h->access.command_completed(h);
34126 + return h->access->command_completed(h);
34127
34128 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
34129 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
34130 @@ -2989,7 +2989,7 @@ static void start_io(struct ctlr_info *h)
34131 while (!list_empty(&h->reqQ)) {
34132 c = list_entry(h->reqQ.next, struct CommandList, list);
34133 /* can't do anything if fifo is full */
34134 - if ((h->access.fifo_full(h))) {
34135 + if ((h->access->fifo_full(h))) {
34136 dev_warn(&h->pdev->dev, "fifo full\n");
34137 break;
34138 }
34139 @@ -2999,7 +2999,7 @@ static void start_io(struct ctlr_info *h)
34140 h->Qdepth--;
34141
34142 /* Tell the controller execute command */
34143 - h->access.submit_command(h, c);
34144 + h->access->submit_command(h, c);
34145
34146 /* Put job onto the completed Q */
34147 addQ(&h->cmpQ, c);
34148 @@ -3008,17 +3008,17 @@ static void start_io(struct ctlr_info *h)
34149
34150 static inline unsigned long get_next_completion(struct ctlr_info *h)
34151 {
34152 - return h->access.command_completed(h);
34153 + return h->access->command_completed(h);
34154 }
34155
34156 static inline bool interrupt_pending(struct ctlr_info *h)
34157 {
34158 - return h->access.intr_pending(h);
34159 + return h->access->intr_pending(h);
34160 }
34161
34162 static inline long interrupt_not_for_us(struct ctlr_info *h)
34163 {
34164 - return (h->access.intr_pending(h) == 0) ||
34165 + return (h->access->intr_pending(h) == 0) ||
34166 (h->interrupts_enabled == 0);
34167 }
34168
34169 @@ -3917,7 +3917,7 @@ static int __devinit hpsa_pci_init(struct ctlr_info *h)
34170 if (prod_index < 0)
34171 return -ENODEV;
34172 h->product_name = products[prod_index].product_name;
34173 - h->access = *(products[prod_index].access);
34174 + h->access = products[prod_index].access;
34175
34176 if (hpsa_board_disabled(h->pdev)) {
34177 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
34178 @@ -4162,7 +4162,7 @@ static void controller_lockup_detected(struct ctlr_info *h)
34179
34180 assert_spin_locked(&lockup_detector_lock);
34181 remove_ctlr_from_lockup_detector_list(h);
34182 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
34183 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
34184 spin_lock_irqsave(&h->lock, flags);
34185 h->lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
34186 spin_unlock_irqrestore(&h->lock, flags);
34187 @@ -4340,7 +4340,7 @@ reinit_after_soft_reset:
34188 }
34189
34190 /* make sure the board interrupts are off */
34191 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
34192 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
34193
34194 if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
34195 goto clean2;
34196 @@ -4374,7 +4374,7 @@ reinit_after_soft_reset:
34197 * fake ones to scoop up any residual completions.
34198 */
34199 spin_lock_irqsave(&h->lock, flags);
34200 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
34201 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
34202 spin_unlock_irqrestore(&h->lock, flags);
34203 free_irq(h->intr[h->intr_mode], h);
34204 rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
34205 @@ -4393,9 +4393,9 @@ reinit_after_soft_reset:
34206 dev_info(&h->pdev->dev, "Board READY.\n");
34207 dev_info(&h->pdev->dev,
34208 "Waiting for stale completions to drain.\n");
34209 - h->access.set_intr_mask(h, HPSA_INTR_ON);
34210 + h->access->set_intr_mask(h, HPSA_INTR_ON);
34211 msleep(10000);
34212 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
34213 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
34214
34215 rc = controller_reset_failed(h->cfgtable);
34216 if (rc)
34217 @@ -4416,7 +4416,7 @@ reinit_after_soft_reset:
34218 }
34219
34220 /* Turn the interrupts on so we can service requests */
34221 - h->access.set_intr_mask(h, HPSA_INTR_ON);
34222 + h->access->set_intr_mask(h, HPSA_INTR_ON);
34223
34224 hpsa_hba_inquiry(h);
34225 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
34226 @@ -4468,7 +4468,7 @@ static void hpsa_shutdown(struct pci_dev *pdev)
34227 * To write all data in the battery backed cache to disks
34228 */
34229 hpsa_flush_cache(h);
34230 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
34231 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
34232 free_irq(h->intr[h->intr_mode], h);
34233 #ifdef CONFIG_PCI_MSI
34234 if (h->msix_vector)
34235 @@ -4632,7 +4632,7 @@ static __devinit void hpsa_enter_performant_mode(struct ctlr_info *h,
34236 return;
34237 }
34238 /* Change the access methods to the performant access methods */
34239 - h->access = SA5_performant_access;
34240 + h->access = &SA5_performant_access;
34241 h->transMethod = CFGTBL_Trans_Performant;
34242 }
34243
34244 diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
34245 index 91edafb..a9b88ec 100644
34246 --- a/drivers/scsi/hpsa.h
34247 +++ b/drivers/scsi/hpsa.h
34248 @@ -73,7 +73,7 @@ struct ctlr_info {
34249 unsigned int msix_vector;
34250 unsigned int msi_vector;
34251 int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
34252 - struct access_method access;
34253 + struct access_method *access;
34254
34255 /* queue and queue Info */
34256 struct list_head reqQ;
34257 diff --git a/drivers/scsi/ips.h b/drivers/scsi/ips.h
34258 index f2df059..a3a9930 100644
34259 --- a/drivers/scsi/ips.h
34260 +++ b/drivers/scsi/ips.h
34261 @@ -1027,7 +1027,7 @@ typedef struct {
34262 int (*intr)(struct ips_ha *);
34263 void (*enableint)(struct ips_ha *);
34264 uint32_t (*statupd)(struct ips_ha *);
34265 -} ips_hw_func_t;
34266 +} __no_const ips_hw_func_t;
34267
34268 typedef struct ips_ha {
34269 uint8_t ha_id[IPS_MAX_CHANNELS+1];
34270 diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
34271 index 9de9db2..1e09660 100644
34272 --- a/drivers/scsi/libfc/fc_exch.c
34273 +++ b/drivers/scsi/libfc/fc_exch.c
34274 @@ -105,12 +105,12 @@ struct fc_exch_mgr {
34275 * all together if not used XXX
34276 */
34277 struct {
34278 - atomic_t no_free_exch;
34279 - atomic_t no_free_exch_xid;
34280 - atomic_t xid_not_found;
34281 - atomic_t xid_busy;
34282 - atomic_t seq_not_found;
34283 - atomic_t non_bls_resp;
34284 + atomic_unchecked_t no_free_exch;
34285 + atomic_unchecked_t no_free_exch_xid;
34286 + atomic_unchecked_t xid_not_found;
34287 + atomic_unchecked_t xid_busy;
34288 + atomic_unchecked_t seq_not_found;
34289 + atomic_unchecked_t non_bls_resp;
34290 } stats;
34291 };
34292
34293 @@ -719,7 +719,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
34294 /* allocate memory for exchange */
34295 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
34296 if (!ep) {
34297 - atomic_inc(&mp->stats.no_free_exch);
34298 + atomic_inc_unchecked(&mp->stats.no_free_exch);
34299 goto out;
34300 }
34301 memset(ep, 0, sizeof(*ep));
34302 @@ -780,7 +780,7 @@ out:
34303 return ep;
34304 err:
34305 spin_unlock_bh(&pool->lock);
34306 - atomic_inc(&mp->stats.no_free_exch_xid);
34307 + atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
34308 mempool_free(ep, mp->ep_pool);
34309 return NULL;
34310 }
34311 @@ -923,7 +923,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
34312 xid = ntohs(fh->fh_ox_id); /* we originated exch */
34313 ep = fc_exch_find(mp, xid);
34314 if (!ep) {
34315 - atomic_inc(&mp->stats.xid_not_found);
34316 + atomic_inc_unchecked(&mp->stats.xid_not_found);
34317 reject = FC_RJT_OX_ID;
34318 goto out;
34319 }
34320 @@ -953,7 +953,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
34321 ep = fc_exch_find(mp, xid);
34322 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
34323 if (ep) {
34324 - atomic_inc(&mp->stats.xid_busy);
34325 + atomic_inc_unchecked(&mp->stats.xid_busy);
34326 reject = FC_RJT_RX_ID;
34327 goto rel;
34328 }
34329 @@ -964,7 +964,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
34330 }
34331 xid = ep->xid; /* get our XID */
34332 } else if (!ep) {
34333 - atomic_inc(&mp->stats.xid_not_found);
34334 + atomic_inc_unchecked(&mp->stats.xid_not_found);
34335 reject = FC_RJT_RX_ID; /* XID not found */
34336 goto out;
34337 }
34338 @@ -981,7 +981,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
34339 } else {
34340 sp = &ep->seq;
34341 if (sp->id != fh->fh_seq_id) {
34342 - atomic_inc(&mp->stats.seq_not_found);
34343 + atomic_inc_unchecked(&mp->stats.seq_not_found);
34344 if (f_ctl & FC_FC_END_SEQ) {
34345 /*
34346 * Update sequence_id based on incoming last
34347 @@ -1431,22 +1431,22 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
34348
34349 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
34350 if (!ep) {
34351 - atomic_inc(&mp->stats.xid_not_found);
34352 + atomic_inc_unchecked(&mp->stats.xid_not_found);
34353 goto out;
34354 }
34355 if (ep->esb_stat & ESB_ST_COMPLETE) {
34356 - atomic_inc(&mp->stats.xid_not_found);
34357 + atomic_inc_unchecked(&mp->stats.xid_not_found);
34358 goto rel;
34359 }
34360 if (ep->rxid == FC_XID_UNKNOWN)
34361 ep->rxid = ntohs(fh->fh_rx_id);
34362 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
34363 - atomic_inc(&mp->stats.xid_not_found);
34364 + atomic_inc_unchecked(&mp->stats.xid_not_found);
34365 goto rel;
34366 }
34367 if (ep->did != ntoh24(fh->fh_s_id) &&
34368 ep->did != FC_FID_FLOGI) {
34369 - atomic_inc(&mp->stats.xid_not_found);
34370 + atomic_inc_unchecked(&mp->stats.xid_not_found);
34371 goto rel;
34372 }
34373 sof = fr_sof(fp);
34374 @@ -1455,7 +1455,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
34375 sp->ssb_stat |= SSB_ST_RESP;
34376 sp->id = fh->fh_seq_id;
34377 } else if (sp->id != fh->fh_seq_id) {
34378 - atomic_inc(&mp->stats.seq_not_found);
34379 + atomic_inc_unchecked(&mp->stats.seq_not_found);
34380 goto rel;
34381 }
34382
34383 @@ -1519,9 +1519,9 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
34384 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
34385
34386 if (!sp)
34387 - atomic_inc(&mp->stats.xid_not_found);
34388 + atomic_inc_unchecked(&mp->stats.xid_not_found);
34389 else
34390 - atomic_inc(&mp->stats.non_bls_resp);
34391 + atomic_inc_unchecked(&mp->stats.non_bls_resp);
34392
34393 fc_frame_free(fp);
34394 }
34395 diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
34396 index db9238f..4378ed2 100644
34397 --- a/drivers/scsi/libsas/sas_ata.c
34398 +++ b/drivers/scsi/libsas/sas_ata.c
34399 @@ -368,7 +368,7 @@ static struct ata_port_operations sas_sata_ops = {
34400 .postreset = ata_std_postreset,
34401 .error_handler = ata_std_error_handler,
34402 .post_internal_cmd = sas_ata_post_internal,
34403 - .qc_defer = ata_std_qc_defer,
34404 + .qc_defer = ata_std_qc_defer,
34405 .qc_prep = ata_noop_qc_prep,
34406 .qc_issue = sas_ata_qc_issue,
34407 .qc_fill_rtf = sas_ata_qc_fill_rtf,
34408 diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
34409 index bb4c8e0..f33d849 100644
34410 --- a/drivers/scsi/lpfc/lpfc.h
34411 +++ b/drivers/scsi/lpfc/lpfc.h
34412 @@ -425,7 +425,7 @@ struct lpfc_vport {
34413 struct dentry *debug_nodelist;
34414 struct dentry *vport_debugfs_root;
34415 struct lpfc_debugfs_trc *disc_trc;
34416 - atomic_t disc_trc_cnt;
34417 + atomic_unchecked_t disc_trc_cnt;
34418 #endif
34419 uint8_t stat_data_enabled;
34420 uint8_t stat_data_blocked;
34421 @@ -835,8 +835,8 @@ struct lpfc_hba {
34422 struct timer_list fabric_block_timer;
34423 unsigned long bit_flags;
34424 #define FABRIC_COMANDS_BLOCKED 0
34425 - atomic_t num_rsrc_err;
34426 - atomic_t num_cmd_success;
34427 + atomic_unchecked_t num_rsrc_err;
34428 + atomic_unchecked_t num_cmd_success;
34429 unsigned long last_rsrc_error_time;
34430 unsigned long last_ramp_down_time;
34431 unsigned long last_ramp_up_time;
34432 @@ -866,7 +866,7 @@ struct lpfc_hba {
34433
34434 struct dentry *debug_slow_ring_trc;
34435 struct lpfc_debugfs_trc *slow_ring_trc;
34436 - atomic_t slow_ring_trc_cnt;
34437 + atomic_unchecked_t slow_ring_trc_cnt;
34438 /* iDiag debugfs sub-directory */
34439 struct dentry *idiag_root;
34440 struct dentry *idiag_pci_cfg;
34441 diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
34442 index 2838259..a07cfb5 100644
34443 --- a/drivers/scsi/lpfc/lpfc_debugfs.c
34444 +++ b/drivers/scsi/lpfc/lpfc_debugfs.c
34445 @@ -106,7 +106,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc,
34446
34447 #include <linux/debugfs.h>
34448
34449 -static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
34450 +static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
34451 static unsigned long lpfc_debugfs_start_time = 0L;
34452
34453 /* iDiag */
34454 @@ -147,7 +147,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
34455 lpfc_debugfs_enable = 0;
34456
34457 len = 0;
34458 - index = (atomic_read(&vport->disc_trc_cnt) + 1) &
34459 + index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
34460 (lpfc_debugfs_max_disc_trc - 1);
34461 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
34462 dtp = vport->disc_trc + i;
34463 @@ -213,7 +213,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
34464 lpfc_debugfs_enable = 0;
34465
34466 len = 0;
34467 - index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
34468 + index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
34469 (lpfc_debugfs_max_slow_ring_trc - 1);
34470 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
34471 dtp = phba->slow_ring_trc + i;
34472 @@ -636,14 +636,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
34473 !vport || !vport->disc_trc)
34474 return;
34475
34476 - index = atomic_inc_return(&vport->disc_trc_cnt) &
34477 + index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
34478 (lpfc_debugfs_max_disc_trc - 1);
34479 dtp = vport->disc_trc + index;
34480 dtp->fmt = fmt;
34481 dtp->data1 = data1;
34482 dtp->data2 = data2;
34483 dtp->data3 = data3;
34484 - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
34485 + dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
34486 dtp->jif = jiffies;
34487 #endif
34488 return;
34489 @@ -674,14 +674,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
34490 !phba || !phba->slow_ring_trc)
34491 return;
34492
34493 - index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
34494 + index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
34495 (lpfc_debugfs_max_slow_ring_trc - 1);
34496 dtp = phba->slow_ring_trc + index;
34497 dtp->fmt = fmt;
34498 dtp->data1 = data1;
34499 dtp->data2 = data2;
34500 dtp->data3 = data3;
34501 - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
34502 + dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
34503 dtp->jif = jiffies;
34504 #endif
34505 return;
34506 @@ -3986,7 +3986,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
34507 "slow_ring buffer\n");
34508 goto debug_failed;
34509 }
34510 - atomic_set(&phba->slow_ring_trc_cnt, 0);
34511 + atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
34512 memset(phba->slow_ring_trc, 0,
34513 (sizeof(struct lpfc_debugfs_trc) *
34514 lpfc_debugfs_max_slow_ring_trc));
34515 @@ -4032,7 +4032,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
34516 "buffer\n");
34517 goto debug_failed;
34518 }
34519 - atomic_set(&vport->disc_trc_cnt, 0);
34520 + atomic_set_unchecked(&vport->disc_trc_cnt, 0);
34521
34522 snprintf(name, sizeof(name), "discovery_trace");
34523 vport->debug_disc_trc =
34524 diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
34525 index 55bc4fc..a2a109c 100644
34526 --- a/drivers/scsi/lpfc/lpfc_init.c
34527 +++ b/drivers/scsi/lpfc/lpfc_init.c
34528 @@ -10027,8 +10027,10 @@ lpfc_init(void)
34529 printk(LPFC_COPYRIGHT "\n");
34530
34531 if (lpfc_enable_npiv) {
34532 - lpfc_transport_functions.vport_create = lpfc_vport_create;
34533 - lpfc_transport_functions.vport_delete = lpfc_vport_delete;
34534 + pax_open_kernel();
34535 + *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
34536 + *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
34537 + pax_close_kernel();
34538 }
34539 lpfc_transport_template =
34540 fc_attach_transport(&lpfc_transport_functions);
34541 diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
34542 index 2e1e54e..1af0a0d 100644
34543 --- a/drivers/scsi/lpfc/lpfc_scsi.c
34544 +++ b/drivers/scsi/lpfc/lpfc_scsi.c
34545 @@ -305,7 +305,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
34546 uint32_t evt_posted;
34547
34548 spin_lock_irqsave(&phba->hbalock, flags);
34549 - atomic_inc(&phba->num_rsrc_err);
34550 + atomic_inc_unchecked(&phba->num_rsrc_err);
34551 phba->last_rsrc_error_time = jiffies;
34552
34553 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
34554 @@ -346,7 +346,7 @@ lpfc_rampup_queue_depth(struct lpfc_vport *vport,
34555 unsigned long flags;
34556 struct lpfc_hba *phba = vport->phba;
34557 uint32_t evt_posted;
34558 - atomic_inc(&phba->num_cmd_success);
34559 + atomic_inc_unchecked(&phba->num_cmd_success);
34560
34561 if (vport->cfg_lun_queue_depth <= queue_depth)
34562 return;
34563 @@ -390,8 +390,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
34564 unsigned long num_rsrc_err, num_cmd_success;
34565 int i;
34566
34567 - num_rsrc_err = atomic_read(&phba->num_rsrc_err);
34568 - num_cmd_success = atomic_read(&phba->num_cmd_success);
34569 + num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
34570 + num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
34571
34572 vports = lpfc_create_vport_work_array(phba);
34573 if (vports != NULL)
34574 @@ -411,8 +411,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
34575 }
34576 }
34577 lpfc_destroy_vport_work_array(phba, vports);
34578 - atomic_set(&phba->num_rsrc_err, 0);
34579 - atomic_set(&phba->num_cmd_success, 0);
34580 + atomic_set_unchecked(&phba->num_rsrc_err, 0);
34581 + atomic_set_unchecked(&phba->num_cmd_success, 0);
34582 }
34583
34584 /**
34585 @@ -446,8 +446,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
34586 }
34587 }
34588 lpfc_destroy_vport_work_array(phba, vports);
34589 - atomic_set(&phba->num_rsrc_err, 0);
34590 - atomic_set(&phba->num_cmd_success, 0);
34591 + atomic_set_unchecked(&phba->num_rsrc_err, 0);
34592 + atomic_set_unchecked(&phba->num_cmd_success, 0);
34593 }
34594
34595 /**
34596 diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
34597 index 5163edb..7b142bc 100644
34598 --- a/drivers/scsi/pmcraid.c
34599 +++ b/drivers/scsi/pmcraid.c
34600 @@ -200,8 +200,8 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
34601 res->scsi_dev = scsi_dev;
34602 scsi_dev->hostdata = res;
34603 res->change_detected = 0;
34604 - atomic_set(&res->read_failures, 0);
34605 - atomic_set(&res->write_failures, 0);
34606 + atomic_set_unchecked(&res->read_failures, 0);
34607 + atomic_set_unchecked(&res->write_failures, 0);
34608 rc = 0;
34609 }
34610 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
34611 @@ -2676,9 +2676,9 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
34612
34613 /* If this was a SCSI read/write command keep count of errors */
34614 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
34615 - atomic_inc(&res->read_failures);
34616 + atomic_inc_unchecked(&res->read_failures);
34617 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
34618 - atomic_inc(&res->write_failures);
34619 + atomic_inc_unchecked(&res->write_failures);
34620
34621 if (!RES_IS_GSCSI(res->cfg_entry) &&
34622 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
34623 @@ -3534,7 +3534,7 @@ static int pmcraid_queuecommand_lck(
34624 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
34625 * hrrq_id assigned here in queuecommand
34626 */
34627 - ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
34628 + ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
34629 pinstance->num_hrrq;
34630 cmd->cmd_done = pmcraid_io_done;
34631
34632 @@ -3859,7 +3859,7 @@ static long pmcraid_ioctl_passthrough(
34633 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
34634 * hrrq_id assigned here in queuecommand
34635 */
34636 - ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
34637 + ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
34638 pinstance->num_hrrq;
34639
34640 if (request_size) {
34641 @@ -4497,7 +4497,7 @@ static void pmcraid_worker_function(struct work_struct *workp)
34642
34643 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
34644 /* add resources only after host is added into system */
34645 - if (!atomic_read(&pinstance->expose_resources))
34646 + if (!atomic_read_unchecked(&pinstance->expose_resources))
34647 return;
34648
34649 fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
34650 @@ -5331,8 +5331,8 @@ static int __devinit pmcraid_init_instance(
34651 init_waitqueue_head(&pinstance->reset_wait_q);
34652
34653 atomic_set(&pinstance->outstanding_cmds, 0);
34654 - atomic_set(&pinstance->last_message_id, 0);
34655 - atomic_set(&pinstance->expose_resources, 0);
34656 + atomic_set_unchecked(&pinstance->last_message_id, 0);
34657 + atomic_set_unchecked(&pinstance->expose_resources, 0);
34658
34659 INIT_LIST_HEAD(&pinstance->free_res_q);
34660 INIT_LIST_HEAD(&pinstance->used_res_q);
34661 @@ -6047,7 +6047,7 @@ static int __devinit pmcraid_probe(
34662 /* Schedule worker thread to handle CCN and take care of adding and
34663 * removing devices to OS
34664 */
34665 - atomic_set(&pinstance->expose_resources, 1);
34666 + atomic_set_unchecked(&pinstance->expose_resources, 1);
34667 schedule_work(&pinstance->worker_q);
34668 return rc;
34669
34670 diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
34671 index ca496c7..9c791d5 100644
34672 --- a/drivers/scsi/pmcraid.h
34673 +++ b/drivers/scsi/pmcraid.h
34674 @@ -748,7 +748,7 @@ struct pmcraid_instance {
34675 struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
34676
34677 /* Message id as filled in last fired IOARCB, used to identify HRRQ */
34678 - atomic_t last_message_id;
34679 + atomic_unchecked_t last_message_id;
34680
34681 /* configuration table */
34682 struct pmcraid_config_table *cfg_table;
34683 @@ -777,7 +777,7 @@ struct pmcraid_instance {
34684 atomic_t outstanding_cmds;
34685
34686 /* should add/delete resources to mid-layer now ?*/
34687 - atomic_t expose_resources;
34688 + atomic_unchecked_t expose_resources;
34689
34690
34691
34692 @@ -813,8 +813,8 @@ struct pmcraid_resource_entry {
34693 struct pmcraid_config_table_entry_ext cfg_entry_ext;
34694 };
34695 struct scsi_device *scsi_dev; /* Link scsi_device structure */
34696 - atomic_t read_failures; /* count of failed READ commands */
34697 - atomic_t write_failures; /* count of failed WRITE commands */
34698 + atomic_unchecked_t read_failures; /* count of failed READ commands */
34699 + atomic_unchecked_t write_failures; /* count of failed WRITE commands */
34700
34701 /* To indicate add/delete/modify during CCN */
34702 u8 change_detected;
34703 diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
34704 index fcf052c..a8025a4 100644
34705 --- a/drivers/scsi/qla2xxx/qla_def.h
34706 +++ b/drivers/scsi/qla2xxx/qla_def.h
34707 @@ -2244,7 +2244,7 @@ struct isp_operations {
34708 int (*get_flash_version) (struct scsi_qla_host *, void *);
34709 int (*start_scsi) (srb_t *);
34710 int (*abort_isp) (struct scsi_qla_host *);
34711 -};
34712 +} __no_const;
34713
34714 /* MSI-X Support *************************************************************/
34715
34716 diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
34717 index fd5edc6..4906148 100644
34718 --- a/drivers/scsi/qla4xxx/ql4_def.h
34719 +++ b/drivers/scsi/qla4xxx/ql4_def.h
34720 @@ -258,7 +258,7 @@ struct ddb_entry {
34721 * (4000 only) */
34722 atomic_t relogin_timer; /* Max Time to wait for
34723 * relogin to complete */
34724 - atomic_t relogin_retry_count; /* Num of times relogin has been
34725 + atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
34726 * retried */
34727 uint32_t default_time2wait; /* Default Min time between
34728 * relogins (+aens) */
34729 diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
34730 index 4169c8b..a8b896b 100644
34731 --- a/drivers/scsi/qla4xxx/ql4_os.c
34732 +++ b/drivers/scsi/qla4xxx/ql4_os.c
34733 @@ -2104,12 +2104,12 @@ void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
34734 */
34735 if (!iscsi_is_session_online(cls_sess)) {
34736 /* Reset retry relogin timer */
34737 - atomic_inc(&ddb_entry->relogin_retry_count);
34738 + atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
34739 DEBUG2(ql4_printk(KERN_INFO, ha,
34740 "%s: index[%d] relogin timed out-retrying"
34741 " relogin (%d), retry (%d)\n", __func__,
34742 ddb_entry->fw_ddb_index,
34743 - atomic_read(&ddb_entry->relogin_retry_count),
34744 + atomic_read_unchecked(&ddb_entry->relogin_retry_count),
34745 ddb_entry->default_time2wait + 4));
34746 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
34747 atomic_set(&ddb_entry->retry_relogin_timer,
34748 @@ -3835,7 +3835,7 @@ static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
34749
34750 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
34751 atomic_set(&ddb_entry->relogin_timer, 0);
34752 - atomic_set(&ddb_entry->relogin_retry_count, 0);
34753 + atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
34754
34755 ddb_entry->default_relogin_timeout =
34756 le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
34757 diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
34758 index 2aeb2e9..46e3925 100644
34759 --- a/drivers/scsi/scsi.c
34760 +++ b/drivers/scsi/scsi.c
34761 @@ -655,7 +655,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
34762 unsigned long timeout;
34763 int rtn = 0;
34764
34765 - atomic_inc(&cmd->device->iorequest_cnt);
34766 + atomic_inc_unchecked(&cmd->device->iorequest_cnt);
34767
34768 /* check if the device is still usable */
34769 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
34770 diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
34771 index f85cfa6..a57c9e8 100644
34772 --- a/drivers/scsi/scsi_lib.c
34773 +++ b/drivers/scsi/scsi_lib.c
34774 @@ -1416,7 +1416,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
34775 shost = sdev->host;
34776 scsi_init_cmd_errh(cmd);
34777 cmd->result = DID_NO_CONNECT << 16;
34778 - atomic_inc(&cmd->device->iorequest_cnt);
34779 + atomic_inc_unchecked(&cmd->device->iorequest_cnt);
34780
34781 /*
34782 * SCSI request completion path will do scsi_device_unbusy(),
34783 @@ -1442,9 +1442,9 @@ static void scsi_softirq_done(struct request *rq)
34784
34785 INIT_LIST_HEAD(&cmd->eh_entry);
34786
34787 - atomic_inc(&cmd->device->iodone_cnt);
34788 + atomic_inc_unchecked(&cmd->device->iodone_cnt);
34789 if (cmd->result)
34790 - atomic_inc(&cmd->device->ioerr_cnt);
34791 + atomic_inc_unchecked(&cmd->device->ioerr_cnt);
34792
34793 disposition = scsi_decide_disposition(cmd);
34794 if (disposition != SUCCESS &&
34795 diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
34796 index 04c2a27..9d8bd66 100644
34797 --- a/drivers/scsi/scsi_sysfs.c
34798 +++ b/drivers/scsi/scsi_sysfs.c
34799 @@ -660,7 +660,7 @@ show_iostat_##field(struct device *dev, struct device_attribute *attr, \
34800 char *buf) \
34801 { \
34802 struct scsi_device *sdev = to_scsi_device(dev); \
34803 - unsigned long long count = atomic_read(&sdev->field); \
34804 + unsigned long long count = atomic_read_unchecked(&sdev->field); \
34805 return snprintf(buf, 20, "0x%llx\n", count); \
34806 } \
34807 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
34808 diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
34809 index 84a1fdf..693b0d6 100644
34810 --- a/drivers/scsi/scsi_tgt_lib.c
34811 +++ b/drivers/scsi/scsi_tgt_lib.c
34812 @@ -362,7 +362,7 @@ static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
34813 int err;
34814
34815 dprintk("%lx %u\n", uaddr, len);
34816 - err = blk_rq_map_user(q, rq, NULL, (void *)uaddr, len, GFP_KERNEL);
34817 + err = blk_rq_map_user(q, rq, NULL, (void __user *)uaddr, len, GFP_KERNEL);
34818 if (err) {
34819 /*
34820 * TODO: need to fixup sg_tablesize, max_segment_size,
34821 diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
34822 index 1b21491..1b7f60e 100644
34823 --- a/drivers/scsi/scsi_transport_fc.c
34824 +++ b/drivers/scsi/scsi_transport_fc.c
34825 @@ -484,7 +484,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_class,
34826 * Netlink Infrastructure
34827 */
34828
34829 -static atomic_t fc_event_seq;
34830 +static atomic_unchecked_t fc_event_seq;
34831
34832 /**
34833 * fc_get_event_number - Obtain the next sequential FC event number
34834 @@ -497,7 +497,7 @@ static atomic_t fc_event_seq;
34835 u32
34836 fc_get_event_number(void)
34837 {
34838 - return atomic_add_return(1, &fc_event_seq);
34839 + return atomic_add_return_unchecked(1, &fc_event_seq);
34840 }
34841 EXPORT_SYMBOL(fc_get_event_number);
34842
34843 @@ -645,7 +645,7 @@ static __init int fc_transport_init(void)
34844 {
34845 int error;
34846
34847 - atomic_set(&fc_event_seq, 0);
34848 + atomic_set_unchecked(&fc_event_seq, 0);
34849
34850 error = transport_class_register(&fc_host_class);
34851 if (error)
34852 @@ -835,7 +835,7 @@ static int fc_str_to_dev_loss(const char *buf, unsigned long *val)
34853 char *cp;
34854
34855 *val = simple_strtoul(buf, &cp, 0);
34856 - if ((*cp && (*cp != '\n')) || (*val < 0))
34857 + if (*cp && (*cp != '\n'))
34858 return -EINVAL;
34859 /*
34860 * Check for overflow; dev_loss_tmo is u32
34861 diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
34862 index 96029e6..4d77fa0 100644
34863 --- a/drivers/scsi/scsi_transport_iscsi.c
34864 +++ b/drivers/scsi/scsi_transport_iscsi.c
34865 @@ -79,7 +79,7 @@ struct iscsi_internal {
34866 struct transport_container session_cont;
34867 };
34868
34869 -static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
34870 +static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
34871 static struct workqueue_struct *iscsi_eh_timer_workq;
34872
34873 static DEFINE_IDA(iscsi_sess_ida);
34874 @@ -1062,7 +1062,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
34875 int err;
34876
34877 ihost = shost->shost_data;
34878 - session->sid = atomic_add_return(1, &iscsi_session_nr);
34879 + session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
34880
34881 if (target_id == ISCSI_MAX_TARGET) {
34882 id = ida_simple_get(&iscsi_sess_ida, 0, 0, GFP_KERNEL);
34883 @@ -2663,7 +2663,7 @@ static __init int iscsi_transport_init(void)
34884 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
34885 ISCSI_TRANSPORT_VERSION);
34886
34887 - atomic_set(&iscsi_session_nr, 0);
34888 + atomic_set_unchecked(&iscsi_session_nr, 0);
34889
34890 err = class_register(&iscsi_transport_class);
34891 if (err)
34892 diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
34893 index 21a045e..ec89e03 100644
34894 --- a/drivers/scsi/scsi_transport_srp.c
34895 +++ b/drivers/scsi/scsi_transport_srp.c
34896 @@ -33,7 +33,7 @@
34897 #include "scsi_transport_srp_internal.h"
34898
34899 struct srp_host_attrs {
34900 - atomic_t next_port_id;
34901 + atomic_unchecked_t next_port_id;
34902 };
34903 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
34904
34905 @@ -62,7 +62,7 @@ static int srp_host_setup(struct transport_container *tc, struct device *dev,
34906 struct Scsi_Host *shost = dev_to_shost(dev);
34907 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
34908
34909 - atomic_set(&srp_host->next_port_id, 0);
34910 + atomic_set_unchecked(&srp_host->next_port_id, 0);
34911 return 0;
34912 }
34913
34914 @@ -211,7 +211,7 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
34915 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
34916 rport->roles = ids->roles;
34917
34918 - id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
34919 + id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
34920 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
34921
34922 transport_setup_device(&rport->dev);
34923 diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
34924 index 441a1c5..07cece7 100644
34925 --- a/drivers/scsi/sg.c
34926 +++ b/drivers/scsi/sg.c
34927 @@ -1077,7 +1077,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
34928 sdp->disk->disk_name,
34929 MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
34930 NULL,
34931 - (char *)arg);
34932 + (char __user *)arg);
34933 case BLKTRACESTART:
34934 return blk_trace_startstop(sdp->device->request_queue, 1);
34935 case BLKTRACESTOP:
34936 @@ -2312,7 +2312,7 @@ struct sg_proc_leaf {
34937 const struct file_operations * fops;
34938 };
34939
34940 -static struct sg_proc_leaf sg_proc_leaf_arr[] = {
34941 +static const struct sg_proc_leaf sg_proc_leaf_arr[] = {
34942 {"allow_dio", &adio_fops},
34943 {"debug", &debug_fops},
34944 {"def_reserved_size", &dressz_fops},
34945 @@ -2327,7 +2327,7 @@ sg_proc_init(void)
34946 {
34947 int k, mask;
34948 int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
34949 - struct sg_proc_leaf * leaf;
34950 + const struct sg_proc_leaf * leaf;
34951
34952 sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL);
34953 if (!sg_proc_sgp)
34954 diff --git a/drivers/spi/spi-dw-pci.c b/drivers/spi/spi-dw-pci.c
34955 index f64250e..1ee3049 100644
34956 --- a/drivers/spi/spi-dw-pci.c
34957 +++ b/drivers/spi/spi-dw-pci.c
34958 @@ -149,7 +149,7 @@ static int spi_resume(struct pci_dev *pdev)
34959 #define spi_resume NULL
34960 #endif
34961
34962 -static const struct pci_device_id pci_ids[] __devinitdata = {
34963 +static const struct pci_device_id pci_ids[] __devinitconst = {
34964 /* Intel MID platform SPI controller 0 */
34965 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0800) },
34966 {},
34967 diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
34968 index 77eae99..b7cdcc9 100644
34969 --- a/drivers/spi/spi.c
34970 +++ b/drivers/spi/spi.c
34971 @@ -1024,7 +1024,7 @@ int spi_bus_unlock(struct spi_master *master)
34972 EXPORT_SYMBOL_GPL(spi_bus_unlock);
34973
34974 /* portable code must never pass more than 32 bytes */
34975 -#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
34976 +#define SPI_BUFSIZ max(32UL,SMP_CACHE_BYTES)
34977
34978 static u8 *buf;
34979
34980 diff --git a/drivers/staging/gma500/power.c b/drivers/staging/gma500/power.c
34981 index 436fe97..4082570 100644
34982 --- a/drivers/staging/gma500/power.c
34983 +++ b/drivers/staging/gma500/power.c
34984 @@ -266,7 +266,7 @@ bool gma_power_begin(struct drm_device *dev, bool force_on)
34985 ret = gma_resume_pci(dev->pdev);
34986 if (ret == 0) {
34987 /* FIXME: we want to defer this for Medfield/Oaktrail */
34988 - gma_resume_display(dev);
34989 + gma_resume_display(dev->pdev);
34990 psb_irq_preinstall(dev);
34991 psb_irq_postinstall(dev);
34992 pm_runtime_get(&dev->pdev->dev);
34993 diff --git a/drivers/staging/hv/rndis_filter.c b/drivers/staging/hv/rndis_filter.c
34994 index bafccb3..e3ac78d 100644
34995 --- a/drivers/staging/hv/rndis_filter.c
34996 +++ b/drivers/staging/hv/rndis_filter.c
34997 @@ -42,7 +42,7 @@ struct rndis_device {
34998
34999 enum rndis_device_state state;
35000 bool link_state;
35001 - atomic_t new_req_id;
35002 + atomic_unchecked_t new_req_id;
35003
35004 spinlock_t request_lock;
35005 struct list_head req_list;
35006 @@ -116,7 +116,7 @@ static struct rndis_request *get_rndis_request(struct rndis_device *dev,
35007 * template
35008 */
35009 set = &rndis_msg->msg.set_req;
35010 - set->req_id = atomic_inc_return(&dev->new_req_id);
35011 + set->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
35012
35013 /* Add to the request list */
35014 spin_lock_irqsave(&dev->request_lock, flags);
35015 @@ -646,7 +646,7 @@ static void rndis_filter_halt_device(struct rndis_device *dev)
35016
35017 /* Setup the rndis set */
35018 halt = &request->request_msg.msg.halt_req;
35019 - halt->req_id = atomic_inc_return(&dev->new_req_id);
35020 + halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
35021
35022 /* Ignore return since this msg is optional. */
35023 rndis_filter_send_request(dev, request);
35024 diff --git a/drivers/staging/iio/buffer_generic.h b/drivers/staging/iio/buffer_generic.h
35025 index 9e8f010..af9efb5 100644
35026 --- a/drivers/staging/iio/buffer_generic.h
35027 +++ b/drivers/staging/iio/buffer_generic.h
35028 @@ -64,7 +64,7 @@ struct iio_buffer_access_funcs {
35029
35030 int (*is_enabled)(struct iio_buffer *buffer);
35031 int (*enable)(struct iio_buffer *buffer);
35032 -};
35033 +} __no_const;
35034
35035 /**
35036 * struct iio_buffer_setup_ops - buffer setup related callbacks
35037 diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
35038 index 8b307b4..a97ac91 100644
35039 --- a/drivers/staging/octeon/ethernet-rx.c
35040 +++ b/drivers/staging/octeon/ethernet-rx.c
35041 @@ -420,11 +420,11 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
35042 /* Increment RX stats for virtual ports */
35043 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
35044 #ifdef CONFIG_64BIT
35045 - atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
35046 - atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
35047 + atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
35048 + atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
35049 #else
35050 - atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
35051 - atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
35052 + atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
35053 + atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
35054 #endif
35055 }
35056 netif_receive_skb(skb);
35057 @@ -436,9 +436,9 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
35058 dev->name);
35059 */
35060 #ifdef CONFIG_64BIT
35061 - atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
35062 + atomic64_unchecked_add(1, (atomic64_unchecked_t *)&priv->stats.rx_dropped);
35063 #else
35064 - atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
35065 + atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_dropped);
35066 #endif
35067 dev_kfree_skb_irq(skb);
35068 }
35069 diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
35070 index 076f866..2308070 100644
35071 --- a/drivers/staging/octeon/ethernet.c
35072 +++ b/drivers/staging/octeon/ethernet.c
35073 @@ -258,11 +258,11 @@ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
35074 * since the RX tasklet also increments it.
35075 */
35076 #ifdef CONFIG_64BIT
35077 - atomic64_add(rx_status.dropped_packets,
35078 - (atomic64_t *)&priv->stats.rx_dropped);
35079 + atomic64_add_unchecked(rx_status.dropped_packets,
35080 + (atomic64_unchecked_t *)&priv->stats.rx_dropped);
35081 #else
35082 - atomic_add(rx_status.dropped_packets,
35083 - (atomic_t *)&priv->stats.rx_dropped);
35084 + atomic_add_unchecked(rx_status.dropped_packets,
35085 + (atomic_unchecked_t *)&priv->stats.rx_dropped);
35086 #endif
35087 }
35088
35089 diff --git a/drivers/staging/pohmelfs/inode.c b/drivers/staging/pohmelfs/inode.c
35090 index 7a19555..466456d 100644
35091 --- a/drivers/staging/pohmelfs/inode.c
35092 +++ b/drivers/staging/pohmelfs/inode.c
35093 @@ -1861,7 +1861,7 @@ static int pohmelfs_fill_super(struct super_block *sb, void *data, int silent)
35094 mutex_init(&psb->mcache_lock);
35095 psb->mcache_root = RB_ROOT;
35096 psb->mcache_timeout = msecs_to_jiffies(5000);
35097 - atomic_long_set(&psb->mcache_gen, 0);
35098 + atomic_long_set_unchecked(&psb->mcache_gen, 0);
35099
35100 psb->trans_max_pages = 100;
35101
35102 @@ -1876,7 +1876,7 @@ static int pohmelfs_fill_super(struct super_block *sb, void *data, int silent)
35103 INIT_LIST_HEAD(&psb->crypto_ready_list);
35104 INIT_LIST_HEAD(&psb->crypto_active_list);
35105
35106 - atomic_set(&psb->trans_gen, 1);
35107 + atomic_set_unchecked(&psb->trans_gen, 1);
35108 atomic_long_set(&psb->total_inodes, 0);
35109
35110 mutex_init(&psb->state_lock);
35111 diff --git a/drivers/staging/pohmelfs/mcache.c b/drivers/staging/pohmelfs/mcache.c
35112 index e22665c..a2a9390 100644
35113 --- a/drivers/staging/pohmelfs/mcache.c
35114 +++ b/drivers/staging/pohmelfs/mcache.c
35115 @@ -121,7 +121,7 @@ struct pohmelfs_mcache *pohmelfs_mcache_alloc(struct pohmelfs_sb *psb, u64 start
35116 m->data = data;
35117 m->start = start;
35118 m->size = size;
35119 - m->gen = atomic_long_inc_return(&psb->mcache_gen);
35120 + m->gen = atomic_long_inc_return_unchecked(&psb->mcache_gen);
35121
35122 mutex_lock(&psb->mcache_lock);
35123 err = pohmelfs_mcache_insert(psb, m);
35124 diff --git a/drivers/staging/pohmelfs/netfs.h b/drivers/staging/pohmelfs/netfs.h
35125 index 985b6b7..7699e05 100644
35126 --- a/drivers/staging/pohmelfs/netfs.h
35127 +++ b/drivers/staging/pohmelfs/netfs.h
35128 @@ -571,14 +571,14 @@ struct pohmelfs_config;
35129 struct pohmelfs_sb {
35130 struct rb_root mcache_root;
35131 struct mutex mcache_lock;
35132 - atomic_long_t mcache_gen;
35133 + atomic_long_unchecked_t mcache_gen;
35134 unsigned long mcache_timeout;
35135
35136 unsigned int idx;
35137
35138 unsigned int trans_retries;
35139
35140 - atomic_t trans_gen;
35141 + atomic_unchecked_t trans_gen;
35142
35143 unsigned int crypto_attached_size;
35144 unsigned int crypto_align_size;
35145 diff --git a/drivers/staging/pohmelfs/trans.c b/drivers/staging/pohmelfs/trans.c
35146 index 06c1a74..866eebc 100644
35147 --- a/drivers/staging/pohmelfs/trans.c
35148 +++ b/drivers/staging/pohmelfs/trans.c
35149 @@ -492,7 +492,7 @@ int netfs_trans_finish(struct netfs_trans *t, struct pohmelfs_sb *psb)
35150 int err;
35151 struct netfs_cmd *cmd = t->iovec.iov_base;
35152
35153 - t->gen = atomic_inc_return(&psb->trans_gen);
35154 + t->gen = atomic_inc_return_unchecked(&psb->trans_gen);
35155
35156 cmd->size = t->iovec.iov_len - sizeof(struct netfs_cmd) +
35157 t->attached_size + t->attached_pages * sizeof(struct netfs_cmd);
35158 diff --git a/drivers/staging/rtl8712/rtl871x_io.h b/drivers/staging/rtl8712/rtl871x_io.h
35159 index 86308a0..feaa925 100644
35160 --- a/drivers/staging/rtl8712/rtl871x_io.h
35161 +++ b/drivers/staging/rtl8712/rtl871x_io.h
35162 @@ -108,7 +108,7 @@ struct _io_ops {
35163 u8 *pmem);
35164 u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
35165 u8 *pmem);
35166 -};
35167 +} __no_const;
35168
35169 struct io_req {
35170 struct list_head list;
35171 diff --git a/drivers/staging/sbe-2t3e3/netdev.c b/drivers/staging/sbe-2t3e3/netdev.c
35172 index c7b5e8b..783d6cb 100644
35173 --- a/drivers/staging/sbe-2t3e3/netdev.c
35174 +++ b/drivers/staging/sbe-2t3e3/netdev.c
35175 @@ -51,7 +51,7 @@ int t3e3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
35176 t3e3_if_config(sc, cmd_2t3e3, (char *)&param, &resp, &rlen);
35177
35178 if (rlen)
35179 - if (copy_to_user(data, &resp, rlen))
35180 + if (rlen > sizeof resp || copy_to_user(data, &resp, rlen))
35181 return -EFAULT;
35182
35183 return 0;
35184 diff --git a/drivers/staging/usbip/usbip_common.h b/drivers/staging/usbip/usbip_common.h
35185 index be21617..0954e45 100644
35186 --- a/drivers/staging/usbip/usbip_common.h
35187 +++ b/drivers/staging/usbip/usbip_common.h
35188 @@ -289,7 +289,7 @@ struct usbip_device {
35189 void (*shutdown)(struct usbip_device *);
35190 void (*reset)(struct usbip_device *);
35191 void (*unusable)(struct usbip_device *);
35192 - } eh_ops;
35193 + } __no_const eh_ops;
35194 };
35195
35196 #if 0
35197 diff --git a/drivers/staging/usbip/vhci.h b/drivers/staging/usbip/vhci.h
35198 index 88b3298..3783eee 100644
35199 --- a/drivers/staging/usbip/vhci.h
35200 +++ b/drivers/staging/usbip/vhci.h
35201 @@ -88,7 +88,7 @@ struct vhci_hcd {
35202 unsigned resuming:1;
35203 unsigned long re_timeout;
35204
35205 - atomic_t seqnum;
35206 + atomic_unchecked_t seqnum;
35207
35208 /*
35209 * NOTE:
35210 diff --git a/drivers/staging/usbip/vhci_hcd.c b/drivers/staging/usbip/vhci_hcd.c
35211 index 2ee97e2..0420b86 100644
35212 --- a/drivers/staging/usbip/vhci_hcd.c
35213 +++ b/drivers/staging/usbip/vhci_hcd.c
35214 @@ -527,7 +527,7 @@ static void vhci_tx_urb(struct urb *urb)
35215 return;
35216 }
35217
35218 - priv->seqnum = atomic_inc_return(&the_controller->seqnum);
35219 + priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
35220 if (priv->seqnum == 0xffff)
35221 dev_info(&urb->dev->dev, "seqnum max\n");
35222
35223 @@ -779,7 +779,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
35224 return -ENOMEM;
35225 }
35226
35227 - unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
35228 + unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
35229 if (unlink->seqnum == 0xffff)
35230 pr_info("seqnum max\n");
35231
35232 @@ -969,7 +969,7 @@ static int vhci_start(struct usb_hcd *hcd)
35233 vdev->rhport = rhport;
35234 }
35235
35236 - atomic_set(&vhci->seqnum, 0);
35237 + atomic_set_unchecked(&vhci->seqnum, 0);
35238 spin_lock_init(&vhci->lock);
35239
35240 hcd->power_budget = 0; /* no limit */
35241 diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c
35242 index 3872b8c..fe6d2f4 100644
35243 --- a/drivers/staging/usbip/vhci_rx.c
35244 +++ b/drivers/staging/usbip/vhci_rx.c
35245 @@ -77,7 +77,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
35246 if (!urb) {
35247 pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
35248 pr_info("max seqnum %d\n",
35249 - atomic_read(&the_controller->seqnum));
35250 + atomic_read_unchecked(&the_controller->seqnum));
35251 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
35252 return;
35253 }
35254 diff --git a/drivers/staging/vt6655/hostap.c b/drivers/staging/vt6655/hostap.c
35255 index 7735027..30eed13 100644
35256 --- a/drivers/staging/vt6655/hostap.c
35257 +++ b/drivers/staging/vt6655/hostap.c
35258 @@ -79,14 +79,13 @@ static int msglevel =MSG_LEVEL_INFO;
35259 *
35260 */
35261
35262 +static net_device_ops_no_const apdev_netdev_ops;
35263 +
35264 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
35265 {
35266 PSDevice apdev_priv;
35267 struct net_device *dev = pDevice->dev;
35268 int ret;
35269 - const struct net_device_ops apdev_netdev_ops = {
35270 - .ndo_start_xmit = pDevice->tx_80211,
35271 - };
35272
35273 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
35274
35275 @@ -98,6 +97,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
35276 *apdev_priv = *pDevice;
35277 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
35278
35279 + /* only half broken now */
35280 + apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
35281 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
35282
35283 pDevice->apdev->type = ARPHRD_IEEE80211;
35284 diff --git a/drivers/staging/vt6656/hostap.c b/drivers/staging/vt6656/hostap.c
35285 index 51b5adf..098e320 100644
35286 --- a/drivers/staging/vt6656/hostap.c
35287 +++ b/drivers/staging/vt6656/hostap.c
35288 @@ -80,14 +80,13 @@ static int msglevel =MSG_LEVEL_INFO;
35289 *
35290 */
35291
35292 +static net_device_ops_no_const apdev_netdev_ops;
35293 +
35294 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
35295 {
35296 PSDevice apdev_priv;
35297 struct net_device *dev = pDevice->dev;
35298 int ret;
35299 - const struct net_device_ops apdev_netdev_ops = {
35300 - .ndo_start_xmit = pDevice->tx_80211,
35301 - };
35302
35303 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
35304
35305 @@ -99,6 +98,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
35306 *apdev_priv = *pDevice;
35307 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
35308
35309 + /* only half broken now */
35310 + apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
35311 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
35312
35313 pDevice->apdev->type = ARPHRD_IEEE80211;
35314 diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c
35315 index 7843dfd..3db105f 100644
35316 --- a/drivers/staging/wlan-ng/hfa384x_usb.c
35317 +++ b/drivers/staging/wlan-ng/hfa384x_usb.c
35318 @@ -204,7 +204,7 @@ static void unlocked_usbctlx_complete(hfa384x_t *hw, hfa384x_usbctlx_t *ctlx);
35319
35320 struct usbctlx_completor {
35321 int (*complete) (struct usbctlx_completor *);
35322 -};
35323 +} __no_const;
35324
35325 static int
35326 hfa384x_usbctlx_complete_sync(hfa384x_t *hw,
35327 diff --git a/drivers/staging/zcache/tmem.c b/drivers/staging/zcache/tmem.c
35328 index 1ca66ea..76f1343 100644
35329 --- a/drivers/staging/zcache/tmem.c
35330 +++ b/drivers/staging/zcache/tmem.c
35331 @@ -39,7 +39,7 @@
35332 * A tmem host implementation must use this function to register callbacks
35333 * for memory allocation.
35334 */
35335 -static struct tmem_hostops tmem_hostops;
35336 +static tmem_hostops_no_const tmem_hostops;
35337
35338 static void tmem_objnode_tree_init(void);
35339
35340 @@ -53,7 +53,7 @@ void tmem_register_hostops(struct tmem_hostops *m)
35341 * A tmem host implementation must use this function to register
35342 * callbacks for a page-accessible memory (PAM) implementation
35343 */
35344 -static struct tmem_pamops tmem_pamops;
35345 +static tmem_pamops_no_const tmem_pamops;
35346
35347 void tmem_register_pamops(struct tmem_pamops *m)
35348 {
35349 diff --git a/drivers/staging/zcache/tmem.h b/drivers/staging/zcache/tmem.h
35350 index ed147c4..94fc3c6 100644
35351 --- a/drivers/staging/zcache/tmem.h
35352 +++ b/drivers/staging/zcache/tmem.h
35353 @@ -180,6 +180,7 @@ struct tmem_pamops {
35354 void (*new_obj)(struct tmem_obj *);
35355 int (*replace_in_obj)(void *, struct tmem_obj *);
35356 };
35357 +typedef struct tmem_pamops __no_const tmem_pamops_no_const;
35358 extern void tmem_register_pamops(struct tmem_pamops *m);
35359
35360 /* memory allocation methods provided by the host implementation */
35361 @@ -189,6 +190,7 @@ struct tmem_hostops {
35362 struct tmem_objnode *(*objnode_alloc)(struct tmem_pool *);
35363 void (*objnode_free)(struct tmem_objnode *, struct tmem_pool *);
35364 };
35365 +typedef struct tmem_hostops __no_const tmem_hostops_no_const;
35366 extern void tmem_register_hostops(struct tmem_hostops *m);
35367
35368 /* core tmem accessor functions */
35369 diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
35370 index 8599545..7761358 100644
35371 --- a/drivers/target/iscsi/iscsi_target.c
35372 +++ b/drivers/target/iscsi/iscsi_target.c
35373 @@ -1364,7 +1364,7 @@ static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf)
35374 * outstanding_r2ts reaches zero, go ahead and send the delayed
35375 * TASK_ABORTED status.
35376 */
35377 - if (atomic_read(&se_cmd->t_transport_aborted) != 0) {
35378 + if (atomic_read_unchecked(&se_cmd->t_transport_aborted) != 0) {
35379 if (hdr->flags & ISCSI_FLAG_CMD_FINAL)
35380 if (--cmd->outstanding_r2ts < 1) {
35381 iscsit_stop_dataout_timer(cmd);
35382 diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
35383 index 6845228..df77141 100644
35384 --- a/drivers/target/target_core_tmr.c
35385 +++ b/drivers/target/target_core_tmr.c
35386 @@ -250,7 +250,7 @@ static void core_tmr_drain_task_list(
35387 cmd->se_tfo->get_task_tag(cmd), cmd->pr_res_key,
35388 cmd->t_task_list_num,
35389 atomic_read(&cmd->t_task_cdbs_left),
35390 - atomic_read(&cmd->t_task_cdbs_sent),
35391 + atomic_read_unchecked(&cmd->t_task_cdbs_sent),
35392 atomic_read(&cmd->t_transport_active),
35393 atomic_read(&cmd->t_transport_stop),
35394 atomic_read(&cmd->t_transport_sent));
35395 @@ -281,7 +281,7 @@ static void core_tmr_drain_task_list(
35396 pr_debug("LUN_RESET: got t_transport_active = 1 for"
35397 " task: %p, t_fe_count: %d dev: %p\n", task,
35398 fe_count, dev);
35399 - atomic_set(&cmd->t_transport_aborted, 1);
35400 + atomic_set_unchecked(&cmd->t_transport_aborted, 1);
35401 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
35402
35403 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
35404 @@ -289,7 +289,7 @@ static void core_tmr_drain_task_list(
35405 }
35406 pr_debug("LUN_RESET: Got t_transport_active = 0 for task: %p,"
35407 " t_fe_count: %d dev: %p\n", task, fe_count, dev);
35408 - atomic_set(&cmd->t_transport_aborted, 1);
35409 + atomic_set_unchecked(&cmd->t_transport_aborted, 1);
35410 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
35411
35412 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
35413 diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
35414 index e87d0eb..856cbcc 100644
35415 --- a/drivers/target/target_core_transport.c
35416 +++ b/drivers/target/target_core_transport.c
35417 @@ -1343,7 +1343,7 @@ struct se_device *transport_add_device_to_core_hba(
35418
35419 dev->queue_depth = dev_limits->queue_depth;
35420 atomic_set(&dev->depth_left, dev->queue_depth);
35421 - atomic_set(&dev->dev_ordered_id, 0);
35422 + atomic_set_unchecked(&dev->dev_ordered_id, 0);
35423
35424 se_dev_set_default_attribs(dev, dev_limits);
35425
35426 @@ -1530,7 +1530,7 @@ static int transport_check_alloc_task_attr(struct se_cmd *cmd)
35427 * Used to determine when ORDERED commands should go from
35428 * Dormant to Active status.
35429 */
35430 - cmd->se_ordered_id = atomic_inc_return(&cmd->se_dev->dev_ordered_id);
35431 + cmd->se_ordered_id = atomic_inc_return_unchecked(&cmd->se_dev->dev_ordered_id);
35432 smp_mb__after_atomic_inc();
35433 pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
35434 cmd->se_ordered_id, cmd->sam_task_attr,
35435 @@ -1800,7 +1800,7 @@ static void transport_generic_request_failure(struct se_cmd *cmd)
35436 " t_transport_active: %d t_transport_stop: %d"
35437 " t_transport_sent: %d\n", cmd->t_task_list_num,
35438 atomic_read(&cmd->t_task_cdbs_left),
35439 - atomic_read(&cmd->t_task_cdbs_sent),
35440 + atomic_read_unchecked(&cmd->t_task_cdbs_sent),
35441 atomic_read(&cmd->t_task_cdbs_ex_left),
35442 atomic_read(&cmd->t_transport_active),
35443 atomic_read(&cmd->t_transport_stop),
35444 @@ -2089,9 +2089,9 @@ check_depth:
35445
35446 spin_lock_irqsave(&cmd->t_state_lock, flags);
35447 task->task_flags |= (TF_ACTIVE | TF_SENT);
35448 - atomic_inc(&cmd->t_task_cdbs_sent);
35449 + atomic_inc_unchecked(&cmd->t_task_cdbs_sent);
35450
35451 - if (atomic_read(&cmd->t_task_cdbs_sent) ==
35452 + if (atomic_read_unchecked(&cmd->t_task_cdbs_sent) ==
35453 cmd->t_task_list_num)
35454 atomic_set(&cmd->t_transport_sent, 1);
35455
35456 @@ -4260,7 +4260,7 @@ bool transport_wait_for_tasks(struct se_cmd *cmd)
35457 atomic_set(&cmd->transport_lun_stop, 0);
35458 }
35459 if (!atomic_read(&cmd->t_transport_active) ||
35460 - atomic_read(&cmd->t_transport_aborted)) {
35461 + atomic_read_unchecked(&cmd->t_transport_aborted)) {
35462 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
35463 return false;
35464 }
35465 @@ -4509,7 +4509,7 @@ int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
35466 {
35467 int ret = 0;
35468
35469 - if (atomic_read(&cmd->t_transport_aborted) != 0) {
35470 + if (atomic_read_unchecked(&cmd->t_transport_aborted) != 0) {
35471 if (!send_status ||
35472 (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
35473 return 1;
35474 @@ -4546,7 +4546,7 @@ void transport_send_task_abort(struct se_cmd *cmd)
35475 */
35476 if (cmd->data_direction == DMA_TO_DEVICE) {
35477 if (cmd->se_tfo->write_pending_status(cmd) != 0) {
35478 - atomic_inc(&cmd->t_transport_aborted);
35479 + atomic_inc_unchecked(&cmd->t_transport_aborted);
35480 smp_mb__after_atomic_inc();
35481 }
35482 }
35483 diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
35484 index b9040be..e3f5aab 100644
35485 --- a/drivers/tty/hvc/hvcs.c
35486 +++ b/drivers/tty/hvc/hvcs.c
35487 @@ -83,6 +83,7 @@
35488 #include <asm/hvcserver.h>
35489 #include <asm/uaccess.h>
35490 #include <asm/vio.h>
35491 +#include <asm/local.h>
35492
35493 /*
35494 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
35495 @@ -270,7 +271,7 @@ struct hvcs_struct {
35496 unsigned int index;
35497
35498 struct tty_struct *tty;
35499 - int open_count;
35500 + local_t open_count;
35501
35502 /*
35503 * Used to tell the driver kernel_thread what operations need to take
35504 @@ -422,7 +423,7 @@ static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribut
35505
35506 spin_lock_irqsave(&hvcsd->lock, flags);
35507
35508 - if (hvcsd->open_count > 0) {
35509 + if (local_read(&hvcsd->open_count) > 0) {
35510 spin_unlock_irqrestore(&hvcsd->lock, flags);
35511 printk(KERN_INFO "HVCS: vterm state unchanged. "
35512 "The hvcs device node is still in use.\n");
35513 @@ -1145,7 +1146,7 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp)
35514 if ((retval = hvcs_partner_connect(hvcsd)))
35515 goto error_release;
35516
35517 - hvcsd->open_count = 1;
35518 + local_set(&hvcsd->open_count, 1);
35519 hvcsd->tty = tty;
35520 tty->driver_data = hvcsd;
35521
35522 @@ -1179,7 +1180,7 @@ fast_open:
35523
35524 spin_lock_irqsave(&hvcsd->lock, flags);
35525 kref_get(&hvcsd->kref);
35526 - hvcsd->open_count++;
35527 + local_inc(&hvcsd->open_count);
35528 hvcsd->todo_mask |= HVCS_SCHED_READ;
35529 spin_unlock_irqrestore(&hvcsd->lock, flags);
35530
35531 @@ -1223,7 +1224,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
35532 hvcsd = tty->driver_data;
35533
35534 spin_lock_irqsave(&hvcsd->lock, flags);
35535 - if (--hvcsd->open_count == 0) {
35536 + if (local_dec_and_test(&hvcsd->open_count)) {
35537
35538 vio_disable_interrupts(hvcsd->vdev);
35539
35540 @@ -1249,10 +1250,10 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
35541 free_irq(irq, hvcsd);
35542 kref_put(&hvcsd->kref, destroy_hvcs_struct);
35543 return;
35544 - } else if (hvcsd->open_count < 0) {
35545 + } else if (local_read(&hvcsd->open_count) < 0) {
35546 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
35547 " is missmanaged.\n",
35548 - hvcsd->vdev->unit_address, hvcsd->open_count);
35549 + hvcsd->vdev->unit_address, local_read(&hvcsd->open_count));
35550 }
35551
35552 spin_unlock_irqrestore(&hvcsd->lock, flags);
35553 @@ -1268,7 +1269,7 @@ static void hvcs_hangup(struct tty_struct * tty)
35554
35555 spin_lock_irqsave(&hvcsd->lock, flags);
35556 /* Preserve this so that we know how many kref refs to put */
35557 - temp_open_count = hvcsd->open_count;
35558 + temp_open_count = local_read(&hvcsd->open_count);
35559
35560 /*
35561 * Don't kref put inside the spinlock because the destruction
35562 @@ -1283,7 +1284,7 @@ static void hvcs_hangup(struct tty_struct * tty)
35563 hvcsd->tty->driver_data = NULL;
35564 hvcsd->tty = NULL;
35565
35566 - hvcsd->open_count = 0;
35567 + local_set(&hvcsd->open_count, 0);
35568
35569 /* This will drop any buffered data on the floor which is OK in a hangup
35570 * scenario. */
35571 @@ -1354,7 +1355,7 @@ static int hvcs_write(struct tty_struct *tty,
35572 * the middle of a write operation? This is a crummy place to do this
35573 * but we want to keep it all in the spinlock.
35574 */
35575 - if (hvcsd->open_count <= 0) {
35576 + if (local_read(&hvcsd->open_count) <= 0) {
35577 spin_unlock_irqrestore(&hvcsd->lock, flags);
35578 return -ENODEV;
35579 }
35580 @@ -1428,7 +1429,7 @@ static int hvcs_write_room(struct tty_struct *tty)
35581 {
35582 struct hvcs_struct *hvcsd = tty->driver_data;
35583
35584 - if (!hvcsd || hvcsd->open_count <= 0)
35585 + if (!hvcsd || local_read(&hvcsd->open_count) <= 0)
35586 return 0;
35587
35588 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
35589 diff --git a/drivers/tty/ipwireless/tty.c b/drivers/tty/ipwireless/tty.c
35590 index ef92869..f4ebd88 100644
35591 --- a/drivers/tty/ipwireless/tty.c
35592 +++ b/drivers/tty/ipwireless/tty.c
35593 @@ -29,6 +29,7 @@
35594 #include <linux/tty_driver.h>
35595 #include <linux/tty_flip.h>
35596 #include <linux/uaccess.h>
35597 +#include <asm/local.h>
35598
35599 #include "tty.h"
35600 #include "network.h"
35601 @@ -51,7 +52,7 @@ struct ipw_tty {
35602 int tty_type;
35603 struct ipw_network *network;
35604 struct tty_struct *linux_tty;
35605 - int open_count;
35606 + local_t open_count;
35607 unsigned int control_lines;
35608 struct mutex ipw_tty_mutex;
35609 int tx_bytes_queued;
35610 @@ -127,10 +128,10 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
35611 mutex_unlock(&tty->ipw_tty_mutex);
35612 return -ENODEV;
35613 }
35614 - if (tty->open_count == 0)
35615 + if (local_read(&tty->open_count) == 0)
35616 tty->tx_bytes_queued = 0;
35617
35618 - tty->open_count++;
35619 + local_inc(&tty->open_count);
35620
35621 tty->linux_tty = linux_tty;
35622 linux_tty->driver_data = tty;
35623 @@ -146,9 +147,7 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
35624
35625 static void do_ipw_close(struct ipw_tty *tty)
35626 {
35627 - tty->open_count--;
35628 -
35629 - if (tty->open_count == 0) {
35630 + if (local_dec_return(&tty->open_count) == 0) {
35631 struct tty_struct *linux_tty = tty->linux_tty;
35632
35633 if (linux_tty != NULL) {
35634 @@ -169,7 +168,7 @@ static void ipw_hangup(struct tty_struct *linux_tty)
35635 return;
35636
35637 mutex_lock(&tty->ipw_tty_mutex);
35638 - if (tty->open_count == 0) {
35639 + if (local_read(&tty->open_count) == 0) {
35640 mutex_unlock(&tty->ipw_tty_mutex);
35641 return;
35642 }
35643 @@ -198,7 +197,7 @@ void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
35644 return;
35645 }
35646
35647 - if (!tty->open_count) {
35648 + if (!local_read(&tty->open_count)) {
35649 mutex_unlock(&tty->ipw_tty_mutex);
35650 return;
35651 }
35652 @@ -240,7 +239,7 @@ static int ipw_write(struct tty_struct *linux_tty,
35653 return -ENODEV;
35654
35655 mutex_lock(&tty->ipw_tty_mutex);
35656 - if (!tty->open_count) {
35657 + if (!local_read(&tty->open_count)) {
35658 mutex_unlock(&tty->ipw_tty_mutex);
35659 return -EINVAL;
35660 }
35661 @@ -280,7 +279,7 @@ static int ipw_write_room(struct tty_struct *linux_tty)
35662 if (!tty)
35663 return -ENODEV;
35664
35665 - if (!tty->open_count)
35666 + if (!local_read(&tty->open_count))
35667 return -EINVAL;
35668
35669 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
35670 @@ -322,7 +321,7 @@ static int ipw_chars_in_buffer(struct tty_struct *linux_tty)
35671 if (!tty)
35672 return 0;
35673
35674 - if (!tty->open_count)
35675 + if (!local_read(&tty->open_count))
35676 return 0;
35677
35678 return tty->tx_bytes_queued;
35679 @@ -403,7 +402,7 @@ static int ipw_tiocmget(struct tty_struct *linux_tty)
35680 if (!tty)
35681 return -ENODEV;
35682
35683 - if (!tty->open_count)
35684 + if (!local_read(&tty->open_count))
35685 return -EINVAL;
35686
35687 return get_control_lines(tty);
35688 @@ -419,7 +418,7 @@ ipw_tiocmset(struct tty_struct *linux_tty,
35689 if (!tty)
35690 return -ENODEV;
35691
35692 - if (!tty->open_count)
35693 + if (!local_read(&tty->open_count))
35694 return -EINVAL;
35695
35696 return set_control_lines(tty, set, clear);
35697 @@ -433,7 +432,7 @@ static int ipw_ioctl(struct tty_struct *linux_tty,
35698 if (!tty)
35699 return -ENODEV;
35700
35701 - if (!tty->open_count)
35702 + if (!local_read(&tty->open_count))
35703 return -EINVAL;
35704
35705 /* FIXME: Exactly how is the tty object locked here .. */
35706 @@ -582,7 +581,7 @@ void ipwireless_tty_free(struct ipw_tty *tty)
35707 against a parallel ioctl etc */
35708 mutex_lock(&ttyj->ipw_tty_mutex);
35709 }
35710 - while (ttyj->open_count)
35711 + while (local_read(&ttyj->open_count))
35712 do_ipw_close(ttyj);
35713 ipwireless_disassociate_network_ttys(network,
35714 ttyj->channel_idx);
35715 diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
35716 index fc7bbba..9527e93 100644
35717 --- a/drivers/tty/n_gsm.c
35718 +++ b/drivers/tty/n_gsm.c
35719 @@ -1629,7 +1629,7 @@ static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr)
35720 kref_init(&dlci->ref);
35721 mutex_init(&dlci->mutex);
35722 dlci->fifo = &dlci->_fifo;
35723 - if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
35724 + if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) {
35725 kfree(dlci);
35726 return NULL;
35727 }
35728 diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
35729 index 39d6ab6..eb97f41 100644
35730 --- a/drivers/tty/n_tty.c
35731 +++ b/drivers/tty/n_tty.c
35732 @@ -2123,6 +2123,7 @@ void n_tty_inherit_ops(struct tty_ldisc_ops *ops)
35733 {
35734 *ops = tty_ldisc_N_TTY;
35735 ops->owner = NULL;
35736 - ops->refcount = ops->flags = 0;
35737 + atomic_set(&ops->refcount, 0);
35738 + ops->flags = 0;
35739 }
35740 EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
35741 diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
35742 index e18604b..a7d5a11 100644
35743 --- a/drivers/tty/pty.c
35744 +++ b/drivers/tty/pty.c
35745 @@ -773,8 +773,10 @@ static void __init unix98_pty_init(void)
35746 register_sysctl_table(pty_root_table);
35747
35748 /* Now create the /dev/ptmx special device */
35749 + pax_open_kernel();
35750 tty_default_fops(&ptmx_fops);
35751 - ptmx_fops.open = ptmx_open;
35752 + *(void **)&ptmx_fops.open = ptmx_open;
35753 + pax_close_kernel();
35754
35755 cdev_init(&ptmx_cdev, &ptmx_fops);
35756 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
35757 diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
35758 index 2b42a01..32a2ed3 100644
35759 --- a/drivers/tty/serial/kgdboc.c
35760 +++ b/drivers/tty/serial/kgdboc.c
35761 @@ -24,8 +24,9 @@
35762 #define MAX_CONFIG_LEN 40
35763
35764 static struct kgdb_io kgdboc_io_ops;
35765 +static struct kgdb_io kgdboc_io_ops_console;
35766
35767 -/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
35768 +/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. */
35769 static int configured = -1;
35770
35771 static char config[MAX_CONFIG_LEN];
35772 @@ -148,6 +149,8 @@ static void cleanup_kgdboc(void)
35773 kgdboc_unregister_kbd();
35774 if (configured == 1)
35775 kgdb_unregister_io_module(&kgdboc_io_ops);
35776 + else if (configured == 2)
35777 + kgdb_unregister_io_module(&kgdboc_io_ops_console);
35778 }
35779
35780 static int configure_kgdboc(void)
35781 @@ -157,13 +160,13 @@ static int configure_kgdboc(void)
35782 int err;
35783 char *cptr = config;
35784 struct console *cons;
35785 + int is_console = 0;
35786
35787 err = kgdboc_option_setup(config);
35788 if (err || !strlen(config) || isspace(config[0]))
35789 goto noconfig;
35790
35791 err = -ENODEV;
35792 - kgdboc_io_ops.is_console = 0;
35793 kgdb_tty_driver = NULL;
35794
35795 kgdboc_use_kms = 0;
35796 @@ -184,7 +187,7 @@ static int configure_kgdboc(void)
35797 int idx;
35798 if (cons->device && cons->device(cons, &idx) == p &&
35799 idx == tty_line) {
35800 - kgdboc_io_ops.is_console = 1;
35801 + is_console = 1;
35802 break;
35803 }
35804 cons = cons->next;
35805 @@ -194,12 +197,16 @@ static int configure_kgdboc(void)
35806 kgdb_tty_line = tty_line;
35807
35808 do_register:
35809 - err = kgdb_register_io_module(&kgdboc_io_ops);
35810 + if (is_console) {
35811 + err = kgdb_register_io_module(&kgdboc_io_ops_console);
35812 + configured = 2;
35813 + } else {
35814 + err = kgdb_register_io_module(&kgdboc_io_ops);
35815 + configured = 1;
35816 + }
35817 if (err)
35818 goto noconfig;
35819
35820 - configured = 1;
35821 -
35822 return 0;
35823
35824 noconfig:
35825 @@ -213,7 +220,7 @@ noconfig:
35826 static int __init init_kgdboc(void)
35827 {
35828 /* Already configured? */
35829 - if (configured == 1)
35830 + if (configured >= 1)
35831 return 0;
35832
35833 return configure_kgdboc();
35834 @@ -262,7 +269,7 @@ static int param_set_kgdboc_var(const char *kmessage, struct kernel_param *kp)
35835 if (config[len - 1] == '\n')
35836 config[len - 1] = '\0';
35837
35838 - if (configured == 1)
35839 + if (configured >= 1)
35840 cleanup_kgdboc();
35841
35842 /* Go and configure with the new params. */
35843 @@ -302,6 +309,15 @@ static struct kgdb_io kgdboc_io_ops = {
35844 .post_exception = kgdboc_post_exp_handler,
35845 };
35846
35847 +static struct kgdb_io kgdboc_io_ops_console = {
35848 + .name = "kgdboc",
35849 + .read_char = kgdboc_get_char,
35850 + .write_char = kgdboc_put_char,
35851 + .pre_exception = kgdboc_pre_exp_handler,
35852 + .post_exception = kgdboc_post_exp_handler,
35853 + .is_console = 1
35854 +};
35855 +
35856 #ifdef CONFIG_KGDB_SERIAL_CONSOLE
35857 /* This is only available if kgdboc is a built in for early debugging */
35858 static int __init kgdboc_early_init(char *opt)
35859 diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
35860 index 05085be..67eadb0 100644
35861 --- a/drivers/tty/tty_io.c
35862 +++ b/drivers/tty/tty_io.c
35863 @@ -3240,7 +3240,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
35864
35865 void tty_default_fops(struct file_operations *fops)
35866 {
35867 - *fops = tty_fops;
35868 + memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
35869 }
35870
35871 /*
35872 diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
35873 index 8e0924f..4204eb4 100644
35874 --- a/drivers/tty/tty_ldisc.c
35875 +++ b/drivers/tty/tty_ldisc.c
35876 @@ -75,7 +75,7 @@ static void put_ldisc(struct tty_ldisc *ld)
35877 if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
35878 struct tty_ldisc_ops *ldo = ld->ops;
35879
35880 - ldo->refcount--;
35881 + atomic_dec(&ldo->refcount);
35882 module_put(ldo->owner);
35883 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
35884
35885 @@ -110,7 +110,7 @@ int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc)
35886 spin_lock_irqsave(&tty_ldisc_lock, flags);
35887 tty_ldiscs[disc] = new_ldisc;
35888 new_ldisc->num = disc;
35889 - new_ldisc->refcount = 0;
35890 + atomic_set(&new_ldisc->refcount, 0);
35891 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
35892
35893 return ret;
35894 @@ -138,7 +138,7 @@ int tty_unregister_ldisc(int disc)
35895 return -EINVAL;
35896
35897 spin_lock_irqsave(&tty_ldisc_lock, flags);
35898 - if (tty_ldiscs[disc]->refcount)
35899 + if (atomic_read(&tty_ldiscs[disc]->refcount))
35900 ret = -EBUSY;
35901 else
35902 tty_ldiscs[disc] = NULL;
35903 @@ -159,7 +159,7 @@ static struct tty_ldisc_ops *get_ldops(int disc)
35904 if (ldops) {
35905 ret = ERR_PTR(-EAGAIN);
35906 if (try_module_get(ldops->owner)) {
35907 - ldops->refcount++;
35908 + atomic_inc(&ldops->refcount);
35909 ret = ldops;
35910 }
35911 }
35912 @@ -172,7 +172,7 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
35913 unsigned long flags;
35914
35915 spin_lock_irqsave(&tty_ldisc_lock, flags);
35916 - ldops->refcount--;
35917 + atomic_dec(&ldops->refcount);
35918 module_put(ldops->owner);
35919 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
35920 }
35921 diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
35922 index a605549..6bd3c96 100644
35923 --- a/drivers/tty/vt/keyboard.c
35924 +++ b/drivers/tty/vt/keyboard.c
35925 @@ -657,6 +657,16 @@ static void k_spec(struct vc_data *vc, unsigned char value, char up_flag)
35926 kbd->kbdmode == VC_OFF) &&
35927 value != KVAL(K_SAK))
35928 return; /* SAK is allowed even in raw mode */
35929 +
35930 +#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
35931 + {
35932 + void *func = fn_handler[value];
35933 + if (func == fn_show_state || func == fn_show_ptregs ||
35934 + func == fn_show_mem)
35935 + return;
35936 + }
35937 +#endif
35938 +
35939 fn_handler[value](vc);
35940 }
35941
35942 diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c
35943 index 5e096f4..0da1363 100644
35944 --- a/drivers/tty/vt/vt_ioctl.c
35945 +++ b/drivers/tty/vt/vt_ioctl.c
35946 @@ -207,9 +207,6 @@ do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm, struct kbd_str
35947 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
35948 return -EFAULT;
35949
35950 - if (!capable(CAP_SYS_TTY_CONFIG))
35951 - perm = 0;
35952 -
35953 switch (cmd) {
35954 case KDGKBENT:
35955 key_map = key_maps[s];
35956 @@ -221,6 +218,9 @@ do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm, struct kbd_str
35957 val = (i ? K_HOLE : K_NOSUCHMAP);
35958 return put_user(val, &user_kbe->kb_value);
35959 case KDSKBENT:
35960 + if (!capable(CAP_SYS_TTY_CONFIG))
35961 + perm = 0;
35962 +
35963 if (!perm)
35964 return -EPERM;
35965 if (!i && v == K_NOSUCHMAP) {
35966 @@ -322,9 +322,6 @@ do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
35967 int i, j, k;
35968 int ret;
35969
35970 - if (!capable(CAP_SYS_TTY_CONFIG))
35971 - perm = 0;
35972 -
35973 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
35974 if (!kbs) {
35975 ret = -ENOMEM;
35976 @@ -358,6 +355,9 @@ do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
35977 kfree(kbs);
35978 return ((p && *p) ? -EOVERFLOW : 0);
35979 case KDSKBSENT:
35980 + if (!capable(CAP_SYS_TTY_CONFIG))
35981 + perm = 0;
35982 +
35983 if (!perm) {
35984 ret = -EPERM;
35985 goto reterr;
35986 diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
35987 index a783d53..cb30d94 100644
35988 --- a/drivers/uio/uio.c
35989 +++ b/drivers/uio/uio.c
35990 @@ -25,6 +25,7 @@
35991 #include <linux/kobject.h>
35992 #include <linux/cdev.h>
35993 #include <linux/uio_driver.h>
35994 +#include <asm/local.h>
35995
35996 #define UIO_MAX_DEVICES (1U << MINORBITS)
35997
35998 @@ -32,10 +33,10 @@ struct uio_device {
35999 struct module *owner;
36000 struct device *dev;
36001 int minor;
36002 - atomic_t event;
36003 + atomic_unchecked_t event;
36004 struct fasync_struct *async_queue;
36005 wait_queue_head_t wait;
36006 - int vma_count;
36007 + local_t vma_count;
36008 struct uio_info *info;
36009 struct kobject *map_dir;
36010 struct kobject *portio_dir;
36011 @@ -242,7 +243,7 @@ static ssize_t show_event(struct device *dev,
36012 struct device_attribute *attr, char *buf)
36013 {
36014 struct uio_device *idev = dev_get_drvdata(dev);
36015 - return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
36016 + return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event));
36017 }
36018
36019 static struct device_attribute uio_class_attributes[] = {
36020 @@ -408,7 +409,7 @@ void uio_event_notify(struct uio_info *info)
36021 {
36022 struct uio_device *idev = info->uio_dev;
36023
36024 - atomic_inc(&idev->event);
36025 + atomic_inc_unchecked(&idev->event);
36026 wake_up_interruptible(&idev->wait);
36027 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
36028 }
36029 @@ -461,7 +462,7 @@ static int uio_open(struct inode *inode, struct file *filep)
36030 }
36031
36032 listener->dev = idev;
36033 - listener->event_count = atomic_read(&idev->event);
36034 + listener->event_count = atomic_read_unchecked(&idev->event);
36035 filep->private_data = listener;
36036
36037 if (idev->info->open) {
36038 @@ -512,7 +513,7 @@ static unsigned int uio_poll(struct file *filep, poll_table *wait)
36039 return -EIO;
36040
36041 poll_wait(filep, &idev->wait, wait);
36042 - if (listener->event_count != atomic_read(&idev->event))
36043 + if (listener->event_count != atomic_read_unchecked(&idev->event))
36044 return POLLIN | POLLRDNORM;
36045 return 0;
36046 }
36047 @@ -537,7 +538,7 @@ static ssize_t uio_read(struct file *filep, char __user *buf,
36048 do {
36049 set_current_state(TASK_INTERRUPTIBLE);
36050
36051 - event_count = atomic_read(&idev->event);
36052 + event_count = atomic_read_unchecked(&idev->event);
36053 if (event_count != listener->event_count) {
36054 if (copy_to_user(buf, &event_count, count))
36055 retval = -EFAULT;
36056 @@ -606,13 +607,13 @@ static int uio_find_mem_index(struct vm_area_struct *vma)
36057 static void uio_vma_open(struct vm_area_struct *vma)
36058 {
36059 struct uio_device *idev = vma->vm_private_data;
36060 - idev->vma_count++;
36061 + local_inc(&idev->vma_count);
36062 }
36063
36064 static void uio_vma_close(struct vm_area_struct *vma)
36065 {
36066 struct uio_device *idev = vma->vm_private_data;
36067 - idev->vma_count--;
36068 + local_dec(&idev->vma_count);
36069 }
36070
36071 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
36072 @@ -821,7 +822,7 @@ int __uio_register_device(struct module *owner,
36073 idev->owner = owner;
36074 idev->info = info;
36075 init_waitqueue_head(&idev->wait);
36076 - atomic_set(&idev->event, 0);
36077 + atomic_set_unchecked(&idev->event, 0);
36078
36079 ret = uio_get_minor(idev);
36080 if (ret)
36081 diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
36082 index a845f8b..4f54072 100644
36083 --- a/drivers/usb/atm/cxacru.c
36084 +++ b/drivers/usb/atm/cxacru.c
36085 @@ -473,7 +473,7 @@ static ssize_t cxacru_sysfs_store_adsl_config(struct device *dev,
36086 ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
36087 if (ret < 2)
36088 return -EINVAL;
36089 - if (index < 0 || index > 0x7f)
36090 + if (index > 0x7f)
36091 return -EINVAL;
36092 pos += tmp;
36093
36094 diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
36095 index d3448ca..d2864ca 100644
36096 --- a/drivers/usb/atm/usbatm.c
36097 +++ b/drivers/usb/atm/usbatm.c
36098 @@ -333,7 +333,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
36099 if (printk_ratelimit())
36100 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
36101 __func__, vpi, vci);
36102 - atomic_inc(&vcc->stats->rx_err);
36103 + atomic_inc_unchecked(&vcc->stats->rx_err);
36104 return;
36105 }
36106
36107 @@ -361,7 +361,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
36108 if (length > ATM_MAX_AAL5_PDU) {
36109 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
36110 __func__, length, vcc);
36111 - atomic_inc(&vcc->stats->rx_err);
36112 + atomic_inc_unchecked(&vcc->stats->rx_err);
36113 goto out;
36114 }
36115
36116 @@ -370,14 +370,14 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
36117 if (sarb->len < pdu_length) {
36118 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
36119 __func__, pdu_length, sarb->len, vcc);
36120 - atomic_inc(&vcc->stats->rx_err);
36121 + atomic_inc_unchecked(&vcc->stats->rx_err);
36122 goto out;
36123 }
36124
36125 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
36126 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
36127 __func__, vcc);
36128 - atomic_inc(&vcc->stats->rx_err);
36129 + atomic_inc_unchecked(&vcc->stats->rx_err);
36130 goto out;
36131 }
36132
36133 @@ -387,7 +387,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
36134 if (printk_ratelimit())
36135 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
36136 __func__, length);
36137 - atomic_inc(&vcc->stats->rx_drop);
36138 + atomic_inc_unchecked(&vcc->stats->rx_drop);
36139 goto out;
36140 }
36141
36142 @@ -412,7 +412,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
36143
36144 vcc->push(vcc, skb);
36145
36146 - atomic_inc(&vcc->stats->rx);
36147 + atomic_inc_unchecked(&vcc->stats->rx);
36148 out:
36149 skb_trim(sarb, 0);
36150 }
36151 @@ -615,7 +615,7 @@ static void usbatm_tx_process(unsigned long data)
36152 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
36153
36154 usbatm_pop(vcc, skb);
36155 - atomic_inc(&vcc->stats->tx);
36156 + atomic_inc_unchecked(&vcc->stats->tx);
36157
36158 skb = skb_dequeue(&instance->sndqueue);
36159 }
36160 @@ -773,11 +773,11 @@ static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t * pos, char *pag
36161 if (!left--)
36162 return sprintf(page,
36163 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
36164 - atomic_read(&atm_dev->stats.aal5.tx),
36165 - atomic_read(&atm_dev->stats.aal5.tx_err),
36166 - atomic_read(&atm_dev->stats.aal5.rx),
36167 - atomic_read(&atm_dev->stats.aal5.rx_err),
36168 - atomic_read(&atm_dev->stats.aal5.rx_drop));
36169 + atomic_read_unchecked(&atm_dev->stats.aal5.tx),
36170 + atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
36171 + atomic_read_unchecked(&atm_dev->stats.aal5.rx),
36172 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
36173 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
36174
36175 if (!left--) {
36176 if (instance->disconnected)
36177 diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
36178 index d956965..4179a77 100644
36179 --- a/drivers/usb/core/devices.c
36180 +++ b/drivers/usb/core/devices.c
36181 @@ -126,7 +126,7 @@ static const char format_endpt[] =
36182 * time it gets called.
36183 */
36184 static struct device_connect_event {
36185 - atomic_t count;
36186 + atomic_unchecked_t count;
36187 wait_queue_head_t wait;
36188 } device_event = {
36189 .count = ATOMIC_INIT(1),
36190 @@ -164,7 +164,7 @@ static const struct class_info clas_info[] = {
36191
36192 void usbfs_conn_disc_event(void)
36193 {
36194 - atomic_add(2, &device_event.count);
36195 + atomic_add_unchecked(2, &device_event.count);
36196 wake_up(&device_event.wait);
36197 }
36198
36199 @@ -648,7 +648,7 @@ static unsigned int usb_device_poll(struct file *file,
36200
36201 poll_wait(file, &device_event.wait, wait);
36202
36203 - event_count = atomic_read(&device_event.count);
36204 + event_count = atomic_read_unchecked(&device_event.count);
36205 if (file->f_version != event_count) {
36206 file->f_version = event_count;
36207 return POLLIN | POLLRDNORM;
36208 diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
36209 index b3bdfed..a9460e0 100644
36210 --- a/drivers/usb/core/message.c
36211 +++ b/drivers/usb/core/message.c
36212 @@ -869,8 +869,8 @@ char *usb_cache_string(struct usb_device *udev, int index)
36213 buf = kmalloc(MAX_USB_STRING_SIZE, GFP_NOIO);
36214 if (buf) {
36215 len = usb_string(udev, index, buf, MAX_USB_STRING_SIZE);
36216 - if (len > 0) {
36217 - smallbuf = kmalloc(++len, GFP_NOIO);
36218 + if (len++ > 0) {
36219 + smallbuf = kmalloc(len, GFP_NOIO);
36220 if (!smallbuf)
36221 return buf;
36222 memcpy(smallbuf, buf, len);
36223 diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
36224 index 1fc8f12..20647c1 100644
36225 --- a/drivers/usb/early/ehci-dbgp.c
36226 +++ b/drivers/usb/early/ehci-dbgp.c
36227 @@ -97,7 +97,8 @@ static inline u32 dbgp_len_update(u32 x, u32 len)
36228
36229 #ifdef CONFIG_KGDB
36230 static struct kgdb_io kgdbdbgp_io_ops;
36231 -#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
36232 +static struct kgdb_io kgdbdbgp_io_ops_console;
36233 +#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console)
36234 #else
36235 #define dbgp_kgdb_mode (0)
36236 #endif
36237 @@ -1035,6 +1036,13 @@ static struct kgdb_io kgdbdbgp_io_ops = {
36238 .write_char = kgdbdbgp_write_char,
36239 };
36240
36241 +static struct kgdb_io kgdbdbgp_io_ops_console = {
36242 + .name = "kgdbdbgp",
36243 + .read_char = kgdbdbgp_read_char,
36244 + .write_char = kgdbdbgp_write_char,
36245 + .is_console = 1
36246 +};
36247 +
36248 static int kgdbdbgp_wait_time;
36249
36250 static int __init kgdbdbgp_parse_config(char *str)
36251 @@ -1050,8 +1058,10 @@ static int __init kgdbdbgp_parse_config(char *str)
36252 ptr++;
36253 kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
36254 }
36255 - kgdb_register_io_module(&kgdbdbgp_io_ops);
36256 - kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
36257 + if (early_dbgp_console.index != -1)
36258 + kgdb_register_io_module(&kgdbdbgp_io_ops_console);
36259 + else
36260 + kgdb_register_io_module(&kgdbdbgp_io_ops);
36261
36262 return 0;
36263 }
36264 diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
36265 index d6bea3e..60b250e 100644
36266 --- a/drivers/usb/wusbcore/wa-hc.h
36267 +++ b/drivers/usb/wusbcore/wa-hc.h
36268 @@ -192,7 +192,7 @@ struct wahc {
36269 struct list_head xfer_delayed_list;
36270 spinlock_t xfer_list_lock;
36271 struct work_struct xfer_work;
36272 - atomic_t xfer_id_count;
36273 + atomic_unchecked_t xfer_id_count;
36274 };
36275
36276
36277 @@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *wa)
36278 INIT_LIST_HEAD(&wa->xfer_delayed_list);
36279 spin_lock_init(&wa->xfer_list_lock);
36280 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
36281 - atomic_set(&wa->xfer_id_count, 1);
36282 + atomic_set_unchecked(&wa->xfer_id_count, 1);
36283 }
36284
36285 /**
36286 diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
36287 index 57c01ab..8a05959 100644
36288 --- a/drivers/usb/wusbcore/wa-xfer.c
36289 +++ b/drivers/usb/wusbcore/wa-xfer.c
36290 @@ -296,7 +296,7 @@ out:
36291 */
36292 static void wa_xfer_id_init(struct wa_xfer *xfer)
36293 {
36294 - xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
36295 + xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
36296 }
36297
36298 /*
36299 diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
36300 index c14c42b..f955cc2 100644
36301 --- a/drivers/vhost/vhost.c
36302 +++ b/drivers/vhost/vhost.c
36303 @@ -629,7 +629,7 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
36304 return 0;
36305 }
36306
36307 -static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp)
36308 +static long vhost_set_vring(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
36309 {
36310 struct file *eventfp, *filep = NULL,
36311 *pollstart = NULL, *pollstop = NULL;
36312 diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
36313 index b0b2ac3..89a4399 100644
36314 --- a/drivers/video/aty/aty128fb.c
36315 +++ b/drivers/video/aty/aty128fb.c
36316 @@ -148,7 +148,7 @@ enum {
36317 };
36318
36319 /* Must match above enum */
36320 -static const char *r128_family[] __devinitdata = {
36321 +static const char *r128_family[] __devinitconst = {
36322 "AGP",
36323 "PCI",
36324 "PRO AGP",
36325 diff --git a/drivers/video/fbcmap.c b/drivers/video/fbcmap.c
36326 index 5c3960d..15cf8fc 100644
36327 --- a/drivers/video/fbcmap.c
36328 +++ b/drivers/video/fbcmap.c
36329 @@ -285,8 +285,7 @@ int fb_set_user_cmap(struct fb_cmap_user *cmap, struct fb_info *info)
36330 rc = -ENODEV;
36331 goto out;
36332 }
36333 - if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
36334 - !info->fbops->fb_setcmap)) {
36335 + if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
36336 rc = -EINVAL;
36337 goto out1;
36338 }
36339 diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
36340 index ad93629..e020fc3 100644
36341 --- a/drivers/video/fbmem.c
36342 +++ b/drivers/video/fbmem.c
36343 @@ -428,7 +428,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
36344 image->dx += image->width + 8;
36345 }
36346 } else if (rotate == FB_ROTATE_UD) {
36347 - for (x = 0; x < num && image->dx >= 0; x++) {
36348 + for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
36349 info->fbops->fb_imageblit(info, image);
36350 image->dx -= image->width + 8;
36351 }
36352 @@ -440,7 +440,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
36353 image->dy += image->height + 8;
36354 }
36355 } else if (rotate == FB_ROTATE_CCW) {
36356 - for (x = 0; x < num && image->dy >= 0; x++) {
36357 + for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
36358 info->fbops->fb_imageblit(info, image);
36359 image->dy -= image->height + 8;
36360 }
36361 @@ -1143,7 +1143,7 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
36362 return -EFAULT;
36363 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
36364 return -EINVAL;
36365 - if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
36366 + if (con2fb.framebuffer >= FB_MAX)
36367 return -EINVAL;
36368 if (!registered_fb[con2fb.framebuffer])
36369 request_module("fb%d", con2fb.framebuffer);
36370 diff --git a/drivers/video/geode/gx1fb_core.c b/drivers/video/geode/gx1fb_core.c
36371 index 5a5d092..265c5ed 100644
36372 --- a/drivers/video/geode/gx1fb_core.c
36373 +++ b/drivers/video/geode/gx1fb_core.c
36374 @@ -29,7 +29,7 @@ static int crt_option = 1;
36375 static char panel_option[32] = "";
36376
36377 /* Modes relevant to the GX1 (taken from modedb.c) */
36378 -static const struct fb_videomode __devinitdata gx1_modedb[] = {
36379 +static const struct fb_videomode __devinitconst gx1_modedb[] = {
36380 /* 640x480-60 VESA */
36381 { NULL, 60, 640, 480, 39682, 48, 16, 33, 10, 96, 2,
36382 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
36383 diff --git a/drivers/video/gxt4500.c b/drivers/video/gxt4500.c
36384 index 0fad23f..0e9afa4 100644
36385 --- a/drivers/video/gxt4500.c
36386 +++ b/drivers/video/gxt4500.c
36387 @@ -156,7 +156,7 @@ struct gxt4500_par {
36388 static char *mode_option;
36389
36390 /* default mode: 1280x1024 @ 60 Hz, 8 bpp */
36391 -static const struct fb_videomode defaultmode __devinitdata = {
36392 +static const struct fb_videomode defaultmode __devinitconst = {
36393 .refresh = 60,
36394 .xres = 1280,
36395 .yres = 1024,
36396 @@ -581,7 +581,7 @@ static int gxt4500_blank(int blank, struct fb_info *info)
36397 return 0;
36398 }
36399
36400 -static const struct fb_fix_screeninfo gxt4500_fix __devinitdata = {
36401 +static const struct fb_fix_screeninfo gxt4500_fix __devinitconst = {
36402 .id = "IBM GXT4500P",
36403 .type = FB_TYPE_PACKED_PIXELS,
36404 .visual = FB_VISUAL_PSEUDOCOLOR,
36405 diff --git a/drivers/video/i810/i810_accel.c b/drivers/video/i810/i810_accel.c
36406 index 7672d2e..b56437f 100644
36407 --- a/drivers/video/i810/i810_accel.c
36408 +++ b/drivers/video/i810/i810_accel.c
36409 @@ -73,6 +73,7 @@ static inline int wait_for_space(struct fb_info *info, u32 space)
36410 }
36411 }
36412 printk("ringbuffer lockup!!!\n");
36413 + printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
36414 i810_report_error(mmio);
36415 par->dev_flags |= LOCKUP;
36416 info->pixmap.scan_align = 1;
36417 diff --git a/drivers/video/i810/i810_main.c b/drivers/video/i810/i810_main.c
36418 index 318f6fb..9a389c1 100644
36419 --- a/drivers/video/i810/i810_main.c
36420 +++ b/drivers/video/i810/i810_main.c
36421 @@ -97,7 +97,7 @@ static int i810fb_blank (int blank_mode, struct fb_info *info);
36422 static void i810fb_release_resource (struct fb_info *info, struct i810fb_par *par);
36423
36424 /* PCI */
36425 -static const char *i810_pci_list[] __devinitdata = {
36426 +static const char *i810_pci_list[] __devinitconst = {
36427 "Intel(R) 810 Framebuffer Device" ,
36428 "Intel(R) 810-DC100 Framebuffer Device" ,
36429 "Intel(R) 810E Framebuffer Device" ,
36430 diff --git a/drivers/video/jz4740_fb.c b/drivers/video/jz4740_fb.c
36431 index de36693..3c63fc2 100644
36432 --- a/drivers/video/jz4740_fb.c
36433 +++ b/drivers/video/jz4740_fb.c
36434 @@ -136,7 +136,7 @@ struct jzfb {
36435 uint32_t pseudo_palette[16];
36436 };
36437
36438 -static const struct fb_fix_screeninfo jzfb_fix __devinitdata = {
36439 +static const struct fb_fix_screeninfo jzfb_fix __devinitconst = {
36440 .id = "JZ4740 FB",
36441 .type = FB_TYPE_PACKED_PIXELS,
36442 .visual = FB_VISUAL_TRUECOLOR,
36443 diff --git a/drivers/video/logo/logo_linux_clut224.ppm b/drivers/video/logo/logo_linux_clut224.ppm
36444 index 3c14e43..eafa544 100644
36445 --- a/drivers/video/logo/logo_linux_clut224.ppm
36446 +++ b/drivers/video/logo/logo_linux_clut224.ppm
36447 @@ -1,1604 +1,1123 @@
36448 P3
36449 -# Standard 224-color Linux logo
36450 80 80
36451 255
36452 - 0 0 0 0 0 0 0 0 0 0 0 0
36453 - 0 0 0 0 0 0 0 0 0 0 0 0
36454 - 0 0 0 0 0 0 0 0 0 0 0 0
36455 - 0 0 0 0 0 0 0 0 0 0 0 0
36456 - 0 0 0 0 0 0 0 0 0 0 0 0
36457 - 0 0 0 0 0 0 0 0 0 0 0 0
36458 - 0 0 0 0 0 0 0 0 0 0 0 0
36459 - 0 0 0 0 0 0 0 0 0 0 0 0
36460 - 0 0 0 0 0 0 0 0 0 0 0 0
36461 - 6 6 6 6 6 6 10 10 10 10 10 10
36462 - 10 10 10 6 6 6 6 6 6 6 6 6
36463 - 0 0 0 0 0 0 0 0 0 0 0 0
36464 - 0 0 0 0 0 0 0 0 0 0 0 0
36465 - 0 0 0 0 0 0 0 0 0 0 0 0
36466 - 0 0 0 0 0 0 0 0 0 0 0 0
36467 - 0 0 0 0 0 0 0 0 0 0 0 0
36468 - 0 0 0 0 0 0 0 0 0 0 0 0
36469 - 0 0 0 0 0 0 0 0 0 0 0 0
36470 - 0 0 0 0 0 0 0 0 0 0 0 0
36471 - 0 0 0 0 0 0 0 0 0 0 0 0
36472 - 0 0 0 0 0 0 0 0 0 0 0 0
36473 - 0 0 0 0 0 0 0 0 0 0 0 0
36474 - 0 0 0 0 0 0 0 0 0 0 0 0
36475 - 0 0 0 0 0 0 0 0 0 0 0 0
36476 - 0 0 0 0 0 0 0 0 0 0 0 0
36477 - 0 0 0 0 0 0 0 0 0 0 0 0
36478 - 0 0 0 0 0 0 0 0 0 0 0 0
36479 - 0 0 0 0 0 0 0 0 0 0 0 0
36480 - 0 0 0 6 6 6 10 10 10 14 14 14
36481 - 22 22 22 26 26 26 30 30 30 34 34 34
36482 - 30 30 30 30 30 30 26 26 26 18 18 18
36483 - 14 14 14 10 10 10 6 6 6 0 0 0
36484 - 0 0 0 0 0 0 0 0 0 0 0 0
36485 - 0 0 0 0 0 0 0 0 0 0 0 0
36486 - 0 0 0 0 0 0 0 0 0 0 0 0
36487 - 0 0 0 0 0 0 0 0 0 0 0 0
36488 - 0 0 0 0 0 0 0 0 0 0 0 0
36489 - 0 0 0 0 0 0 0 0 0 0 0 0
36490 - 0 0 0 0 0 0 0 0 0 0 0 0
36491 - 0 0 0 0 0 0 0 0 0 0 0 0
36492 - 0 0 0 0 0 0 0 0 0 0 0 0
36493 - 0 0 0 0 0 1 0 0 1 0 0 0
36494 - 0 0 0 0 0 0 0 0 0 0 0 0
36495 - 0 0 0 0 0 0 0 0 0 0 0 0
36496 - 0 0 0 0 0 0 0 0 0 0 0 0
36497 - 0 0 0 0 0 0 0 0 0 0 0 0
36498 - 0 0 0 0 0 0 0 0 0 0 0 0
36499 - 0 0 0 0 0 0 0 0 0 0 0 0
36500 - 6 6 6 14 14 14 26 26 26 42 42 42
36501 - 54 54 54 66 66 66 78 78 78 78 78 78
36502 - 78 78 78 74 74 74 66 66 66 54 54 54
36503 - 42 42 42 26 26 26 18 18 18 10 10 10
36504 - 6 6 6 0 0 0 0 0 0 0 0 0
36505 - 0 0 0 0 0 0 0 0 0 0 0 0
36506 - 0 0 0 0 0 0 0 0 0 0 0 0
36507 - 0 0 0 0 0 0 0 0 0 0 0 0
36508 - 0 0 0 0 0 0 0 0 0 0 0 0
36509 - 0 0 0 0 0 0 0 0 0 0 0 0
36510 - 0 0 0 0 0 0 0 0 0 0 0 0
36511 - 0 0 0 0 0 0 0 0 0 0 0 0
36512 - 0 0 0 0 0 0 0 0 0 0 0 0
36513 - 0 0 1 0 0 0 0 0 0 0 0 0
36514 - 0 0 0 0 0 0 0 0 0 0 0 0
36515 - 0 0 0 0 0 0 0 0 0 0 0 0
36516 - 0 0 0 0 0 0 0 0 0 0 0 0
36517 - 0 0 0 0 0 0 0 0 0 0 0 0
36518 - 0 0 0 0 0 0 0 0 0 0 0 0
36519 - 0 0 0 0 0 0 0 0 0 10 10 10
36520 - 22 22 22 42 42 42 66 66 66 86 86 86
36521 - 66 66 66 38 38 38 38 38 38 22 22 22
36522 - 26 26 26 34 34 34 54 54 54 66 66 66
36523 - 86 86 86 70 70 70 46 46 46 26 26 26
36524 - 14 14 14 6 6 6 0 0 0 0 0 0
36525 - 0 0 0 0 0 0 0 0 0 0 0 0
36526 - 0 0 0 0 0 0 0 0 0 0 0 0
36527 - 0 0 0 0 0 0 0 0 0 0 0 0
36528 - 0 0 0 0 0 0 0 0 0 0 0 0
36529 - 0 0 0 0 0 0 0 0 0 0 0 0
36530 - 0 0 0 0 0 0 0 0 0 0 0 0
36531 - 0 0 0 0 0 0 0 0 0 0 0 0
36532 - 0 0 0 0 0 0 0 0 0 0 0 0
36533 - 0 0 1 0 0 1 0 0 1 0 0 0
36534 - 0 0 0 0 0 0 0 0 0 0 0 0
36535 - 0 0 0 0 0 0 0 0 0 0 0 0
36536 - 0 0 0 0 0 0 0 0 0 0 0 0
36537 - 0 0 0 0 0 0 0 0 0 0 0 0
36538 - 0 0 0 0 0 0 0 0 0 0 0 0
36539 - 0 0 0 0 0 0 10 10 10 26 26 26
36540 - 50 50 50 82 82 82 58 58 58 6 6 6
36541 - 2 2 6 2 2 6 2 2 6 2 2 6
36542 - 2 2 6 2 2 6 2 2 6 2 2 6
36543 - 6 6 6 54 54 54 86 86 86 66 66 66
36544 - 38 38 38 18 18 18 6 6 6 0 0 0
36545 - 0 0 0 0 0 0 0 0 0 0 0 0
36546 - 0 0 0 0 0 0 0 0 0 0 0 0
36547 - 0 0 0 0 0 0 0 0 0 0 0 0
36548 - 0 0 0 0 0 0 0 0 0 0 0 0
36549 - 0 0 0 0 0 0 0 0 0 0 0 0
36550 - 0 0 0 0 0 0 0 0 0 0 0 0
36551 - 0 0 0 0 0 0 0 0 0 0 0 0
36552 - 0 0 0 0 0 0 0 0 0 0 0 0
36553 - 0 0 0 0 0 0 0 0 0 0 0 0
36554 - 0 0 0 0 0 0 0 0 0 0 0 0
36555 - 0 0 0 0 0 0 0 0 0 0 0 0
36556 - 0 0 0 0 0 0 0 0 0 0 0 0
36557 - 0 0 0 0 0 0 0 0 0 0 0 0
36558 - 0 0 0 0 0 0 0 0 0 0 0 0
36559 - 0 0 0 6 6 6 22 22 22 50 50 50
36560 - 78 78 78 34 34 34 2 2 6 2 2 6
36561 - 2 2 6 2 2 6 2 2 6 2 2 6
36562 - 2 2 6 2 2 6 2 2 6 2 2 6
36563 - 2 2 6 2 2 6 6 6 6 70 70 70
36564 - 78 78 78 46 46 46 22 22 22 6 6 6
36565 - 0 0 0 0 0 0 0 0 0 0 0 0
36566 - 0 0 0 0 0 0 0 0 0 0 0 0
36567 - 0 0 0 0 0 0 0 0 0 0 0 0
36568 - 0 0 0 0 0 0 0 0 0 0 0 0
36569 - 0 0 0 0 0 0 0 0 0 0 0 0
36570 - 0 0 0 0 0 0 0 0 0 0 0 0
36571 - 0 0 0 0 0 0 0 0 0 0 0 0
36572 - 0 0 0 0 0 0 0 0 0 0 0 0
36573 - 0 0 1 0 0 1 0 0 1 0 0 0
36574 - 0 0 0 0 0 0 0 0 0 0 0 0
36575 - 0 0 0 0 0 0 0 0 0 0 0 0
36576 - 0 0 0 0 0 0 0 0 0 0 0 0
36577 - 0 0 0 0 0 0 0 0 0 0 0 0
36578 - 0 0 0 0 0 0 0 0 0 0 0 0
36579 - 6 6 6 18 18 18 42 42 42 82 82 82
36580 - 26 26 26 2 2 6 2 2 6 2 2 6
36581 - 2 2 6 2 2 6 2 2 6 2 2 6
36582 - 2 2 6 2 2 6 2 2 6 14 14 14
36583 - 46 46 46 34 34 34 6 6 6 2 2 6
36584 - 42 42 42 78 78 78 42 42 42 18 18 18
36585 - 6 6 6 0 0 0 0 0 0 0 0 0
36586 - 0 0 0 0 0 0 0 0 0 0 0 0
36587 - 0 0 0 0 0 0 0 0 0 0 0 0
36588 - 0 0 0 0 0 0 0 0 0 0 0 0
36589 - 0 0 0 0 0 0 0 0 0 0 0 0
36590 - 0 0 0 0 0 0 0 0 0 0 0 0
36591 - 0 0 0 0 0 0 0 0 0 0 0 0
36592 - 0 0 0 0 0 0 0 0 0 0 0 0
36593 - 0 0 1 0 0 0 0 0 1 0 0 0
36594 - 0 0 0 0 0 0 0 0 0 0 0 0
36595 - 0 0 0 0 0 0 0 0 0 0 0 0
36596 - 0 0 0 0 0 0 0 0 0 0 0 0
36597 - 0 0 0 0 0 0 0 0 0 0 0 0
36598 - 0 0 0 0 0 0 0 0 0 0 0 0
36599 - 10 10 10 30 30 30 66 66 66 58 58 58
36600 - 2 2 6 2 2 6 2 2 6 2 2 6
36601 - 2 2 6 2 2 6 2 2 6 2 2 6
36602 - 2 2 6 2 2 6 2 2 6 26 26 26
36603 - 86 86 86 101 101 101 46 46 46 10 10 10
36604 - 2 2 6 58 58 58 70 70 70 34 34 34
36605 - 10 10 10 0 0 0 0 0 0 0 0 0
36606 - 0 0 0 0 0 0 0 0 0 0 0 0
36607 - 0 0 0 0 0 0 0 0 0 0 0 0
36608 - 0 0 0 0 0 0 0 0 0 0 0 0
36609 - 0 0 0 0 0 0 0 0 0 0 0 0
36610 - 0 0 0 0 0 0 0 0 0 0 0 0
36611 - 0 0 0 0 0 0 0 0 0 0 0 0
36612 - 0 0 0 0 0 0 0 0 0 0 0 0
36613 - 0 0 1 0 0 1 0 0 1 0 0 0
36614 - 0 0 0 0 0 0 0 0 0 0 0 0
36615 - 0 0 0 0 0 0 0 0 0 0 0 0
36616 - 0 0 0 0 0 0 0 0 0 0 0 0
36617 - 0 0 0 0 0 0 0 0 0 0 0 0
36618 - 0 0 0 0 0 0 0 0 0 0 0 0
36619 - 14 14 14 42 42 42 86 86 86 10 10 10
36620 - 2 2 6 2 2 6 2 2 6 2 2 6
36621 - 2 2 6 2 2 6 2 2 6 2 2 6
36622 - 2 2 6 2 2 6 2 2 6 30 30 30
36623 - 94 94 94 94 94 94 58 58 58 26 26 26
36624 - 2 2 6 6 6 6 78 78 78 54 54 54
36625 - 22 22 22 6 6 6 0 0 0 0 0 0
36626 - 0 0 0 0 0 0 0 0 0 0 0 0
36627 - 0 0 0 0 0 0 0 0 0 0 0 0
36628 - 0 0 0 0 0 0 0 0 0 0 0 0
36629 - 0 0 0 0 0 0 0 0 0 0 0 0
36630 - 0 0 0 0 0 0 0 0 0 0 0 0
36631 - 0 0 0 0 0 0 0 0 0 0 0 0
36632 - 0 0 0 0 0 0 0 0 0 0 0 0
36633 - 0 0 0 0 0 0 0 0 0 0 0 0
36634 - 0 0 0 0 0 0 0 0 0 0 0 0
36635 - 0 0 0 0 0 0 0 0 0 0 0 0
36636 - 0 0 0 0 0 0 0 0 0 0 0 0
36637 - 0 0 0 0 0 0 0 0 0 0 0 0
36638 - 0 0 0 0 0 0 0 0 0 6 6 6
36639 - 22 22 22 62 62 62 62 62 62 2 2 6
36640 - 2 2 6 2 2 6 2 2 6 2 2 6
36641 - 2 2 6 2 2 6 2 2 6 2 2 6
36642 - 2 2 6 2 2 6 2 2 6 26 26 26
36643 - 54 54 54 38 38 38 18 18 18 10 10 10
36644 - 2 2 6 2 2 6 34 34 34 82 82 82
36645 - 38 38 38 14 14 14 0 0 0 0 0 0
36646 - 0 0 0 0 0 0 0 0 0 0 0 0
36647 - 0 0 0 0 0 0 0 0 0 0 0 0
36648 - 0 0 0 0 0 0 0 0 0 0 0 0
36649 - 0 0 0 0 0 0 0 0 0 0 0 0
36650 - 0 0 0 0 0 0 0 0 0 0 0 0
36651 - 0 0 0 0 0 0 0 0 0 0 0 0
36652 - 0 0 0 0 0 0 0 0 0 0 0 0
36653 - 0 0 0 0 0 1 0 0 1 0 0 0
36654 - 0 0 0 0 0 0 0 0 0 0 0 0
36655 - 0 0 0 0 0 0 0 0 0 0 0 0
36656 - 0 0 0 0 0 0 0 0 0 0 0 0
36657 - 0 0 0 0 0 0 0 0 0 0 0 0
36658 - 0 0 0 0 0 0 0 0 0 6 6 6
36659 - 30 30 30 78 78 78 30 30 30 2 2 6
36660 - 2 2 6 2 2 6 2 2 6 2 2 6
36661 - 2 2 6 2 2 6 2 2 6 2 2 6
36662 - 2 2 6 2 2 6 2 2 6 10 10 10
36663 - 10 10 10 2 2 6 2 2 6 2 2 6
36664 - 2 2 6 2 2 6 2 2 6 78 78 78
36665 - 50 50 50 18 18 18 6 6 6 0 0 0
36666 - 0 0 0 0 0 0 0 0 0 0 0 0
36667 - 0 0 0 0 0 0 0 0 0 0 0 0
36668 - 0 0 0 0 0 0 0 0 0 0 0 0
36669 - 0 0 0 0 0 0 0 0 0 0 0 0
36670 - 0 0 0 0 0 0 0 0 0 0 0 0
36671 - 0 0 0 0 0 0 0 0 0 0 0 0
36672 - 0 0 0 0 0 0 0 0 0 0 0 0
36673 - 0 0 1 0 0 0 0 0 0 0 0 0
36674 - 0 0 0 0 0 0 0 0 0 0 0 0
36675 - 0 0 0 0 0 0 0 0 0 0 0 0
36676 - 0 0 0 0 0 0 0 0 0 0 0 0
36677 - 0 0 0 0 0 0 0 0 0 0 0 0
36678 - 0 0 0 0 0 0 0 0 0 10 10 10
36679 - 38 38 38 86 86 86 14 14 14 2 2 6
36680 - 2 2 6 2 2 6 2 2 6 2 2 6
36681 - 2 2 6 2 2 6 2 2 6 2 2 6
36682 - 2 2 6 2 2 6 2 2 6 2 2 6
36683 - 2 2 6 2 2 6 2 2 6 2 2 6
36684 - 2 2 6 2 2 6 2 2 6 54 54 54
36685 - 66 66 66 26 26 26 6 6 6 0 0 0
36686 - 0 0 0 0 0 0 0 0 0 0 0 0
36687 - 0 0 0 0 0 0 0 0 0 0 0 0
36688 - 0 0 0 0 0 0 0 0 0 0 0 0
36689 - 0 0 0 0 0 0 0 0 0 0 0 0
36690 - 0 0 0 0 0 0 0 0 0 0 0 0
36691 - 0 0 0 0 0 0 0 0 0 0 0 0
36692 - 0 0 0 0 0 0 0 0 0 0 0 0
36693 - 0 0 0 0 0 1 0 0 1 0 0 0
36694 - 0 0 0 0 0 0 0 0 0 0 0 0
36695 - 0 0 0 0 0 0 0 0 0 0 0 0
36696 - 0 0 0 0 0 0 0 0 0 0 0 0
36697 - 0 0 0 0 0 0 0 0 0 0 0 0
36698 - 0 0 0 0 0 0 0 0 0 14 14 14
36699 - 42 42 42 82 82 82 2 2 6 2 2 6
36700 - 2 2 6 6 6 6 10 10 10 2 2 6
36701 - 2 2 6 2 2 6 2 2 6 2 2 6
36702 - 2 2 6 2 2 6 2 2 6 6 6 6
36703 - 14 14 14 10 10 10 2 2 6 2 2 6
36704 - 2 2 6 2 2 6 2 2 6 18 18 18
36705 - 82 82 82 34 34 34 10 10 10 0 0 0
36706 - 0 0 0 0 0 0 0 0 0 0 0 0
36707 - 0 0 0 0 0 0 0 0 0 0 0 0
36708 - 0 0 0 0 0 0 0 0 0 0 0 0
36709 - 0 0 0 0 0 0 0 0 0 0 0 0
36710 - 0 0 0 0 0 0 0 0 0 0 0 0
36711 - 0 0 0 0 0 0 0 0 0 0 0 0
36712 - 0 0 0 0 0 0 0 0 0 0 0 0
36713 - 0 0 1 0 0 0 0 0 0 0 0 0
36714 - 0 0 0 0 0 0 0 0 0 0 0 0
36715 - 0 0 0 0 0 0 0 0 0 0 0 0
36716 - 0 0 0 0 0 0 0 0 0 0 0 0
36717 - 0 0 0 0 0 0 0 0 0 0 0 0
36718 - 0 0 0 0 0 0 0 0 0 14 14 14
36719 - 46 46 46 86 86 86 2 2 6 2 2 6
36720 - 6 6 6 6 6 6 22 22 22 34 34 34
36721 - 6 6 6 2 2 6 2 2 6 2 2 6
36722 - 2 2 6 2 2 6 18 18 18 34 34 34
36723 - 10 10 10 50 50 50 22 22 22 2 2 6
36724 - 2 2 6 2 2 6 2 2 6 10 10 10
36725 - 86 86 86 42 42 42 14 14 14 0 0 0
36726 - 0 0 0 0 0 0 0 0 0 0 0 0
36727 - 0 0 0 0 0 0 0 0 0 0 0 0
36728 - 0 0 0 0 0 0 0 0 0 0 0 0
36729 - 0 0 0 0 0 0 0 0 0 0 0 0
36730 - 0 0 0 0 0 0 0 0 0 0 0 0
36731 - 0 0 0 0 0 0 0 0 0 0 0 0
36732 - 0 0 0 0 0 0 0 0 0 0 0 0
36733 - 0 0 1 0 0 1 0 0 1 0 0 0
36734 - 0 0 0 0 0 0 0 0 0 0 0 0
36735 - 0 0 0 0 0 0 0 0 0 0 0 0
36736 - 0 0 0 0 0 0 0 0 0 0 0 0
36737 - 0 0 0 0 0 0 0 0 0 0 0 0
36738 - 0 0 0 0 0 0 0 0 0 14 14 14
36739 - 46 46 46 86 86 86 2 2 6 2 2 6
36740 - 38 38 38 116 116 116 94 94 94 22 22 22
36741 - 22 22 22 2 2 6 2 2 6 2 2 6
36742 - 14 14 14 86 86 86 138 138 138 162 162 162
36743 -154 154 154 38 38 38 26 26 26 6 6 6
36744 - 2 2 6 2 2 6 2 2 6 2 2 6
36745 - 86 86 86 46 46 46 14 14 14 0 0 0
36746 - 0 0 0 0 0 0 0 0 0 0 0 0
36747 - 0 0 0 0 0 0 0 0 0 0 0 0
36748 - 0 0 0 0 0 0 0 0 0 0 0 0
36749 - 0 0 0 0 0 0 0 0 0 0 0 0
36750 - 0 0 0 0 0 0 0 0 0 0 0 0
36751 - 0 0 0 0 0 0 0 0 0 0 0 0
36752 - 0 0 0 0 0 0 0 0 0 0 0 0
36753 - 0 0 0 0 0 0 0 0 0 0 0 0
36754 - 0 0 0 0 0 0 0 0 0 0 0 0
36755 - 0 0 0 0 0 0 0 0 0 0 0 0
36756 - 0 0 0 0 0 0 0 0 0 0 0 0
36757 - 0 0 0 0 0 0 0 0 0 0 0 0
36758 - 0 0 0 0 0 0 0 0 0 14 14 14
36759 - 46 46 46 86 86 86 2 2 6 14 14 14
36760 -134 134 134 198 198 198 195 195 195 116 116 116
36761 - 10 10 10 2 2 6 2 2 6 6 6 6
36762 -101 98 89 187 187 187 210 210 210 218 218 218
36763 -214 214 214 134 134 134 14 14 14 6 6 6
36764 - 2 2 6 2 2 6 2 2 6 2 2 6
36765 - 86 86 86 50 50 50 18 18 18 6 6 6
36766 - 0 0 0 0 0 0 0 0 0 0 0 0
36767 - 0 0 0 0 0 0 0 0 0 0 0 0
36768 - 0 0 0 0 0 0 0 0 0 0 0 0
36769 - 0 0 0 0 0 0 0 0 0 0 0 0
36770 - 0 0 0 0 0 0 0 0 0 0 0 0
36771 - 0 0 0 0 0 0 0 0 0 0 0 0
36772 - 0 0 0 0 0 0 0 0 1 0 0 0
36773 - 0 0 1 0 0 1 0 0 1 0 0 0
36774 - 0 0 0 0 0 0 0 0 0 0 0 0
36775 - 0 0 0 0 0 0 0 0 0 0 0 0
36776 - 0 0 0 0 0 0 0 0 0 0 0 0
36777 - 0 0 0 0 0 0 0 0 0 0 0 0
36778 - 0 0 0 0 0 0 0 0 0 14 14 14
36779 - 46 46 46 86 86 86 2 2 6 54 54 54
36780 -218 218 218 195 195 195 226 226 226 246 246 246
36781 - 58 58 58 2 2 6 2 2 6 30 30 30
36782 -210 210 210 253 253 253 174 174 174 123 123 123
36783 -221 221 221 234 234 234 74 74 74 2 2 6
36784 - 2 2 6 2 2 6 2 2 6 2 2 6
36785 - 70 70 70 58 58 58 22 22 22 6 6 6
36786 - 0 0 0 0 0 0 0 0 0 0 0 0
36787 - 0 0 0 0 0 0 0 0 0 0 0 0
36788 - 0 0 0 0 0 0 0 0 0 0 0 0
36789 - 0 0 0 0 0 0 0 0 0 0 0 0
36790 - 0 0 0 0 0 0 0 0 0 0 0 0
36791 - 0 0 0 0 0 0 0 0 0 0 0 0
36792 - 0 0 0 0 0 0 0 0 0 0 0 0
36793 - 0 0 0 0 0 0 0 0 0 0 0 0
36794 - 0 0 0 0 0 0 0 0 0 0 0 0
36795 - 0 0 0 0 0 0 0 0 0 0 0 0
36796 - 0 0 0 0 0 0 0 0 0 0 0 0
36797 - 0 0 0 0 0 0 0 0 0 0 0 0
36798 - 0 0 0 0 0 0 0 0 0 14 14 14
36799 - 46 46 46 82 82 82 2 2 6 106 106 106
36800 -170 170 170 26 26 26 86 86 86 226 226 226
36801 -123 123 123 10 10 10 14 14 14 46 46 46
36802 -231 231 231 190 190 190 6 6 6 70 70 70
36803 - 90 90 90 238 238 238 158 158 158 2 2 6
36804 - 2 2 6 2 2 6 2 2 6 2 2 6
36805 - 70 70 70 58 58 58 22 22 22 6 6 6
36806 - 0 0 0 0 0 0 0 0 0 0 0 0
36807 - 0 0 0 0 0 0 0 0 0 0 0 0
36808 - 0 0 0 0 0 0 0 0 0 0 0 0
36809 - 0 0 0 0 0 0 0 0 0 0 0 0
36810 - 0 0 0 0 0 0 0 0 0 0 0 0
36811 - 0 0 0 0 0 0 0 0 0 0 0 0
36812 - 0 0 0 0 0 0 0 0 1 0 0 0
36813 - 0 0 1 0 0 1 0 0 1 0 0 0
36814 - 0 0 0 0 0 0 0 0 0 0 0 0
36815 - 0 0 0 0 0 0 0 0 0 0 0 0
36816 - 0 0 0 0 0 0 0 0 0 0 0 0
36817 - 0 0 0 0 0 0 0 0 0 0 0 0
36818 - 0 0 0 0 0 0 0 0 0 14 14 14
36819 - 42 42 42 86 86 86 6 6 6 116 116 116
36820 -106 106 106 6 6 6 70 70 70 149 149 149
36821 -128 128 128 18 18 18 38 38 38 54 54 54
36822 -221 221 221 106 106 106 2 2 6 14 14 14
36823 - 46 46 46 190 190 190 198 198 198 2 2 6
36824 - 2 2 6 2 2 6 2 2 6 2 2 6
36825 - 74 74 74 62 62 62 22 22 22 6 6 6
36826 - 0 0 0 0 0 0 0 0 0 0 0 0
36827 - 0 0 0 0 0 0 0 0 0 0 0 0
36828 - 0 0 0 0 0 0 0 0 0 0 0 0
36829 - 0 0 0 0 0 0 0 0 0 0 0 0
36830 - 0 0 0 0 0 0 0 0 0 0 0 0
36831 - 0 0 0 0 0 0 0 0 0 0 0 0
36832 - 0 0 0 0 0 0 0 0 1 0 0 0
36833 - 0 0 1 0 0 0 0 0 1 0 0 0
36834 - 0 0 0 0 0 0 0 0 0 0 0 0
36835 - 0 0 0 0 0 0 0 0 0 0 0 0
36836 - 0 0 0 0 0 0 0 0 0 0 0 0
36837 - 0 0 0 0 0 0 0 0 0 0 0 0
36838 - 0 0 0 0 0 0 0 0 0 14 14 14
36839 - 42 42 42 94 94 94 14 14 14 101 101 101
36840 -128 128 128 2 2 6 18 18 18 116 116 116
36841 -118 98 46 121 92 8 121 92 8 98 78 10
36842 -162 162 162 106 106 106 2 2 6 2 2 6
36843 - 2 2 6 195 195 195 195 195 195 6 6 6
36844 - 2 2 6 2 2 6 2 2 6 2 2 6
36845 - 74 74 74 62 62 62 22 22 22 6 6 6
36846 - 0 0 0 0 0 0 0 0 0 0 0 0
36847 - 0 0 0 0 0 0 0 0 0 0 0 0
36848 - 0 0 0 0 0 0 0 0 0 0 0 0
36849 - 0 0 0 0 0 0 0 0 0 0 0 0
36850 - 0 0 0 0 0 0 0 0 0 0 0 0
36851 - 0 0 0 0 0 0 0 0 0 0 0 0
36852 - 0 0 0 0 0 0 0 0 1 0 0 1
36853 - 0 0 1 0 0 0 0 0 1 0 0 0
36854 - 0 0 0 0 0 0 0 0 0 0 0 0
36855 - 0 0 0 0 0 0 0 0 0 0 0 0
36856 - 0 0 0 0 0 0 0 0 0 0 0 0
36857 - 0 0 0 0 0 0 0 0 0 0 0 0
36858 - 0 0 0 0 0 0 0 0 0 10 10 10
36859 - 38 38 38 90 90 90 14 14 14 58 58 58
36860 -210 210 210 26 26 26 54 38 6 154 114 10
36861 -226 170 11 236 186 11 225 175 15 184 144 12
36862 -215 174 15 175 146 61 37 26 9 2 2 6
36863 - 70 70 70 246 246 246 138 138 138 2 2 6
36864 - 2 2 6 2 2 6 2 2 6 2 2 6
36865 - 70 70 70 66 66 66 26 26 26 6 6 6
36866 - 0 0 0 0 0 0 0 0 0 0 0 0
36867 - 0 0 0 0 0 0 0 0 0 0 0 0
36868 - 0 0 0 0 0 0 0 0 0 0 0 0
36869 - 0 0 0 0 0 0 0 0 0 0 0 0
36870 - 0 0 0 0 0 0 0 0 0 0 0 0
36871 - 0 0 0 0 0 0 0 0 0 0 0 0
36872 - 0 0 0 0 0 0 0 0 0 0 0 0
36873 - 0 0 0 0 0 0 0 0 0 0 0 0
36874 - 0 0 0 0 0 0 0 0 0 0 0 0
36875 - 0 0 0 0 0 0 0 0 0 0 0 0
36876 - 0 0 0 0 0 0 0 0 0 0 0 0
36877 - 0 0 0 0 0 0 0 0 0 0 0 0
36878 - 0 0 0 0 0 0 0 0 0 10 10 10
36879 - 38 38 38 86 86 86 14 14 14 10 10 10
36880 -195 195 195 188 164 115 192 133 9 225 175 15
36881 -239 182 13 234 190 10 232 195 16 232 200 30
36882 -245 207 45 241 208 19 232 195 16 184 144 12
36883 -218 194 134 211 206 186 42 42 42 2 2 6
36884 - 2 2 6 2 2 6 2 2 6 2 2 6
36885 - 50 50 50 74 74 74 30 30 30 6 6 6
36886 - 0 0 0 0 0 0 0 0 0 0 0 0
36887 - 0 0 0 0 0 0 0 0 0 0 0 0
36888 - 0 0 0 0 0 0 0 0 0 0 0 0
36889 - 0 0 0 0 0 0 0 0 0 0 0 0
36890 - 0 0 0 0 0 0 0 0 0 0 0 0
36891 - 0 0 0 0 0 0 0 0 0 0 0 0
36892 - 0 0 0 0 0 0 0 0 0 0 0 0
36893 - 0 0 0 0 0 0 0 0 0 0 0 0
36894 - 0 0 0 0 0 0 0 0 0 0 0 0
36895 - 0 0 0 0 0 0 0 0 0 0 0 0
36896 - 0 0 0 0 0 0 0 0 0 0 0 0
36897 - 0 0 0 0 0 0 0 0 0 0 0 0
36898 - 0 0 0 0 0 0 0 0 0 10 10 10
36899 - 34 34 34 86 86 86 14 14 14 2 2 6
36900 -121 87 25 192 133 9 219 162 10 239 182 13
36901 -236 186 11 232 195 16 241 208 19 244 214 54
36902 -246 218 60 246 218 38 246 215 20 241 208 19
36903 -241 208 19 226 184 13 121 87 25 2 2 6
36904 - 2 2 6 2 2 6 2 2 6 2 2 6
36905 - 50 50 50 82 82 82 34 34 34 10 10 10
36906 - 0 0 0 0 0 0 0 0 0 0 0 0
36907 - 0 0 0 0 0 0 0 0 0 0 0 0
36908 - 0 0 0 0 0 0 0 0 0 0 0 0
36909 - 0 0 0 0 0 0 0 0 0 0 0 0
36910 - 0 0 0 0 0 0 0 0 0 0 0 0
36911 - 0 0 0 0 0 0 0 0 0 0 0 0
36912 - 0 0 0 0 0 0 0 0 0 0 0 0
36913 - 0 0 0 0 0 0 0 0 0 0 0 0
36914 - 0 0 0 0 0 0 0 0 0 0 0 0
36915 - 0 0 0 0 0 0 0 0 0 0 0 0
36916 - 0 0 0 0 0 0 0 0 0 0 0 0
36917 - 0 0 0 0 0 0 0 0 0 0 0 0
36918 - 0 0 0 0 0 0 0 0 0 10 10 10
36919 - 34 34 34 82 82 82 30 30 30 61 42 6
36920 -180 123 7 206 145 10 230 174 11 239 182 13
36921 -234 190 10 238 202 15 241 208 19 246 218 74
36922 -246 218 38 246 215 20 246 215 20 246 215 20
36923 -226 184 13 215 174 15 184 144 12 6 6 6
36924 - 2 2 6 2 2 6 2 2 6 2 2 6
36925 - 26 26 26 94 94 94 42 42 42 14 14 14
36926 - 0 0 0 0 0 0 0 0 0 0 0 0
36927 - 0 0 0 0 0 0 0 0 0 0 0 0
36928 - 0 0 0 0 0 0 0 0 0 0 0 0
36929 - 0 0 0 0 0 0 0 0 0 0 0 0
36930 - 0 0 0 0 0 0 0 0 0 0 0 0
36931 - 0 0 0 0 0 0 0 0 0 0 0 0
36932 - 0 0 0 0 0 0 0 0 0 0 0 0
36933 - 0 0 0 0 0 0 0 0 0 0 0 0
36934 - 0 0 0 0 0 0 0 0 0 0 0 0
36935 - 0 0 0 0 0 0 0 0 0 0 0 0
36936 - 0 0 0 0 0 0 0 0 0 0 0 0
36937 - 0 0 0 0 0 0 0 0 0 0 0 0
36938 - 0 0 0 0 0 0 0 0 0 10 10 10
36939 - 30 30 30 78 78 78 50 50 50 104 69 6
36940 -192 133 9 216 158 10 236 178 12 236 186 11
36941 -232 195 16 241 208 19 244 214 54 245 215 43
36942 -246 215 20 246 215 20 241 208 19 198 155 10
36943 -200 144 11 216 158 10 156 118 10 2 2 6
36944 - 2 2 6 2 2 6 2 2 6 2 2 6
36945 - 6 6 6 90 90 90 54 54 54 18 18 18
36946 - 6 6 6 0 0 0 0 0 0 0 0 0
36947 - 0 0 0 0 0 0 0 0 0 0 0 0
36948 - 0 0 0 0 0 0 0 0 0 0 0 0
36949 - 0 0 0 0 0 0 0 0 0 0 0 0
36950 - 0 0 0 0 0 0 0 0 0 0 0 0
36951 - 0 0 0 0 0 0 0 0 0 0 0 0
36952 - 0 0 0 0 0 0 0 0 0 0 0 0
36953 - 0 0 0 0 0 0 0 0 0 0 0 0
36954 - 0 0 0 0 0 0 0 0 0 0 0 0
36955 - 0 0 0 0 0 0 0 0 0 0 0 0
36956 - 0 0 0 0 0 0 0 0 0 0 0 0
36957 - 0 0 0 0 0 0 0 0 0 0 0 0
36958 - 0 0 0 0 0 0 0 0 0 10 10 10
36959 - 30 30 30 78 78 78 46 46 46 22 22 22
36960 -137 92 6 210 162 10 239 182 13 238 190 10
36961 -238 202 15 241 208 19 246 215 20 246 215 20
36962 -241 208 19 203 166 17 185 133 11 210 150 10
36963 -216 158 10 210 150 10 102 78 10 2 2 6
36964 - 6 6 6 54 54 54 14 14 14 2 2 6
36965 - 2 2 6 62 62 62 74 74 74 30 30 30
36966 - 10 10 10 0 0 0 0 0 0 0 0 0
36967 - 0 0 0 0 0 0 0 0 0 0 0 0
36968 - 0 0 0 0 0 0 0 0 0 0 0 0
36969 - 0 0 0 0 0 0 0 0 0 0 0 0
36970 - 0 0 0 0 0 0 0 0 0 0 0 0
36971 - 0 0 0 0 0 0 0 0 0 0 0 0
36972 - 0 0 0 0 0 0 0 0 0 0 0 0
36973 - 0 0 0 0 0 0 0 0 0 0 0 0
36974 - 0 0 0 0 0 0 0 0 0 0 0 0
36975 - 0 0 0 0 0 0 0 0 0 0 0 0
36976 - 0 0 0 0 0 0 0 0 0 0 0 0
36977 - 0 0 0 0 0 0 0 0 0 0 0 0
36978 - 0 0 0 0 0 0 0 0 0 10 10 10
36979 - 34 34 34 78 78 78 50 50 50 6 6 6
36980 - 94 70 30 139 102 15 190 146 13 226 184 13
36981 -232 200 30 232 195 16 215 174 15 190 146 13
36982 -168 122 10 192 133 9 210 150 10 213 154 11
36983 -202 150 34 182 157 106 101 98 89 2 2 6
36984 - 2 2 6 78 78 78 116 116 116 58 58 58
36985 - 2 2 6 22 22 22 90 90 90 46 46 46
36986 - 18 18 18 6 6 6 0 0 0 0 0 0
36987 - 0 0 0 0 0 0 0 0 0 0 0 0
36988 - 0 0 0 0 0 0 0 0 0 0 0 0
36989 - 0 0 0 0 0 0 0 0 0 0 0 0
36990 - 0 0 0 0 0 0 0 0 0 0 0 0
36991 - 0 0 0 0 0 0 0 0 0 0 0 0
36992 - 0 0 0 0 0 0 0 0 0 0 0 0
36993 - 0 0 0 0 0 0 0 0 0 0 0 0
36994 - 0 0 0 0 0 0 0 0 0 0 0 0
36995 - 0 0 0 0 0 0 0 0 0 0 0 0
36996 - 0 0 0 0 0 0 0 0 0 0 0 0
36997 - 0 0 0 0 0 0 0 0 0 0 0 0
36998 - 0 0 0 0 0 0 0 0 0 10 10 10
36999 - 38 38 38 86 86 86 50 50 50 6 6 6
37000 -128 128 128 174 154 114 156 107 11 168 122 10
37001 -198 155 10 184 144 12 197 138 11 200 144 11
37002 -206 145 10 206 145 10 197 138 11 188 164 115
37003 -195 195 195 198 198 198 174 174 174 14 14 14
37004 - 2 2 6 22 22 22 116 116 116 116 116 116
37005 - 22 22 22 2 2 6 74 74 74 70 70 70
37006 - 30 30 30 10 10 10 0 0 0 0 0 0
37007 - 0 0 0 0 0 0 0 0 0 0 0 0
37008 - 0 0 0 0 0 0 0 0 0 0 0 0
37009 - 0 0 0 0 0 0 0 0 0 0 0 0
37010 - 0 0 0 0 0 0 0 0 0 0 0 0
37011 - 0 0 0 0 0 0 0 0 0 0 0 0
37012 - 0 0 0 0 0 0 0 0 0 0 0 0
37013 - 0 0 0 0 0 0 0 0 0 0 0 0
37014 - 0 0 0 0 0 0 0 0 0 0 0 0
37015 - 0 0 0 0 0 0 0 0 0 0 0 0
37016 - 0 0 0 0 0 0 0 0 0 0 0 0
37017 - 0 0 0 0 0 0 0 0 0 0 0 0
37018 - 0 0 0 0 0 0 6 6 6 18 18 18
37019 - 50 50 50 101 101 101 26 26 26 10 10 10
37020 -138 138 138 190 190 190 174 154 114 156 107 11
37021 -197 138 11 200 144 11 197 138 11 192 133 9
37022 -180 123 7 190 142 34 190 178 144 187 187 187
37023 -202 202 202 221 221 221 214 214 214 66 66 66
37024 - 2 2 6 2 2 6 50 50 50 62 62 62
37025 - 6 6 6 2 2 6 10 10 10 90 90 90
37026 - 50 50 50 18 18 18 6 6 6 0 0 0
37027 - 0 0 0 0 0 0 0 0 0 0 0 0
37028 - 0 0 0 0 0 0 0 0 0 0 0 0
37029 - 0 0 0 0 0 0 0 0 0 0 0 0
37030 - 0 0 0 0 0 0 0 0 0 0 0 0
37031 - 0 0 0 0 0 0 0 0 0 0 0 0
37032 - 0 0 0 0 0 0 0 0 0 0 0 0
37033 - 0 0 0 0 0 0 0 0 0 0 0 0
37034 - 0 0 0 0 0 0 0 0 0 0 0 0
37035 - 0 0 0 0 0 0 0 0 0 0 0 0
37036 - 0 0 0 0 0 0 0 0 0 0 0 0
37037 - 0 0 0 0 0 0 0 0 0 0 0 0
37038 - 0 0 0 0 0 0 10 10 10 34 34 34
37039 - 74 74 74 74 74 74 2 2 6 6 6 6
37040 -144 144 144 198 198 198 190 190 190 178 166 146
37041 -154 121 60 156 107 11 156 107 11 168 124 44
37042 -174 154 114 187 187 187 190 190 190 210 210 210
37043 -246 246 246 253 253 253 253 253 253 182 182 182
37044 - 6 6 6 2 2 6 2 2 6 2 2 6
37045 - 2 2 6 2 2 6 2 2 6 62 62 62
37046 - 74 74 74 34 34 34 14 14 14 0 0 0
37047 - 0 0 0 0 0 0 0 0 0 0 0 0
37048 - 0 0 0 0 0 0 0 0 0 0 0 0
37049 - 0 0 0 0 0 0 0 0 0 0 0 0
37050 - 0 0 0 0 0 0 0 0 0 0 0 0
37051 - 0 0 0 0 0 0 0 0 0 0 0 0
37052 - 0 0 0 0 0 0 0 0 0 0 0 0
37053 - 0 0 0 0 0 0 0 0 0 0 0 0
37054 - 0 0 0 0 0 0 0 0 0 0 0 0
37055 - 0 0 0 0 0 0 0 0 0 0 0 0
37056 - 0 0 0 0 0 0 0 0 0 0 0 0
37057 - 0 0 0 0 0 0 0 0 0 0 0 0
37058 - 0 0 0 10 10 10 22 22 22 54 54 54
37059 - 94 94 94 18 18 18 2 2 6 46 46 46
37060 -234 234 234 221 221 221 190 190 190 190 190 190
37061 -190 190 190 187 187 187 187 187 187 190 190 190
37062 -190 190 190 195 195 195 214 214 214 242 242 242
37063 -253 253 253 253 253 253 253 253 253 253 253 253
37064 - 82 82 82 2 2 6 2 2 6 2 2 6
37065 - 2 2 6 2 2 6 2 2 6 14 14 14
37066 - 86 86 86 54 54 54 22 22 22 6 6 6
37067 - 0 0 0 0 0 0 0 0 0 0 0 0
37068 - 0 0 0 0 0 0 0 0 0 0 0 0
37069 - 0 0 0 0 0 0 0 0 0 0 0 0
37070 - 0 0 0 0 0 0 0 0 0 0 0 0
37071 - 0 0 0 0 0 0 0 0 0 0 0 0
37072 - 0 0 0 0 0 0 0 0 0 0 0 0
37073 - 0 0 0 0 0 0 0 0 0 0 0 0
37074 - 0 0 0 0 0 0 0 0 0 0 0 0
37075 - 0 0 0 0 0 0 0 0 0 0 0 0
37076 - 0 0 0 0 0 0 0 0 0 0 0 0
37077 - 0 0 0 0 0 0 0 0 0 0 0 0
37078 - 6 6 6 18 18 18 46 46 46 90 90 90
37079 - 46 46 46 18 18 18 6 6 6 182 182 182
37080 -253 253 253 246 246 246 206 206 206 190 190 190
37081 -190 190 190 190 190 190 190 190 190 190 190 190
37082 -206 206 206 231 231 231 250 250 250 253 253 253
37083 -253 253 253 253 253 253 253 253 253 253 253 253
37084 -202 202 202 14 14 14 2 2 6 2 2 6
37085 - 2 2 6 2 2 6 2 2 6 2 2 6
37086 - 42 42 42 86 86 86 42 42 42 18 18 18
37087 - 6 6 6 0 0 0 0 0 0 0 0 0
37088 - 0 0 0 0 0 0 0 0 0 0 0 0
37089 - 0 0 0 0 0 0 0 0 0 0 0 0
37090 - 0 0 0 0 0 0 0 0 0 0 0 0
37091 - 0 0 0 0 0 0 0 0 0 0 0 0
37092 - 0 0 0 0 0 0 0 0 0 0 0 0
37093 - 0 0 0 0 0 0 0 0 0 0 0 0
37094 - 0 0 0 0 0 0 0 0 0 0 0 0
37095 - 0 0 0 0 0 0 0 0 0 0 0 0
37096 - 0 0 0 0 0 0 0 0 0 0 0 0
37097 - 0 0 0 0 0 0 0 0 0 6 6 6
37098 - 14 14 14 38 38 38 74 74 74 66 66 66
37099 - 2 2 6 6 6 6 90 90 90 250 250 250
37100 -253 253 253 253 253 253 238 238 238 198 198 198
37101 -190 190 190 190 190 190 195 195 195 221 221 221
37102 -246 246 246 253 253 253 253 253 253 253 253 253
37103 -253 253 253 253 253 253 253 253 253 253 253 253
37104 -253 253 253 82 82 82 2 2 6 2 2 6
37105 - 2 2 6 2 2 6 2 2 6 2 2 6
37106 - 2 2 6 78 78 78 70 70 70 34 34 34
37107 - 14 14 14 6 6 6 0 0 0 0 0 0
37108 - 0 0 0 0 0 0 0 0 0 0 0 0
37109 - 0 0 0 0 0 0 0 0 0 0 0 0
37110 - 0 0 0 0 0 0 0 0 0 0 0 0
37111 - 0 0 0 0 0 0 0 0 0 0 0 0
37112 - 0 0 0 0 0 0 0 0 0 0 0 0
37113 - 0 0 0 0 0 0 0 0 0 0 0 0
37114 - 0 0 0 0 0 0 0 0 0 0 0 0
37115 - 0 0 0 0 0 0 0 0 0 0 0 0
37116 - 0 0 0 0 0 0 0 0 0 0 0 0
37117 - 0 0 0 0 0 0 0 0 0 14 14 14
37118 - 34 34 34 66 66 66 78 78 78 6 6 6
37119 - 2 2 6 18 18 18 218 218 218 253 253 253
37120 -253 253 253 253 253 253 253 253 253 246 246 246
37121 -226 226 226 231 231 231 246 246 246 253 253 253
37122 -253 253 253 253 253 253 253 253 253 253 253 253
37123 -253 253 253 253 253 253 253 253 253 253 253 253
37124 -253 253 253 178 178 178 2 2 6 2 2 6
37125 - 2 2 6 2 2 6 2 2 6 2 2 6
37126 - 2 2 6 18 18 18 90 90 90 62 62 62
37127 - 30 30 30 10 10 10 0 0 0 0 0 0
37128 - 0 0 0 0 0 0 0 0 0 0 0 0
37129 - 0 0 0 0 0 0 0 0 0 0 0 0
37130 - 0 0 0 0 0 0 0 0 0 0 0 0
37131 - 0 0 0 0 0 0 0 0 0 0 0 0
37132 - 0 0 0 0 0 0 0 0 0 0 0 0
37133 - 0 0 0 0 0 0 0 0 0 0 0 0
37134 - 0 0 0 0 0 0 0 0 0 0 0 0
37135 - 0 0 0 0 0 0 0 0 0 0 0 0
37136 - 0 0 0 0 0 0 0 0 0 0 0 0
37137 - 0 0 0 0 0 0 10 10 10 26 26 26
37138 - 58 58 58 90 90 90 18 18 18 2 2 6
37139 - 2 2 6 110 110 110 253 253 253 253 253 253
37140 -253 253 253 253 253 253 253 253 253 253 253 253
37141 -250 250 250 253 253 253 253 253 253 253 253 253
37142 -253 253 253 253 253 253 253 253 253 253 253 253
37143 -253 253 253 253 253 253 253 253 253 253 253 253
37144 -253 253 253 231 231 231 18 18 18 2 2 6
37145 - 2 2 6 2 2 6 2 2 6 2 2 6
37146 - 2 2 6 2 2 6 18 18 18 94 94 94
37147 - 54 54 54 26 26 26 10 10 10 0 0 0
37148 - 0 0 0 0 0 0 0 0 0 0 0 0
37149 - 0 0 0 0 0 0 0 0 0 0 0 0
37150 - 0 0 0 0 0 0 0 0 0 0 0 0
37151 - 0 0 0 0 0 0 0 0 0 0 0 0
37152 - 0 0 0 0 0 0 0 0 0 0 0 0
37153 - 0 0 0 0 0 0 0 0 0 0 0 0
37154 - 0 0 0 0 0 0 0 0 0 0 0 0
37155 - 0 0 0 0 0 0 0 0 0 0 0 0
37156 - 0 0 0 0 0 0 0 0 0 0 0 0
37157 - 0 0 0 6 6 6 22 22 22 50 50 50
37158 - 90 90 90 26 26 26 2 2 6 2 2 6
37159 - 14 14 14 195 195 195 250 250 250 253 253 253
37160 -253 253 253 253 253 253 253 253 253 253 253 253
37161 -253 253 253 253 253 253 253 253 253 253 253 253
37162 -253 253 253 253 253 253 253 253 253 253 253 253
37163 -253 253 253 253 253 253 253 253 253 253 253 253
37164 -250 250 250 242 242 242 54 54 54 2 2 6
37165 - 2 2 6 2 2 6 2 2 6 2 2 6
37166 - 2 2 6 2 2 6 2 2 6 38 38 38
37167 - 86 86 86 50 50 50 22 22 22 6 6 6
37168 - 0 0 0 0 0 0 0 0 0 0 0 0
37169 - 0 0 0 0 0 0 0 0 0 0 0 0
37170 - 0 0 0 0 0 0 0 0 0 0 0 0
37171 - 0 0 0 0 0 0 0 0 0 0 0 0
37172 - 0 0 0 0 0 0 0 0 0 0 0 0
37173 - 0 0 0 0 0 0 0 0 0 0 0 0
37174 - 0 0 0 0 0 0 0 0 0 0 0 0
37175 - 0 0 0 0 0 0 0 0 0 0 0 0
37176 - 0 0 0 0 0 0 0 0 0 0 0 0
37177 - 6 6 6 14 14 14 38 38 38 82 82 82
37178 - 34 34 34 2 2 6 2 2 6 2 2 6
37179 - 42 42 42 195 195 195 246 246 246 253 253 253
37180 -253 253 253 253 253 253 253 253 253 250 250 250
37181 -242 242 242 242 242 242 250 250 250 253 253 253
37182 -253 253 253 253 253 253 253 253 253 253 253 253
37183 -253 253 253 250 250 250 246 246 246 238 238 238
37184 -226 226 226 231 231 231 101 101 101 6 6 6
37185 - 2 2 6 2 2 6 2 2 6 2 2 6
37186 - 2 2 6 2 2 6 2 2 6 2 2 6
37187 - 38 38 38 82 82 82 42 42 42 14 14 14
37188 - 6 6 6 0 0 0 0 0 0 0 0 0
37189 - 0 0 0 0 0 0 0 0 0 0 0 0
37190 - 0 0 0 0 0 0 0 0 0 0 0 0
37191 - 0 0 0 0 0 0 0 0 0 0 0 0
37192 - 0 0 0 0 0 0 0 0 0 0 0 0
37193 - 0 0 0 0 0 0 0 0 0 0 0 0
37194 - 0 0 0 0 0 0 0 0 0 0 0 0
37195 - 0 0 0 0 0 0 0 0 0 0 0 0
37196 - 0 0 0 0 0 0 0 0 0 0 0 0
37197 - 10 10 10 26 26 26 62 62 62 66 66 66
37198 - 2 2 6 2 2 6 2 2 6 6 6 6
37199 - 70 70 70 170 170 170 206 206 206 234 234 234
37200 -246 246 246 250 250 250 250 250 250 238 238 238
37201 -226 226 226 231 231 231 238 238 238 250 250 250
37202 -250 250 250 250 250 250 246 246 246 231 231 231
37203 -214 214 214 206 206 206 202 202 202 202 202 202
37204 -198 198 198 202 202 202 182 182 182 18 18 18
37205 - 2 2 6 2 2 6 2 2 6 2 2 6
37206 - 2 2 6 2 2 6 2 2 6 2 2 6
37207 - 2 2 6 62 62 62 66 66 66 30 30 30
37208 - 10 10 10 0 0 0 0 0 0 0 0 0
37209 - 0 0 0 0 0 0 0 0 0 0 0 0
37210 - 0 0 0 0 0 0 0 0 0 0 0 0
37211 - 0 0 0 0 0 0 0 0 0 0 0 0
37212 - 0 0 0 0 0 0 0 0 0 0 0 0
37213 - 0 0 0 0 0 0 0 0 0 0 0 0
37214 - 0 0 0 0 0 0 0 0 0 0 0 0
37215 - 0 0 0 0 0 0 0 0 0 0 0 0
37216 - 0 0 0 0 0 0 0 0 0 0 0 0
37217 - 14 14 14 42 42 42 82 82 82 18 18 18
37218 - 2 2 6 2 2 6 2 2 6 10 10 10
37219 - 94 94 94 182 182 182 218 218 218 242 242 242
37220 -250 250 250 253 253 253 253 253 253 250 250 250
37221 -234 234 234 253 253 253 253 253 253 253 253 253
37222 -253 253 253 253 253 253 253 253 253 246 246 246
37223 -238 238 238 226 226 226 210 210 210 202 202 202
37224 -195 195 195 195 195 195 210 210 210 158 158 158
37225 - 6 6 6 14 14 14 50 50 50 14 14 14
37226 - 2 2 6 2 2 6 2 2 6 2 2 6
37227 - 2 2 6 6 6 6 86 86 86 46 46 46
37228 - 18 18 18 6 6 6 0 0 0 0 0 0
37229 - 0 0 0 0 0 0 0 0 0 0 0 0
37230 - 0 0 0 0 0 0 0 0 0 0 0 0
37231 - 0 0 0 0 0 0 0 0 0 0 0 0
37232 - 0 0 0 0 0 0 0 0 0 0 0 0
37233 - 0 0 0 0 0 0 0 0 0 0 0 0
37234 - 0 0 0 0 0 0 0 0 0 0 0 0
37235 - 0 0 0 0 0 0 0 0 0 0 0 0
37236 - 0 0 0 0 0 0 0 0 0 6 6 6
37237 - 22 22 22 54 54 54 70 70 70 2 2 6
37238 - 2 2 6 10 10 10 2 2 6 22 22 22
37239 -166 166 166 231 231 231 250 250 250 253 253 253
37240 -253 253 253 253 253 253 253 253 253 250 250 250
37241 -242 242 242 253 253 253 253 253 253 253 253 253
37242 -253 253 253 253 253 253 253 253 253 253 253 253
37243 -253 253 253 253 253 253 253 253 253 246 246 246
37244 -231 231 231 206 206 206 198 198 198 226 226 226
37245 - 94 94 94 2 2 6 6 6 6 38 38 38
37246 - 30 30 30 2 2 6 2 2 6 2 2 6
37247 - 2 2 6 2 2 6 62 62 62 66 66 66
37248 - 26 26 26 10 10 10 0 0 0 0 0 0
37249 - 0 0 0 0 0 0 0 0 0 0 0 0
37250 - 0 0 0 0 0 0 0 0 0 0 0 0
37251 - 0 0 0 0 0 0 0 0 0 0 0 0
37252 - 0 0 0 0 0 0 0 0 0 0 0 0
37253 - 0 0 0 0 0 0 0 0 0 0 0 0
37254 - 0 0 0 0 0 0 0 0 0 0 0 0
37255 - 0 0 0 0 0 0 0 0 0 0 0 0
37256 - 0 0 0 0 0 0 0 0 0 10 10 10
37257 - 30 30 30 74 74 74 50 50 50 2 2 6
37258 - 26 26 26 26 26 26 2 2 6 106 106 106
37259 -238 238 238 253 253 253 253 253 253 253 253 253
37260 -253 253 253 253 253 253 253 253 253 253 253 253
37261 -253 253 253 253 253 253 253 253 253 253 253 253
37262 -253 253 253 253 253 253 253 253 253 253 253 253
37263 -253 253 253 253 253 253 253 253 253 253 253 253
37264 -253 253 253 246 246 246 218 218 218 202 202 202
37265 -210 210 210 14 14 14 2 2 6 2 2 6
37266 - 30 30 30 22 22 22 2 2 6 2 2 6
37267 - 2 2 6 2 2 6 18 18 18 86 86 86
37268 - 42 42 42 14 14 14 0 0 0 0 0 0
37269 - 0 0 0 0 0 0 0 0 0 0 0 0
37270 - 0 0 0 0 0 0 0 0 0 0 0 0
37271 - 0 0 0 0 0 0 0 0 0 0 0 0
37272 - 0 0 0 0 0 0 0 0 0 0 0 0
37273 - 0 0 0 0 0 0 0 0 0 0 0 0
37274 - 0 0 0 0 0 0 0 0 0 0 0 0
37275 - 0 0 0 0 0 0 0 0 0 0 0 0
37276 - 0 0 0 0 0 0 0 0 0 14 14 14
37277 - 42 42 42 90 90 90 22 22 22 2 2 6
37278 - 42 42 42 2 2 6 18 18 18 218 218 218
37279 -253 253 253 253 253 253 253 253 253 253 253 253
37280 -253 253 253 253 253 253 253 253 253 253 253 253
37281 -253 253 253 253 253 253 253 253 253 253 253 253
37282 -253 253 253 253 253 253 253 253 253 253 253 253
37283 -253 253 253 253 253 253 253 253 253 253 253 253
37284 -253 253 253 253 253 253 250 250 250 221 221 221
37285 -218 218 218 101 101 101 2 2 6 14 14 14
37286 - 18 18 18 38 38 38 10 10 10 2 2 6
37287 - 2 2 6 2 2 6 2 2 6 78 78 78
37288 - 58 58 58 22 22 22 6 6 6 0 0 0
37289 - 0 0 0 0 0 0 0 0 0 0 0 0
37290 - 0 0 0 0 0 0 0 0 0 0 0 0
37291 - 0 0 0 0 0 0 0 0 0 0 0 0
37292 - 0 0 0 0 0 0 0 0 0 0 0 0
37293 - 0 0 0 0 0 0 0 0 0 0 0 0
37294 - 0 0 0 0 0 0 0 0 0 0 0 0
37295 - 0 0 0 0 0 0 0 0 0 0 0 0
37296 - 0 0 0 0 0 0 6 6 6 18 18 18
37297 - 54 54 54 82 82 82 2 2 6 26 26 26
37298 - 22 22 22 2 2 6 123 123 123 253 253 253
37299 -253 253 253 253 253 253 253 253 253 253 253 253
37300 -253 253 253 253 253 253 253 253 253 253 253 253
37301 -253 253 253 253 253 253 253 253 253 253 253 253
37302 -253 253 253 253 253 253 253 253 253 253 253 253
37303 -253 253 253 253 253 253 253 253 253 253 253 253
37304 -253 253 253 253 253 253 253 253 253 250 250 250
37305 -238 238 238 198 198 198 6 6 6 38 38 38
37306 - 58 58 58 26 26 26 38 38 38 2 2 6
37307 - 2 2 6 2 2 6 2 2 6 46 46 46
37308 - 78 78 78 30 30 30 10 10 10 0 0 0
37309 - 0 0 0 0 0 0 0 0 0 0 0 0
37310 - 0 0 0 0 0 0 0 0 0 0 0 0
37311 - 0 0 0 0 0 0 0 0 0 0 0 0
37312 - 0 0 0 0 0 0 0 0 0 0 0 0
37313 - 0 0 0 0 0 0 0 0 0 0 0 0
37314 - 0 0 0 0 0 0 0 0 0 0 0 0
37315 - 0 0 0 0 0 0 0 0 0 0 0 0
37316 - 0 0 0 0 0 0 10 10 10 30 30 30
37317 - 74 74 74 58 58 58 2 2 6 42 42 42
37318 - 2 2 6 22 22 22 231 231 231 253 253 253
37319 -253 253 253 253 253 253 253 253 253 253 253 253
37320 -253 253 253 253 253 253 253 253 253 250 250 250
37321 -253 253 253 253 253 253 253 253 253 253 253 253
37322 -253 253 253 253 253 253 253 253 253 253 253 253
37323 -253 253 253 253 253 253 253 253 253 253 253 253
37324 -253 253 253 253 253 253 253 253 253 253 253 253
37325 -253 253 253 246 246 246 46 46 46 38 38 38
37326 - 42 42 42 14 14 14 38 38 38 14 14 14
37327 - 2 2 6 2 2 6 2 2 6 6 6 6
37328 - 86 86 86 46 46 46 14 14 14 0 0 0
37329 - 0 0 0 0 0 0 0 0 0 0 0 0
37330 - 0 0 0 0 0 0 0 0 0 0 0 0
37331 - 0 0 0 0 0 0 0 0 0 0 0 0
37332 - 0 0 0 0 0 0 0 0 0 0 0 0
37333 - 0 0 0 0 0 0 0 0 0 0 0 0
37334 - 0 0 0 0 0 0 0 0 0 0 0 0
37335 - 0 0 0 0 0 0 0 0 0 0 0 0
37336 - 0 0 0 6 6 6 14 14 14 42 42 42
37337 - 90 90 90 18 18 18 18 18 18 26 26 26
37338 - 2 2 6 116 116 116 253 253 253 253 253 253
37339 -253 253 253 253 253 253 253 253 253 253 253 253
37340 -253 253 253 253 253 253 250 250 250 238 238 238
37341 -253 253 253 253 253 253 253 253 253 253 253 253
37342 -253 253 253 253 253 253 253 253 253 253 253 253
37343 -253 253 253 253 253 253 253 253 253 253 253 253
37344 -253 253 253 253 253 253 253 253 253 253 253 253
37345 -253 253 253 253 253 253 94 94 94 6 6 6
37346 - 2 2 6 2 2 6 10 10 10 34 34 34
37347 - 2 2 6 2 2 6 2 2 6 2 2 6
37348 - 74 74 74 58 58 58 22 22 22 6 6 6
37349 - 0 0 0 0 0 0 0 0 0 0 0 0
37350 - 0 0 0 0 0 0 0 0 0 0 0 0
37351 - 0 0 0 0 0 0 0 0 0 0 0 0
37352 - 0 0 0 0 0 0 0 0 0 0 0 0
37353 - 0 0 0 0 0 0 0 0 0 0 0 0
37354 - 0 0 0 0 0 0 0 0 0 0 0 0
37355 - 0 0 0 0 0 0 0 0 0 0 0 0
37356 - 0 0 0 10 10 10 26 26 26 66 66 66
37357 - 82 82 82 2 2 6 38 38 38 6 6 6
37358 - 14 14 14 210 210 210 253 253 253 253 253 253
37359 -253 253 253 253 253 253 253 253 253 253 253 253
37360 -253 253 253 253 253 253 246 246 246 242 242 242
37361 -253 253 253 253 253 253 253 253 253 253 253 253
37362 -253 253 253 253 253 253 253 253 253 253 253 253
37363 -253 253 253 253 253 253 253 253 253 253 253 253
37364 -253 253 253 253 253 253 253 253 253 253 253 253
37365 -253 253 253 253 253 253 144 144 144 2 2 6
37366 - 2 2 6 2 2 6 2 2 6 46 46 46
37367 - 2 2 6 2 2 6 2 2 6 2 2 6
37368 - 42 42 42 74 74 74 30 30 30 10 10 10
37369 - 0 0 0 0 0 0 0 0 0 0 0 0
37370 - 0 0 0 0 0 0 0 0 0 0 0 0
37371 - 0 0 0 0 0 0 0 0 0 0 0 0
37372 - 0 0 0 0 0 0 0 0 0 0 0 0
37373 - 0 0 0 0 0 0 0 0 0 0 0 0
37374 - 0 0 0 0 0 0 0 0 0 0 0 0
37375 - 0 0 0 0 0 0 0 0 0 0 0 0
37376 - 6 6 6 14 14 14 42 42 42 90 90 90
37377 - 26 26 26 6 6 6 42 42 42 2 2 6
37378 - 74 74 74 250 250 250 253 253 253 253 253 253
37379 -253 253 253 253 253 253 253 253 253 253 253 253
37380 -253 253 253 253 253 253 242 242 242 242 242 242
37381 -253 253 253 253 253 253 253 253 253 253 253 253
37382 -253 253 253 253 253 253 253 253 253 253 253 253
37383 -253 253 253 253 253 253 253 253 253 253 253 253
37384 -253 253 253 253 253 253 253 253 253 253 253 253
37385 -253 253 253 253 253 253 182 182 182 2 2 6
37386 - 2 2 6 2 2 6 2 2 6 46 46 46
37387 - 2 2 6 2 2 6 2 2 6 2 2 6
37388 - 10 10 10 86 86 86 38 38 38 10 10 10
37389 - 0 0 0 0 0 0 0 0 0 0 0 0
37390 - 0 0 0 0 0 0 0 0 0 0 0 0
37391 - 0 0 0 0 0 0 0 0 0 0 0 0
37392 - 0 0 0 0 0 0 0 0 0 0 0 0
37393 - 0 0 0 0 0 0 0 0 0 0 0 0
37394 - 0 0 0 0 0 0 0 0 0 0 0 0
37395 - 0 0 0 0 0 0 0 0 0 0 0 0
37396 - 10 10 10 26 26 26 66 66 66 82 82 82
37397 - 2 2 6 22 22 22 18 18 18 2 2 6
37398 -149 149 149 253 253 253 253 253 253 253 253 253
37399 -253 253 253 253 253 253 253 253 253 253 253 253
37400 -253 253 253 253 253 253 234 234 234 242 242 242
37401 -253 253 253 253 253 253 253 253 253 253 253 253
37402 -253 253 253 253 253 253 253 253 253 253 253 253
37403 -253 253 253 253 253 253 253 253 253 253 253 253
37404 -253 253 253 253 253 253 253 253 253 253 253 253
37405 -253 253 253 253 253 253 206 206 206 2 2 6
37406 - 2 2 6 2 2 6 2 2 6 38 38 38
37407 - 2 2 6 2 2 6 2 2 6 2 2 6
37408 - 6 6 6 86 86 86 46 46 46 14 14 14
37409 - 0 0 0 0 0 0 0 0 0 0 0 0
37410 - 0 0 0 0 0 0 0 0 0 0 0 0
37411 - 0 0 0 0 0 0 0 0 0 0 0 0
37412 - 0 0 0 0 0 0 0 0 0 0 0 0
37413 - 0 0 0 0 0 0 0 0 0 0 0 0
37414 - 0 0 0 0 0 0 0 0 0 0 0 0
37415 - 0 0 0 0 0 0 0 0 0 6 6 6
37416 - 18 18 18 46 46 46 86 86 86 18 18 18
37417 - 2 2 6 34 34 34 10 10 10 6 6 6
37418 -210 210 210 253 253 253 253 253 253 253 253 253
37419 -253 253 253 253 253 253 253 253 253 253 253 253
37420 -253 253 253 253 253 253 234 234 234 242 242 242
37421 -253 253 253 253 253 253 253 253 253 253 253 253
37422 -253 253 253 253 253 253 253 253 253 253 253 253
37423 -253 253 253 253 253 253 253 253 253 253 253 253
37424 -253 253 253 253 253 253 253 253 253 253 253 253
37425 -253 253 253 253 253 253 221 221 221 6 6 6
37426 - 2 2 6 2 2 6 6 6 6 30 30 30
37427 - 2 2 6 2 2 6 2 2 6 2 2 6
37428 - 2 2 6 82 82 82 54 54 54 18 18 18
37429 - 6 6 6 0 0 0 0 0 0 0 0 0
37430 - 0 0 0 0 0 0 0 0 0 0 0 0
37431 - 0 0 0 0 0 0 0 0 0 0 0 0
37432 - 0 0 0 0 0 0 0 0 0 0 0 0
37433 - 0 0 0 0 0 0 0 0 0 0 0 0
37434 - 0 0 0 0 0 0 0 0 0 0 0 0
37435 - 0 0 0 0 0 0 0 0 0 10 10 10
37436 - 26 26 26 66 66 66 62 62 62 2 2 6
37437 - 2 2 6 38 38 38 10 10 10 26 26 26
37438 -238 238 238 253 253 253 253 253 253 253 253 253
37439 -253 253 253 253 253 253 253 253 253 253 253 253
37440 -253 253 253 253 253 253 231 231 231 238 238 238
37441 -253 253 253 253 253 253 253 253 253 253 253 253
37442 -253 253 253 253 253 253 253 253 253 253 253 253
37443 -253 253 253 253 253 253 253 253 253 253 253 253
37444 -253 253 253 253 253 253 253 253 253 253 253 253
37445 -253 253 253 253 253 253 231 231 231 6 6 6
37446 - 2 2 6 2 2 6 10 10 10 30 30 30
37447 - 2 2 6 2 2 6 2 2 6 2 2 6
37448 - 2 2 6 66 66 66 58 58 58 22 22 22
37449 - 6 6 6 0 0 0 0 0 0 0 0 0
37450 - 0 0 0 0 0 0 0 0 0 0 0 0
37451 - 0 0 0 0 0 0 0 0 0 0 0 0
37452 - 0 0 0 0 0 0 0 0 0 0 0 0
37453 - 0 0 0 0 0 0 0 0 0 0 0 0
37454 - 0 0 0 0 0 0 0 0 0 0 0 0
37455 - 0 0 0 0 0 0 0 0 0 10 10 10
37456 - 38 38 38 78 78 78 6 6 6 2 2 6
37457 - 2 2 6 46 46 46 14 14 14 42 42 42
37458 -246 246 246 253 253 253 253 253 253 253 253 253
37459 -253 253 253 253 253 253 253 253 253 253 253 253
37460 -253 253 253 253 253 253 231 231 231 242 242 242
37461 -253 253 253 253 253 253 253 253 253 253 253 253
37462 -253 253 253 253 253 253 253 253 253 253 253 253
37463 -253 253 253 253 253 253 253 253 253 253 253 253
37464 -253 253 253 253 253 253 253 253 253 253 253 253
37465 -253 253 253 253 253 253 234 234 234 10 10 10
37466 - 2 2 6 2 2 6 22 22 22 14 14 14
37467 - 2 2 6 2 2 6 2 2 6 2 2 6
37468 - 2 2 6 66 66 66 62 62 62 22 22 22
37469 - 6 6 6 0 0 0 0 0 0 0 0 0
37470 - 0 0 0 0 0 0 0 0 0 0 0 0
37471 - 0 0 0 0 0 0 0 0 0 0 0 0
37472 - 0 0 0 0 0 0 0 0 0 0 0 0
37473 - 0 0 0 0 0 0 0 0 0 0 0 0
37474 - 0 0 0 0 0 0 0 0 0 0 0 0
37475 - 0 0 0 0 0 0 6 6 6 18 18 18
37476 - 50 50 50 74 74 74 2 2 6 2 2 6
37477 - 14 14 14 70 70 70 34 34 34 62 62 62
37478 -250 250 250 253 253 253 253 253 253 253 253 253
37479 -253 253 253 253 253 253 253 253 253 253 253 253
37480 -253 253 253 253 253 253 231 231 231 246 246 246
37481 -253 253 253 253 253 253 253 253 253 253 253 253
37482 -253 253 253 253 253 253 253 253 253 253 253 253
37483 -253 253 253 253 253 253 253 253 253 253 253 253
37484 -253 253 253 253 253 253 253 253 253 253 253 253
37485 -253 253 253 253 253 253 234 234 234 14 14 14
37486 - 2 2 6 2 2 6 30 30 30 2 2 6
37487 - 2 2 6 2 2 6 2 2 6 2 2 6
37488 - 2 2 6 66 66 66 62 62 62 22 22 22
37489 - 6 6 6 0 0 0 0 0 0 0 0 0
37490 - 0 0 0 0 0 0 0 0 0 0 0 0
37491 - 0 0 0 0 0 0 0 0 0 0 0 0
37492 - 0 0 0 0 0 0 0 0 0 0 0 0
37493 - 0 0 0 0 0 0 0 0 0 0 0 0
37494 - 0 0 0 0 0 0 0 0 0 0 0 0
37495 - 0 0 0 0 0 0 6 6 6 18 18 18
37496 - 54 54 54 62 62 62 2 2 6 2 2 6
37497 - 2 2 6 30 30 30 46 46 46 70 70 70
37498 -250 250 250 253 253 253 253 253 253 253 253 253
37499 -253 253 253 253 253 253 253 253 253 253 253 253
37500 -253 253 253 253 253 253 231 231 231 246 246 246
37501 -253 253 253 253 253 253 253 253 253 253 253 253
37502 -253 253 253 253 253 253 253 253 253 253 253 253
37503 -253 253 253 253 253 253 253 253 253 253 253 253
37504 -253 253 253 253 253 253 253 253 253 253 253 253
37505 -253 253 253 253 253 253 226 226 226 10 10 10
37506 - 2 2 6 6 6 6 30 30 30 2 2 6
37507 - 2 2 6 2 2 6 2 2 6 2 2 6
37508 - 2 2 6 66 66 66 58 58 58 22 22 22
37509 - 6 6 6 0 0 0 0 0 0 0 0 0
37510 - 0 0 0 0 0 0 0 0 0 0 0 0
37511 - 0 0 0 0 0 0 0 0 0 0 0 0
37512 - 0 0 0 0 0 0 0 0 0 0 0 0
37513 - 0 0 0 0 0 0 0 0 0 0 0 0
37514 - 0 0 0 0 0 0 0 0 0 0 0 0
37515 - 0 0 0 0 0 0 6 6 6 22 22 22
37516 - 58 58 58 62 62 62 2 2 6 2 2 6
37517 - 2 2 6 2 2 6 30 30 30 78 78 78
37518 -250 250 250 253 253 253 253 253 253 253 253 253
37519 -253 253 253 253 253 253 253 253 253 253 253 253
37520 -253 253 253 253 253 253 231 231 231 246 246 246
37521 -253 253 253 253 253 253 253 253 253 253 253 253
37522 -253 253 253 253 253 253 253 253 253 253 253 253
37523 -253 253 253 253 253 253 253 253 253 253 253 253
37524 -253 253 253 253 253 253 253 253 253 253 253 253
37525 -253 253 253 253 253 253 206 206 206 2 2 6
37526 - 22 22 22 34 34 34 18 14 6 22 22 22
37527 - 26 26 26 18 18 18 6 6 6 2 2 6
37528 - 2 2 6 82 82 82 54 54 54 18 18 18
37529 - 6 6 6 0 0 0 0 0 0 0 0 0
37530 - 0 0 0 0 0 0 0 0 0 0 0 0
37531 - 0 0 0 0 0 0 0 0 0 0 0 0
37532 - 0 0 0 0 0 0 0 0 0 0 0 0
37533 - 0 0 0 0 0 0 0 0 0 0 0 0
37534 - 0 0 0 0 0 0 0 0 0 0 0 0
37535 - 0 0 0 0 0 0 6 6 6 26 26 26
37536 - 62 62 62 106 106 106 74 54 14 185 133 11
37537 -210 162 10 121 92 8 6 6 6 62 62 62
37538 -238 238 238 253 253 253 253 253 253 253 253 253
37539 -253 253 253 253 253 253 253 253 253 253 253 253
37540 -253 253 253 253 253 253 231 231 231 246 246 246
37541 -253 253 253 253 253 253 253 253 253 253 253 253
37542 -253 253 253 253 253 253 253 253 253 253 253 253
37543 -253 253 253 253 253 253 253 253 253 253 253 253
37544 -253 253 253 253 253 253 253 253 253 253 253 253
37545 -253 253 253 253 253 253 158 158 158 18 18 18
37546 - 14 14 14 2 2 6 2 2 6 2 2 6
37547 - 6 6 6 18 18 18 66 66 66 38 38 38
37548 - 6 6 6 94 94 94 50 50 50 18 18 18
37549 - 6 6 6 0 0 0 0 0 0 0 0 0
37550 - 0 0 0 0 0 0 0 0 0 0 0 0
37551 - 0 0 0 0 0 0 0 0 0 0 0 0
37552 - 0 0 0 0 0 0 0 0 0 0 0 0
37553 - 0 0 0 0 0 0 0 0 0 0 0 0
37554 - 0 0 0 0 0 0 0 0 0 6 6 6
37555 - 10 10 10 10 10 10 18 18 18 38 38 38
37556 - 78 78 78 142 134 106 216 158 10 242 186 14
37557 -246 190 14 246 190 14 156 118 10 10 10 10
37558 - 90 90 90 238 238 238 253 253 253 253 253 253
37559 -253 253 253 253 253 253 253 253 253 253 253 253
37560 -253 253 253 253 253 253 231 231 231 250 250 250
37561 -253 253 253 253 253 253 253 253 253 253 253 253
37562 -253 253 253 253 253 253 253 253 253 253 253 253
37563 -253 253 253 253 253 253 253 253 253 253 253 253
37564 -253 253 253 253 253 253 253 253 253 246 230 190
37565 -238 204 91 238 204 91 181 142 44 37 26 9
37566 - 2 2 6 2 2 6 2 2 6 2 2 6
37567 - 2 2 6 2 2 6 38 38 38 46 46 46
37568 - 26 26 26 106 106 106 54 54 54 18 18 18
37569 - 6 6 6 0 0 0 0 0 0 0 0 0
37570 - 0 0 0 0 0 0 0 0 0 0 0 0
37571 - 0 0 0 0 0 0 0 0 0 0 0 0
37572 - 0 0 0 0 0 0 0 0 0 0 0 0
37573 - 0 0 0 0 0 0 0 0 0 0 0 0
37574 - 0 0 0 6 6 6 14 14 14 22 22 22
37575 - 30 30 30 38 38 38 50 50 50 70 70 70
37576 -106 106 106 190 142 34 226 170 11 242 186 14
37577 -246 190 14 246 190 14 246 190 14 154 114 10
37578 - 6 6 6 74 74 74 226 226 226 253 253 253
37579 -253 253 253 253 253 253 253 253 253 253 253 253
37580 -253 253 253 253 253 253 231 231 231 250 250 250
37581 -253 253 253 253 253 253 253 253 253 253 253 253
37582 -253 253 253 253 253 253 253 253 253 253 253 253
37583 -253 253 253 253 253 253 253 253 253 253 253 253
37584 -253 253 253 253 253 253 253 253 253 228 184 62
37585 -241 196 14 241 208 19 232 195 16 38 30 10
37586 - 2 2 6 2 2 6 2 2 6 2 2 6
37587 - 2 2 6 6 6 6 30 30 30 26 26 26
37588 -203 166 17 154 142 90 66 66 66 26 26 26
37589 - 6 6 6 0 0 0 0 0 0 0 0 0
37590 - 0 0 0 0 0 0 0 0 0 0 0 0
37591 - 0 0 0 0 0 0 0 0 0 0 0 0
37592 - 0 0 0 0 0 0 0 0 0 0 0 0
37593 - 0 0 0 0 0 0 0 0 0 0 0 0
37594 - 6 6 6 18 18 18 38 38 38 58 58 58
37595 - 78 78 78 86 86 86 101 101 101 123 123 123
37596 -175 146 61 210 150 10 234 174 13 246 186 14
37597 -246 190 14 246 190 14 246 190 14 238 190 10
37598 -102 78 10 2 2 6 46 46 46 198 198 198
37599 -253 253 253 253 253 253 253 253 253 253 253 253
37600 -253 253 253 253 253 253 234 234 234 242 242 242
37601 -253 253 253 253 253 253 253 253 253 253 253 253
37602 -253 253 253 253 253 253 253 253 253 253 253 253
37603 -253 253 253 253 253 253 253 253 253 253 253 253
37604 -253 253 253 253 253 253 253 253 253 224 178 62
37605 -242 186 14 241 196 14 210 166 10 22 18 6
37606 - 2 2 6 2 2 6 2 2 6 2 2 6
37607 - 2 2 6 2 2 6 6 6 6 121 92 8
37608 -238 202 15 232 195 16 82 82 82 34 34 34
37609 - 10 10 10 0 0 0 0 0 0 0 0 0
37610 - 0 0 0 0 0 0 0 0 0 0 0 0
37611 - 0 0 0 0 0 0 0 0 0 0 0 0
37612 - 0 0 0 0 0 0 0 0 0 0 0 0
37613 - 0 0 0 0 0 0 0 0 0 0 0 0
37614 - 14 14 14 38 38 38 70 70 70 154 122 46
37615 -190 142 34 200 144 11 197 138 11 197 138 11
37616 -213 154 11 226 170 11 242 186 14 246 190 14
37617 -246 190 14 246 190 14 246 190 14 246 190 14
37618 -225 175 15 46 32 6 2 2 6 22 22 22
37619 -158 158 158 250 250 250 253 253 253 253 253 253
37620 -253 253 253 253 253 253 253 253 253 253 253 253
37621 -253 253 253 253 253 253 253 253 253 253 253 253
37622 -253 253 253 253 253 253 253 253 253 253 253 253
37623 -253 253 253 253 253 253 253 253 253 253 253 253
37624 -253 253 253 250 250 250 242 242 242 224 178 62
37625 -239 182 13 236 186 11 213 154 11 46 32 6
37626 - 2 2 6 2 2 6 2 2 6 2 2 6
37627 - 2 2 6 2 2 6 61 42 6 225 175 15
37628 -238 190 10 236 186 11 112 100 78 42 42 42
37629 - 14 14 14 0 0 0 0 0 0 0 0 0
37630 - 0 0 0 0 0 0 0 0 0 0 0 0
37631 - 0 0 0 0 0 0 0 0 0 0 0 0
37632 - 0 0 0 0 0 0 0 0 0 0 0 0
37633 - 0 0 0 0 0 0 0 0 0 6 6 6
37634 - 22 22 22 54 54 54 154 122 46 213 154 11
37635 -226 170 11 230 174 11 226 170 11 226 170 11
37636 -236 178 12 242 186 14 246 190 14 246 190 14
37637 -246 190 14 246 190 14 246 190 14 246 190 14
37638 -241 196 14 184 144 12 10 10 10 2 2 6
37639 - 6 6 6 116 116 116 242 242 242 253 253 253
37640 -253 253 253 253 253 253 253 253 253 253 253 253
37641 -253 253 253 253 253 253 253 253 253 253 253 253
37642 -253 253 253 253 253 253 253 253 253 253 253 253
37643 -253 253 253 253 253 253 253 253 253 253 253 253
37644 -253 253 253 231 231 231 198 198 198 214 170 54
37645 -236 178 12 236 178 12 210 150 10 137 92 6
37646 - 18 14 6 2 2 6 2 2 6 2 2 6
37647 - 6 6 6 70 47 6 200 144 11 236 178 12
37648 -239 182 13 239 182 13 124 112 88 58 58 58
37649 - 22 22 22 6 6 6 0 0 0 0 0 0
37650 - 0 0 0 0 0 0 0 0 0 0 0 0
37651 - 0 0 0 0 0 0 0 0 0 0 0 0
37652 - 0 0 0 0 0 0 0 0 0 0 0 0
37653 - 0 0 0 0 0 0 0 0 0 10 10 10
37654 - 30 30 30 70 70 70 180 133 36 226 170 11
37655 -239 182 13 242 186 14 242 186 14 246 186 14
37656 -246 190 14 246 190 14 246 190 14 246 190 14
37657 -246 190 14 246 190 14 246 190 14 246 190 14
37658 -246 190 14 232 195 16 98 70 6 2 2 6
37659 - 2 2 6 2 2 6 66 66 66 221 221 221
37660 -253 253 253 253 253 253 253 253 253 253 253 253
37661 -253 253 253 253 253 253 253 253 253 253 253 253
37662 -253 253 253 253 253 253 253 253 253 253 253 253
37663 -253 253 253 253 253 253 253 253 253 253 253 253
37664 -253 253 253 206 206 206 198 198 198 214 166 58
37665 -230 174 11 230 174 11 216 158 10 192 133 9
37666 -163 110 8 116 81 8 102 78 10 116 81 8
37667 -167 114 7 197 138 11 226 170 11 239 182 13
37668 -242 186 14 242 186 14 162 146 94 78 78 78
37669 - 34 34 34 14 14 14 6 6 6 0 0 0
37670 - 0 0 0 0 0 0 0 0 0 0 0 0
37671 - 0 0 0 0 0 0 0 0 0 0 0 0
37672 - 0 0 0 0 0 0 0 0 0 0 0 0
37673 - 0 0 0 0 0 0 0 0 0 6 6 6
37674 - 30 30 30 78 78 78 190 142 34 226 170 11
37675 -239 182 13 246 190 14 246 190 14 246 190 14
37676 -246 190 14 246 190 14 246 190 14 246 190 14
37677 -246 190 14 246 190 14 246 190 14 246 190 14
37678 -246 190 14 241 196 14 203 166 17 22 18 6
37679 - 2 2 6 2 2 6 2 2 6 38 38 38
37680 -218 218 218 253 253 253 253 253 253 253 253 253
37681 -253 253 253 253 253 253 253 253 253 253 253 253
37682 -253 253 253 253 253 253 253 253 253 253 253 253
37683 -253 253 253 253 253 253 253 253 253 253 253 253
37684 -250 250 250 206 206 206 198 198 198 202 162 69
37685 -226 170 11 236 178 12 224 166 10 210 150 10
37686 -200 144 11 197 138 11 192 133 9 197 138 11
37687 -210 150 10 226 170 11 242 186 14 246 190 14
37688 -246 190 14 246 186 14 225 175 15 124 112 88
37689 - 62 62 62 30 30 30 14 14 14 6 6 6
37690 - 0 0 0 0 0 0 0 0 0 0 0 0
37691 - 0 0 0 0 0 0 0 0 0 0 0 0
37692 - 0 0 0 0 0 0 0 0 0 0 0 0
37693 - 0 0 0 0 0 0 0 0 0 10 10 10
37694 - 30 30 30 78 78 78 174 135 50 224 166 10
37695 -239 182 13 246 190 14 246 190 14 246 190 14
37696 -246 190 14 246 190 14 246 190 14 246 190 14
37697 -246 190 14 246 190 14 246 190 14 246 190 14
37698 -246 190 14 246 190 14 241 196 14 139 102 15
37699 - 2 2 6 2 2 6 2 2 6 2 2 6
37700 - 78 78 78 250 250 250 253 253 253 253 253 253
37701 -253 253 253 253 253 253 253 253 253 253 253 253
37702 -253 253 253 253 253 253 253 253 253 253 253 253
37703 -253 253 253 253 253 253 253 253 253 253 253 253
37704 -250 250 250 214 214 214 198 198 198 190 150 46
37705 -219 162 10 236 178 12 234 174 13 224 166 10
37706 -216 158 10 213 154 11 213 154 11 216 158 10
37707 -226 170 11 239 182 13 246 190 14 246 190 14
37708 -246 190 14 246 190 14 242 186 14 206 162 42
37709 -101 101 101 58 58 58 30 30 30 14 14 14
37710 - 6 6 6 0 0 0 0 0 0 0 0 0
37711 - 0 0 0 0 0 0 0 0 0 0 0 0
37712 - 0 0 0 0 0 0 0 0 0 0 0 0
37713 - 0 0 0 0 0 0 0 0 0 10 10 10
37714 - 30 30 30 74 74 74 174 135 50 216 158 10
37715 -236 178 12 246 190 14 246 190 14 246 190 14
37716 -246 190 14 246 190 14 246 190 14 246 190 14
37717 -246 190 14 246 190 14 246 190 14 246 190 14
37718 -246 190 14 246 190 14 241 196 14 226 184 13
37719 - 61 42 6 2 2 6 2 2 6 2 2 6
37720 - 22 22 22 238 238 238 253 253 253 253 253 253
37721 -253 253 253 253 253 253 253 253 253 253 253 253
37722 -253 253 253 253 253 253 253 253 253 253 253 253
37723 -253 253 253 253 253 253 253 253 253 253 253 253
37724 -253 253 253 226 226 226 187 187 187 180 133 36
37725 -216 158 10 236 178 12 239 182 13 236 178 12
37726 -230 174 11 226 170 11 226 170 11 230 174 11
37727 -236 178 12 242 186 14 246 190 14 246 190 14
37728 -246 190 14 246 190 14 246 186 14 239 182 13
37729 -206 162 42 106 106 106 66 66 66 34 34 34
37730 - 14 14 14 6 6 6 0 0 0 0 0 0
37731 - 0 0 0 0 0 0 0 0 0 0 0 0
37732 - 0 0 0 0 0 0 0 0 0 0 0 0
37733 - 0 0 0 0 0 0 0 0 0 6 6 6
37734 - 26 26 26 70 70 70 163 133 67 213 154 11
37735 -236 178 12 246 190 14 246 190 14 246 190 14
37736 -246 190 14 246 190 14 246 190 14 246 190 14
37737 -246 190 14 246 190 14 246 190 14 246 190 14
37738 -246 190 14 246 190 14 246 190 14 241 196 14
37739 -190 146 13 18 14 6 2 2 6 2 2 6
37740 - 46 46 46 246 246 246 253 253 253 253 253 253
37741 -253 253 253 253 253 253 253 253 253 253 253 253
37742 -253 253 253 253 253 253 253 253 253 253 253 253
37743 -253 253 253 253 253 253 253 253 253 253 253 253
37744 -253 253 253 221 221 221 86 86 86 156 107 11
37745 -216 158 10 236 178 12 242 186 14 246 186 14
37746 -242 186 14 239 182 13 239 182 13 242 186 14
37747 -242 186 14 246 186 14 246 190 14 246 190 14
37748 -246 190 14 246 190 14 246 190 14 246 190 14
37749 -242 186 14 225 175 15 142 122 72 66 66 66
37750 - 30 30 30 10 10 10 0 0 0 0 0 0
37751 - 0 0 0 0 0 0 0 0 0 0 0 0
37752 - 0 0 0 0 0 0 0 0 0 0 0 0
37753 - 0 0 0 0 0 0 0 0 0 6 6 6
37754 - 26 26 26 70 70 70 163 133 67 210 150 10
37755 -236 178 12 246 190 14 246 190 14 246 190 14
37756 -246 190 14 246 190 14 246 190 14 246 190 14
37757 -246 190 14 246 190 14 246 190 14 246 190 14
37758 -246 190 14 246 190 14 246 190 14 246 190 14
37759 -232 195 16 121 92 8 34 34 34 106 106 106
37760 -221 221 221 253 253 253 253 253 253 253 253 253
37761 -253 253 253 253 253 253 253 253 253 253 253 253
37762 -253 253 253 253 253 253 253 253 253 253 253 253
37763 -253 253 253 253 253 253 253 253 253 253 253 253
37764 -242 242 242 82 82 82 18 14 6 163 110 8
37765 -216 158 10 236 178 12 242 186 14 246 190 14
37766 -246 190 14 246 190 14 246 190 14 246 190 14
37767 -246 190 14 246 190 14 246 190 14 246 190 14
37768 -246 190 14 246 190 14 246 190 14 246 190 14
37769 -246 190 14 246 190 14 242 186 14 163 133 67
37770 - 46 46 46 18 18 18 6 6 6 0 0 0
37771 - 0 0 0 0 0 0 0 0 0 0 0 0
37772 - 0 0 0 0 0 0 0 0 0 0 0 0
37773 - 0 0 0 0 0 0 0 0 0 10 10 10
37774 - 30 30 30 78 78 78 163 133 67 210 150 10
37775 -236 178 12 246 186 14 246 190 14 246 190 14
37776 -246 190 14 246 190 14 246 190 14 246 190 14
37777 -246 190 14 246 190 14 246 190 14 246 190 14
37778 -246 190 14 246 190 14 246 190 14 246 190 14
37779 -241 196 14 215 174 15 190 178 144 253 253 253
37780 -253 253 253 253 253 253 253 253 253 253 253 253
37781 -253 253 253 253 253 253 253 253 253 253 253 253
37782 -253 253 253 253 253 253 253 253 253 253 253 253
37783 -253 253 253 253 253 253 253 253 253 218 218 218
37784 - 58 58 58 2 2 6 22 18 6 167 114 7
37785 -216 158 10 236 178 12 246 186 14 246 190 14
37786 -246 190 14 246 190 14 246 190 14 246 190 14
37787 -246 190 14 246 190 14 246 190 14 246 190 14
37788 -246 190 14 246 190 14 246 190 14 246 190 14
37789 -246 190 14 246 186 14 242 186 14 190 150 46
37790 - 54 54 54 22 22 22 6 6 6 0 0 0
37791 - 0 0 0 0 0 0 0 0 0 0 0 0
37792 - 0 0 0 0 0 0 0 0 0 0 0 0
37793 - 0 0 0 0 0 0 0 0 0 14 14 14
37794 - 38 38 38 86 86 86 180 133 36 213 154 11
37795 -236 178 12 246 186 14 246 190 14 246 190 14
37796 -246 190 14 246 190 14 246 190 14 246 190 14
37797 -246 190 14 246 190 14 246 190 14 246 190 14
37798 -246 190 14 246 190 14 246 190 14 246 190 14
37799 -246 190 14 232 195 16 190 146 13 214 214 214
37800 -253 253 253 253 253 253 253 253 253 253 253 253
37801 -253 253 253 253 253 253 253 253 253 253 253 253
37802 -253 253 253 253 253 253 253 253 253 253 253 253
37803 -253 253 253 250 250 250 170 170 170 26 26 26
37804 - 2 2 6 2 2 6 37 26 9 163 110 8
37805 -219 162 10 239 182 13 246 186 14 246 190 14
37806 -246 190 14 246 190 14 246 190 14 246 190 14
37807 -246 190 14 246 190 14 246 190 14 246 190 14
37808 -246 190 14 246 190 14 246 190 14 246 190 14
37809 -246 186 14 236 178 12 224 166 10 142 122 72
37810 - 46 46 46 18 18 18 6 6 6 0 0 0
37811 - 0 0 0 0 0 0 0 0 0 0 0 0
37812 - 0 0 0 0 0 0 0 0 0 0 0 0
37813 - 0 0 0 0 0 0 6 6 6 18 18 18
37814 - 50 50 50 109 106 95 192 133 9 224 166 10
37815 -242 186 14 246 190 14 246 190 14 246 190 14
37816 -246 190 14 246 190 14 246 190 14 246 190 14
37817 -246 190 14 246 190 14 246 190 14 246 190 14
37818 -246 190 14 246 190 14 246 190 14 246 190 14
37819 -242 186 14 226 184 13 210 162 10 142 110 46
37820 -226 226 226 253 253 253 253 253 253 253 253 253
37821 -253 253 253 253 253 253 253 253 253 253 253 253
37822 -253 253 253 253 253 253 253 253 253 253 253 253
37823 -198 198 198 66 66 66 2 2 6 2 2 6
37824 - 2 2 6 2 2 6 50 34 6 156 107 11
37825 -219 162 10 239 182 13 246 186 14 246 190 14
37826 -246 190 14 246 190 14 246 190 14 246 190 14
37827 -246 190 14 246 190 14 246 190 14 246 190 14
37828 -246 190 14 246 190 14 246 190 14 242 186 14
37829 -234 174 13 213 154 11 154 122 46 66 66 66
37830 - 30 30 30 10 10 10 0 0 0 0 0 0
37831 - 0 0 0 0 0 0 0 0 0 0 0 0
37832 - 0 0 0 0 0 0 0 0 0 0 0 0
37833 - 0 0 0 0 0 0 6 6 6 22 22 22
37834 - 58 58 58 154 121 60 206 145 10 234 174 13
37835 -242 186 14 246 186 14 246 190 14 246 190 14
37836 -246 190 14 246 190 14 246 190 14 246 190 14
37837 -246 190 14 246 190 14 246 190 14 246 190 14
37838 -246 190 14 246 190 14 246 190 14 246 190 14
37839 -246 186 14 236 178 12 210 162 10 163 110 8
37840 - 61 42 6 138 138 138 218 218 218 250 250 250
37841 -253 253 253 253 253 253 253 253 253 250 250 250
37842 -242 242 242 210 210 210 144 144 144 66 66 66
37843 - 6 6 6 2 2 6 2 2 6 2 2 6
37844 - 2 2 6 2 2 6 61 42 6 163 110 8
37845 -216 158 10 236 178 12 246 190 14 246 190 14
37846 -246 190 14 246 190 14 246 190 14 246 190 14
37847 -246 190 14 246 190 14 246 190 14 246 190 14
37848 -246 190 14 239 182 13 230 174 11 216 158 10
37849 -190 142 34 124 112 88 70 70 70 38 38 38
37850 - 18 18 18 6 6 6 0 0 0 0 0 0
37851 - 0 0 0 0 0 0 0 0 0 0 0 0
37852 - 0 0 0 0 0 0 0 0 0 0 0 0
37853 - 0 0 0 0 0 0 6 6 6 22 22 22
37854 - 62 62 62 168 124 44 206 145 10 224 166 10
37855 -236 178 12 239 182 13 242 186 14 242 186 14
37856 -246 186 14 246 190 14 246 190 14 246 190 14
37857 -246 190 14 246 190 14 246 190 14 246 190 14
37858 -246 190 14 246 190 14 246 190 14 246 190 14
37859 -246 190 14 236 178 12 216 158 10 175 118 6
37860 - 80 54 7 2 2 6 6 6 6 30 30 30
37861 - 54 54 54 62 62 62 50 50 50 38 38 38
37862 - 14 14 14 2 2 6 2 2 6 2 2 6
37863 - 2 2 6 2 2 6 2 2 6 2 2 6
37864 - 2 2 6 6 6 6 80 54 7 167 114 7
37865 -213 154 11 236 178 12 246 190 14 246 190 14
37866 -246 190 14 246 190 14 246 190 14 246 190 14
37867 -246 190 14 242 186 14 239 182 13 239 182 13
37868 -230 174 11 210 150 10 174 135 50 124 112 88
37869 - 82 82 82 54 54 54 34 34 34 18 18 18
37870 - 6 6 6 0 0 0 0 0 0 0 0 0
37871 - 0 0 0 0 0 0 0 0 0 0 0 0
37872 - 0 0 0 0 0 0 0 0 0 0 0 0
37873 - 0 0 0 0 0 0 6 6 6 18 18 18
37874 - 50 50 50 158 118 36 192 133 9 200 144 11
37875 -216 158 10 219 162 10 224 166 10 226 170 11
37876 -230 174 11 236 178 12 239 182 13 239 182 13
37877 -242 186 14 246 186 14 246 190 14 246 190 14
37878 -246 190 14 246 190 14 246 190 14 246 190 14
37879 -246 186 14 230 174 11 210 150 10 163 110 8
37880 -104 69 6 10 10 10 2 2 6 2 2 6
37881 - 2 2 6 2 2 6 2 2 6 2 2 6
37882 - 2 2 6 2 2 6 2 2 6 2 2 6
37883 - 2 2 6 2 2 6 2 2 6 2 2 6
37884 - 2 2 6 6 6 6 91 60 6 167 114 7
37885 -206 145 10 230 174 11 242 186 14 246 190 14
37886 -246 190 14 246 190 14 246 186 14 242 186 14
37887 -239 182 13 230 174 11 224 166 10 213 154 11
37888 -180 133 36 124 112 88 86 86 86 58 58 58
37889 - 38 38 38 22 22 22 10 10 10 6 6 6
37890 - 0 0 0 0 0 0 0 0 0 0 0 0
37891 - 0 0 0 0 0 0 0 0 0 0 0 0
37892 - 0 0 0 0 0 0 0 0 0 0 0 0
37893 - 0 0 0 0 0 0 0 0 0 14 14 14
37894 - 34 34 34 70 70 70 138 110 50 158 118 36
37895 -167 114 7 180 123 7 192 133 9 197 138 11
37896 -200 144 11 206 145 10 213 154 11 219 162 10
37897 -224 166 10 230 174 11 239 182 13 242 186 14
37898 -246 186 14 246 186 14 246 186 14 246 186 14
37899 -239 182 13 216 158 10 185 133 11 152 99 6
37900 -104 69 6 18 14 6 2 2 6 2 2 6
37901 - 2 2 6 2 2 6 2 2 6 2 2 6
37902 - 2 2 6 2 2 6 2 2 6 2 2 6
37903 - 2 2 6 2 2 6 2 2 6 2 2 6
37904 - 2 2 6 6 6 6 80 54 7 152 99 6
37905 -192 133 9 219 162 10 236 178 12 239 182 13
37906 -246 186 14 242 186 14 239 182 13 236 178 12
37907 -224 166 10 206 145 10 192 133 9 154 121 60
37908 - 94 94 94 62 62 62 42 42 42 22 22 22
37909 - 14 14 14 6 6 6 0 0 0 0 0 0
37910 - 0 0 0 0 0 0 0 0 0 0 0 0
37911 - 0 0 0 0 0 0 0 0 0 0 0 0
37912 - 0 0 0 0 0 0 0 0 0 0 0 0
37913 - 0 0 0 0 0 0 0 0 0 6 6 6
37914 - 18 18 18 34 34 34 58 58 58 78 78 78
37915 -101 98 89 124 112 88 142 110 46 156 107 11
37916 -163 110 8 167 114 7 175 118 6 180 123 7
37917 -185 133 11 197 138 11 210 150 10 219 162 10
37918 -226 170 11 236 178 12 236 178 12 234 174 13
37919 -219 162 10 197 138 11 163 110 8 130 83 6
37920 - 91 60 6 10 10 10 2 2 6 2 2 6
37921 - 18 18 18 38 38 38 38 38 38 38 38 38
37922 - 38 38 38 38 38 38 38 38 38 38 38 38
37923 - 38 38 38 38 38 38 26 26 26 2 2 6
37924 - 2 2 6 6 6 6 70 47 6 137 92 6
37925 -175 118 6 200 144 11 219 162 10 230 174 11
37926 -234 174 13 230 174 11 219 162 10 210 150 10
37927 -192 133 9 163 110 8 124 112 88 82 82 82
37928 - 50 50 50 30 30 30 14 14 14 6 6 6
37929 - 0 0 0 0 0 0 0 0 0 0 0 0
37930 - 0 0 0 0 0 0 0 0 0 0 0 0
37931 - 0 0 0 0 0 0 0 0 0 0 0 0
37932 - 0 0 0 0 0 0 0 0 0 0 0 0
37933 - 0 0 0 0 0 0 0 0 0 0 0 0
37934 - 6 6 6 14 14 14 22 22 22 34 34 34
37935 - 42 42 42 58 58 58 74 74 74 86 86 86
37936 -101 98 89 122 102 70 130 98 46 121 87 25
37937 -137 92 6 152 99 6 163 110 8 180 123 7
37938 -185 133 11 197 138 11 206 145 10 200 144 11
37939 -180 123 7 156 107 11 130 83 6 104 69 6
37940 - 50 34 6 54 54 54 110 110 110 101 98 89
37941 - 86 86 86 82 82 82 78 78 78 78 78 78
37942 - 78 78 78 78 78 78 78 78 78 78 78 78
37943 - 78 78 78 82 82 82 86 86 86 94 94 94
37944 -106 106 106 101 101 101 86 66 34 124 80 6
37945 -156 107 11 180 123 7 192 133 9 200 144 11
37946 -206 145 10 200 144 11 192 133 9 175 118 6
37947 -139 102 15 109 106 95 70 70 70 42 42 42
37948 - 22 22 22 10 10 10 0 0 0 0 0 0
37949 - 0 0 0 0 0 0 0 0 0 0 0 0
37950 - 0 0 0 0 0 0 0 0 0 0 0 0
37951 - 0 0 0 0 0 0 0 0 0 0 0 0
37952 - 0 0 0 0 0 0 0 0 0 0 0 0
37953 - 0 0 0 0 0 0 0 0 0 0 0 0
37954 - 0 0 0 0 0 0 6 6 6 10 10 10
37955 - 14 14 14 22 22 22 30 30 30 38 38 38
37956 - 50 50 50 62 62 62 74 74 74 90 90 90
37957 -101 98 89 112 100 78 121 87 25 124 80 6
37958 -137 92 6 152 99 6 152 99 6 152 99 6
37959 -138 86 6 124 80 6 98 70 6 86 66 30
37960 -101 98 89 82 82 82 58 58 58 46 46 46
37961 - 38 38 38 34 34 34 34 34 34 34 34 34
37962 - 34 34 34 34 34 34 34 34 34 34 34 34
37963 - 34 34 34 34 34 34 38 38 38 42 42 42
37964 - 54 54 54 82 82 82 94 86 76 91 60 6
37965 -134 86 6 156 107 11 167 114 7 175 118 6
37966 -175 118 6 167 114 7 152 99 6 121 87 25
37967 -101 98 89 62 62 62 34 34 34 18 18 18
37968 - 6 6 6 0 0 0 0 0 0 0 0 0
37969 - 0 0 0 0 0 0 0 0 0 0 0 0
37970 - 0 0 0 0 0 0 0 0 0 0 0 0
37971 - 0 0 0 0 0 0 0 0 0 0 0 0
37972 - 0 0 0 0 0 0 0 0 0 0 0 0
37973 - 0 0 0 0 0 0 0 0 0 0 0 0
37974 - 0 0 0 0 0 0 0 0 0 0 0 0
37975 - 0 0 0 6 6 6 6 6 6 10 10 10
37976 - 18 18 18 22 22 22 30 30 30 42 42 42
37977 - 50 50 50 66 66 66 86 86 86 101 98 89
37978 -106 86 58 98 70 6 104 69 6 104 69 6
37979 -104 69 6 91 60 6 82 62 34 90 90 90
37980 - 62 62 62 38 38 38 22 22 22 14 14 14
37981 - 10 10 10 10 10 10 10 10 10 10 10 10
37982 - 10 10 10 10 10 10 6 6 6 10 10 10
37983 - 10 10 10 10 10 10 10 10 10 14 14 14
37984 - 22 22 22 42 42 42 70 70 70 89 81 66
37985 - 80 54 7 104 69 6 124 80 6 137 92 6
37986 -134 86 6 116 81 8 100 82 52 86 86 86
37987 - 58 58 58 30 30 30 14 14 14 6 6 6
37988 - 0 0 0 0 0 0 0 0 0 0 0 0
37989 - 0 0 0 0 0 0 0 0 0 0 0 0
37990 - 0 0 0 0 0 0 0 0 0 0 0 0
37991 - 0 0 0 0 0 0 0 0 0 0 0 0
37992 - 0 0 0 0 0 0 0 0 0 0 0 0
37993 - 0 0 0 0 0 0 0 0 0 0 0 0
37994 - 0 0 0 0 0 0 0 0 0 0 0 0
37995 - 0 0 0 0 0 0 0 0 0 0 0 0
37996 - 0 0 0 6 6 6 10 10 10 14 14 14
37997 - 18 18 18 26 26 26 38 38 38 54 54 54
37998 - 70 70 70 86 86 86 94 86 76 89 81 66
37999 - 89 81 66 86 86 86 74 74 74 50 50 50
38000 - 30 30 30 14 14 14 6 6 6 0 0 0
38001 - 0 0 0 0 0 0 0 0 0 0 0 0
38002 - 0 0 0 0 0 0 0 0 0 0 0 0
38003 - 0 0 0 0 0 0 0 0 0 0 0 0
38004 - 6 6 6 18 18 18 34 34 34 58 58 58
38005 - 82 82 82 89 81 66 89 81 66 89 81 66
38006 - 94 86 66 94 86 76 74 74 74 50 50 50
38007 - 26 26 26 14 14 14 6 6 6 0 0 0
38008 - 0 0 0 0 0 0 0 0 0 0 0 0
38009 - 0 0 0 0 0 0 0 0 0 0 0 0
38010 - 0 0 0 0 0 0 0 0 0 0 0 0
38011 - 0 0 0 0 0 0 0 0 0 0 0 0
38012 - 0 0 0 0 0 0 0 0 0 0 0 0
38013 - 0 0 0 0 0 0 0 0 0 0 0 0
38014 - 0 0 0 0 0 0 0 0 0 0 0 0
38015 - 0 0 0 0 0 0 0 0 0 0 0 0
38016 - 0 0 0 0 0 0 0 0 0 0 0 0
38017 - 6 6 6 6 6 6 14 14 14 18 18 18
38018 - 30 30 30 38 38 38 46 46 46 54 54 54
38019 - 50 50 50 42 42 42 30 30 30 18 18 18
38020 - 10 10 10 0 0 0 0 0 0 0 0 0
38021 - 0 0 0 0 0 0 0 0 0 0 0 0
38022 - 0 0 0 0 0 0 0 0 0 0 0 0
38023 - 0 0 0 0 0 0 0 0 0 0 0 0
38024 - 0 0 0 6 6 6 14 14 14 26 26 26
38025 - 38 38 38 50 50 50 58 58 58 58 58 58
38026 - 54 54 54 42 42 42 30 30 30 18 18 18
38027 - 10 10 10 0 0 0 0 0 0 0 0 0
38028 - 0 0 0 0 0 0 0 0 0 0 0 0
38029 - 0 0 0 0 0 0 0 0 0 0 0 0
38030 - 0 0 0 0 0 0 0 0 0 0 0 0
38031 - 0 0 0 0 0 0 0 0 0 0 0 0
38032 - 0 0 0 0 0 0 0 0 0 0 0 0
38033 - 0 0 0 0 0 0 0 0 0 0 0 0
38034 - 0 0 0 0 0 0 0 0 0 0 0 0
38035 - 0 0 0 0 0 0 0 0 0 0 0 0
38036 - 0 0 0 0 0 0 0 0 0 0 0 0
38037 - 0 0 0 0 0 0 0 0 0 6 6 6
38038 - 6 6 6 10 10 10 14 14 14 18 18 18
38039 - 18 18 18 14 14 14 10 10 10 6 6 6
38040 - 0 0 0 0 0 0 0 0 0 0 0 0
38041 - 0 0 0 0 0 0 0 0 0 0 0 0
38042 - 0 0 0 0 0 0 0 0 0 0 0 0
38043 - 0 0 0 0 0 0 0 0 0 0 0 0
38044 - 0 0 0 0 0 0 0 0 0 6 6 6
38045 - 14 14 14 18 18 18 22 22 22 22 22 22
38046 - 18 18 18 14 14 14 10 10 10 6 6 6
38047 - 0 0 0 0 0 0 0 0 0 0 0 0
38048 - 0 0 0 0 0 0 0 0 0 0 0 0
38049 - 0 0 0 0 0 0 0 0 0 0 0 0
38050 - 0 0 0 0 0 0 0 0 0 0 0 0
38051 - 0 0 0 0 0 0 0 0 0 0 0 0
38052 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38053 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38054 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38055 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38056 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38057 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38058 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38059 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38060 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38061 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38062 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38063 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38064 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38065 +4 4 4 4 4 4
38066 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38067 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38068 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38069 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38070 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38071 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38072 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38073 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38074 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38075 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38076 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38077 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38078 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38079 +4 4 4 4 4 4
38080 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38081 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38082 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38083 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38084 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38085 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38086 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38087 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38088 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38089 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38090 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38091 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38092 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38093 +4 4 4 4 4 4
38094 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38095 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38096 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38097 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38098 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38099 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38100 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38101 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38102 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38103 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38104 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38105 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38106 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38107 +4 4 4 4 4 4
38108 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38109 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38110 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38111 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38112 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38113 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38114 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38115 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38116 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38117 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38118 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38119 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38120 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38121 +4 4 4 4 4 4
38122 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38123 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38124 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38125 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38126 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38127 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38128 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38129 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38130 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38131 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38132 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38133 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38134 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38135 +4 4 4 4 4 4
38136 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38137 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38138 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38139 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38140 +4 4 4 4 4 4 4 4 4 3 3 3 0 0 0 0 0 0
38141 +0 0 0 0 0 0 0 0 0 0 0 0 3 3 3 4 4 4
38142 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38143 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38144 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38145 +4 4 4 4 4 4 4 4 4 4 4 4 1 1 1 0 0 0
38146 +0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
38147 +4 4 4 4 4 4 4 4 4 2 1 0 2 1 0 3 2 2
38148 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38149 +4 4 4 4 4 4
38150 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38151 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38152 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38153 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38154 +4 4 4 4 4 4 2 2 2 0 0 0 3 4 3 26 28 28
38155 +37 38 37 37 38 37 14 17 19 2 2 2 0 0 0 2 2 2
38156 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38157 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38158 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38159 +4 4 4 4 4 4 3 3 3 0 0 0 1 1 1 6 6 6
38160 +2 2 2 0 0 0 3 3 3 4 4 4 4 4 4 4 4 4
38161 +4 4 5 3 3 3 1 0 0 0 0 0 1 0 0 0 0 0
38162 +1 1 1 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38163 +4 4 4 4 4 4
38164 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38165 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38166 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38167 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38168 +2 2 2 0 0 0 0 0 0 14 17 19 60 74 84 137 136 137
38169 +153 152 153 137 136 137 125 124 125 60 73 81 6 6 6 3 1 0
38170 +0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
38171 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38172 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38173 +4 4 4 4 4 4 0 0 0 4 4 4 41 54 63 125 124 125
38174 +60 73 81 6 6 6 4 0 0 3 3 3 4 4 4 4 4 4
38175 +4 4 4 0 0 0 6 9 11 41 54 63 41 65 82 22 30 35
38176 +2 2 2 2 1 0 4 4 4 4 4 4 4 4 4 4 4 4
38177 +4 4 4 4 4 4
38178 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38179 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38180 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38181 +4 4 4 4 4 4 5 5 5 5 5 5 2 2 2 0 0 0
38182 +4 0 0 6 6 6 41 54 63 137 136 137 174 174 174 167 166 167
38183 +165 164 165 165 164 165 163 162 163 163 162 163 125 124 125 41 54 63
38184 +1 1 1 0 0 0 0 0 0 3 3 3 5 5 5 4 4 4
38185 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38186 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
38187 +3 3 3 2 0 0 4 0 0 60 73 81 156 155 156 167 166 167
38188 +163 162 163 85 115 134 5 7 8 0 0 0 4 4 4 5 5 5
38189 +0 0 0 2 5 5 55 98 126 90 154 193 90 154 193 72 125 159
38190 +37 51 59 2 0 0 1 1 1 4 5 5 4 4 4 4 4 4
38191 +4 4 4 4 4 4
38192 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38193 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38194 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38195 +4 4 4 5 5 5 4 4 4 1 1 1 0 0 0 3 3 3
38196 +37 38 37 125 124 125 163 162 163 174 174 174 158 157 158 158 157 158
38197 +156 155 156 156 155 156 158 157 158 165 164 165 174 174 174 166 165 166
38198 +125 124 125 16 19 21 1 0 0 0 0 0 0 0 0 4 4 4
38199 +5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
38200 +4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 1 1 1
38201 +0 0 0 0 0 0 37 38 37 153 152 153 174 174 174 158 157 158
38202 +174 174 174 163 162 163 37 38 37 4 3 3 4 0 0 1 1 1
38203 +0 0 0 22 40 52 101 161 196 101 161 196 90 154 193 101 161 196
38204 +64 123 161 14 17 19 0 0 0 4 4 4 4 4 4 4 4 4
38205 +4 4 4 4 4 4
38206 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38207 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38208 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
38209 +5 5 5 2 2 2 0 0 0 4 0 0 24 26 27 85 115 134
38210 +156 155 156 174 174 174 167 166 167 156 155 156 154 153 154 157 156 157
38211 +156 155 156 156 155 156 155 154 155 153 152 153 158 157 158 167 166 167
38212 +174 174 174 156 155 156 60 74 84 16 19 21 0 0 0 0 0 0
38213 +1 1 1 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
38214 +4 4 4 5 5 5 6 6 6 3 3 3 0 0 0 4 0 0
38215 +13 16 17 60 73 81 137 136 137 165 164 165 156 155 156 153 152 153
38216 +174 174 174 177 184 187 60 73 81 3 1 0 0 0 0 1 1 2
38217 +22 30 35 64 123 161 136 185 209 90 154 193 90 154 193 90 154 193
38218 +90 154 193 21 29 34 0 0 0 3 2 2 4 4 5 4 4 4
38219 +4 4 4 4 4 4
38220 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38221 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38222 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 3 3 3
38223 +0 0 0 0 0 0 10 13 16 60 74 84 157 156 157 174 174 174
38224 +174 174 174 158 157 158 153 152 153 154 153 154 156 155 156 155 154 155
38225 +156 155 156 155 154 155 154 153 154 157 156 157 154 153 154 153 152 153
38226 +163 162 163 174 174 174 177 184 187 137 136 137 60 73 81 13 16 17
38227 +4 0 0 0 0 0 3 3 3 5 5 5 4 4 4 4 4 4
38228 +5 5 5 4 4 4 1 1 1 0 0 0 3 3 3 41 54 63
38229 +131 129 131 174 174 174 174 174 174 174 174 174 167 166 167 174 174 174
38230 +190 197 201 137 136 137 24 26 27 4 0 0 16 21 25 50 82 103
38231 +90 154 193 136 185 209 90 154 193 101 161 196 101 161 196 101 161 196
38232 +31 91 132 3 6 7 0 0 0 4 4 4 4 4 4 4 4 4
38233 +4 4 4 4 4 4
38234 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38235 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38236 +4 4 4 4 4 4 4 4 4 2 2 2 0 0 0 4 0 0
38237 +4 0 0 43 57 68 137 136 137 177 184 187 174 174 174 163 162 163
38238 +155 154 155 155 154 155 156 155 156 155 154 155 158 157 158 165 164 165
38239 +167 166 167 166 165 166 163 162 163 157 156 157 155 154 155 155 154 155
38240 +153 152 153 156 155 156 167 166 167 174 174 174 174 174 174 131 129 131
38241 +41 54 63 5 5 5 0 0 0 0 0 0 3 3 3 4 4 4
38242 +1 1 1 0 0 0 1 0 0 26 28 28 125 124 125 174 174 174
38243 +177 184 187 174 174 174 174 174 174 156 155 156 131 129 131 137 136 137
38244 +125 124 125 24 26 27 4 0 0 41 65 82 90 154 193 136 185 209
38245 +136 185 209 101 161 196 53 118 160 37 112 160 90 154 193 34 86 122
38246 +7 12 15 0 0 0 4 4 4 4 4 4 4 4 4 4 4 4
38247 +4 4 4 4 4 4
38248 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38249 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38250 +4 4 4 3 3 3 0 0 0 0 0 0 5 5 5 37 38 37
38251 +125 124 125 167 166 167 174 174 174 167 166 167 158 157 158 155 154 155
38252 +156 155 156 156 155 156 156 155 156 163 162 163 167 166 167 155 154 155
38253 +137 136 137 153 152 153 156 155 156 165 164 165 163 162 163 156 155 156
38254 +156 155 156 156 155 156 155 154 155 158 157 158 166 165 166 174 174 174
38255 +167 166 167 125 124 125 37 38 37 1 0 0 0 0 0 0 0 0
38256 +0 0 0 24 26 27 60 74 84 158 157 158 174 174 174 174 174 174
38257 +166 165 166 158 157 158 125 124 125 41 54 63 13 16 17 6 6 6
38258 +6 6 6 37 38 37 80 127 157 136 185 209 101 161 196 101 161 196
38259 +90 154 193 28 67 93 6 10 14 13 20 25 13 20 25 6 10 14
38260 +1 1 2 4 3 3 4 4 4 4 4 4 4 4 4 4 4 4
38261 +4 4 4 4 4 4
38262 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38263 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38264 +1 1 1 1 0 0 4 3 3 37 38 37 60 74 84 153 152 153
38265 +167 166 167 167 166 167 158 157 158 154 153 154 155 154 155 156 155 156
38266 +157 156 157 158 157 158 167 166 167 167 166 167 131 129 131 43 57 68
38267 +26 28 28 37 38 37 60 73 81 131 129 131 165 164 165 166 165 166
38268 +158 157 158 155 154 155 156 155 156 156 155 156 156 155 156 158 157 158
38269 +165 164 165 174 174 174 163 162 163 60 74 84 16 19 21 13 16 17
38270 +60 73 81 131 129 131 174 174 174 174 174 174 167 166 167 165 164 165
38271 +137 136 137 60 73 81 24 26 27 4 0 0 4 0 0 16 19 21
38272 +52 104 138 101 161 196 136 185 209 136 185 209 90 154 193 27 99 146
38273 +13 20 25 4 5 7 2 5 5 4 5 7 1 1 2 0 0 0
38274 +4 4 4 4 4 4 3 3 3 2 2 2 2 2 2 4 4 4
38275 +4 4 4 4 4 4
38276 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38277 +4 4 4 4 4 4 4 4 4 4 4 4 3 3 3 0 0 0
38278 +0 0 0 13 16 17 60 73 81 137 136 137 174 174 174 166 165 166
38279 +158 157 158 156 155 156 157 156 157 156 155 156 155 154 155 158 157 158
38280 +167 166 167 174 174 174 153 152 153 60 73 81 16 19 21 4 0 0
38281 +4 0 0 4 0 0 6 6 6 26 28 28 60 74 84 158 157 158
38282 +174 174 174 166 165 166 157 156 157 155 154 155 156 155 156 156 155 156
38283 +155 154 155 158 157 158 167 166 167 167 166 167 131 129 131 125 124 125
38284 +137 136 137 167 166 167 167 166 167 174 174 174 158 157 158 125 124 125
38285 +16 19 21 4 0 0 4 0 0 10 13 16 49 76 92 107 159 188
38286 +136 185 209 136 185 209 90 154 193 26 108 161 22 40 52 6 10 14
38287 +2 3 3 1 1 2 1 1 2 4 4 5 4 4 5 4 4 5
38288 +4 4 5 2 2 1 0 0 0 0 0 0 0 0 0 2 2 2
38289 +4 4 4 4 4 4
38290 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38291 +4 4 4 5 5 5 3 3 3 0 0 0 1 0 0 4 0 0
38292 +37 51 59 131 129 131 167 166 167 167 166 167 163 162 163 157 156 157
38293 +157 156 157 155 154 155 153 152 153 157 156 157 167 166 167 174 174 174
38294 +153 152 153 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
38295 +4 3 3 4 3 3 4 0 0 6 6 6 4 0 0 37 38 37
38296 +125 124 125 174 174 174 174 174 174 165 164 165 156 155 156 154 153 154
38297 +156 155 156 156 155 156 155 154 155 163 162 163 158 157 158 163 162 163
38298 +174 174 174 174 174 174 174 174 174 125 124 125 37 38 37 0 0 0
38299 +4 0 0 6 9 11 41 54 63 90 154 193 136 185 209 146 190 211
38300 +136 185 209 37 112 160 22 40 52 6 10 14 3 6 7 1 1 2
38301 +1 1 2 3 3 3 1 1 2 3 3 3 4 4 4 4 4 4
38302 +2 2 2 2 0 0 16 19 21 37 38 37 24 26 27 0 0 0
38303 +0 0 0 4 4 4
38304 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
38305 +4 4 4 0 0 0 0 0 0 0 0 0 26 28 28 120 125 127
38306 +158 157 158 174 174 174 165 164 165 157 156 157 155 154 155 156 155 156
38307 +153 152 153 153 152 153 167 166 167 174 174 174 174 174 174 125 124 125
38308 +37 38 37 4 0 0 0 0 0 4 0 0 4 3 3 4 4 4
38309 +4 4 4 4 4 4 5 5 5 4 0 0 4 0 0 4 0 0
38310 +4 3 3 43 57 68 137 136 137 174 174 174 174 174 174 165 164 165
38311 +154 153 154 153 152 153 153 152 153 153 152 153 163 162 163 174 174 174
38312 +174 174 174 153 152 153 60 73 81 6 6 6 4 0 0 4 3 3
38313 +32 43 50 80 127 157 136 185 209 146 190 211 146 190 211 90 154 193
38314 +28 67 93 28 67 93 40 71 93 3 6 7 1 1 2 2 5 5
38315 +50 82 103 79 117 143 26 37 45 0 0 0 3 3 3 1 1 1
38316 +0 0 0 41 54 63 137 136 137 174 174 174 153 152 153 60 73 81
38317 +2 0 0 0 0 0
38318 +4 4 4 4 4 4 4 4 4 4 4 4 6 6 6 2 2 2
38319 +0 0 0 2 0 0 24 26 27 60 74 84 153 152 153 174 174 174
38320 +174 174 174 157 156 157 154 153 154 156 155 156 154 153 154 153 152 153
38321 +165 164 165 174 174 174 177 184 187 137 136 137 43 57 68 6 6 6
38322 +4 0 0 2 0 0 3 3 3 5 5 5 5 5 5 4 4 4
38323 +4 4 4 4 4 4 4 4 4 5 5 5 6 6 6 4 3 3
38324 +4 0 0 4 0 0 24 26 27 60 73 81 153 152 153 174 174 174
38325 +174 174 174 158 157 158 158 157 158 174 174 174 174 174 174 158 157 158
38326 +60 74 84 24 26 27 4 0 0 4 0 0 17 23 27 59 113 148
38327 +136 185 209 191 222 234 146 190 211 136 185 209 31 91 132 7 11 13
38328 +22 40 52 101 161 196 90 154 193 6 9 11 3 4 4 43 95 132
38329 +136 185 209 172 205 220 55 98 126 0 0 0 0 0 0 2 0 0
38330 +26 28 28 153 152 153 177 184 187 167 166 167 177 184 187 165 164 165
38331 +37 38 37 0 0 0
38332 +4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
38333 +13 16 17 60 73 81 137 136 137 174 174 174 174 174 174 165 164 165
38334 +153 152 153 153 152 153 155 154 155 154 153 154 158 157 158 174 174 174
38335 +177 184 187 163 162 163 60 73 81 16 19 21 4 0 0 4 0 0
38336 +4 3 3 4 4 4 5 5 5 5 5 5 4 4 4 5 5 5
38337 +5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 5 5 5
38338 +6 6 6 4 0 0 4 0 0 4 0 0 24 26 27 60 74 84
38339 +166 165 166 174 174 174 177 184 187 165 164 165 125 124 125 24 26 27
38340 +4 0 0 4 0 0 5 5 5 50 82 103 136 185 209 172 205 220
38341 +146 190 211 136 185 209 26 108 161 22 40 52 7 12 15 44 81 103
38342 +71 116 144 28 67 93 37 51 59 41 65 82 100 139 164 101 161 196
38343 +90 154 193 90 154 193 28 67 93 0 0 0 0 0 0 26 28 28
38344 +125 124 125 167 166 167 163 162 163 153 152 153 163 162 163 174 174 174
38345 +85 115 134 4 0 0
38346 +4 4 4 5 5 5 4 4 4 1 0 0 4 0 0 34 47 55
38347 +125 124 125 174 174 174 174 174 174 167 166 167 157 156 157 153 152 153
38348 +155 154 155 155 154 155 158 157 158 166 165 166 167 166 167 154 153 154
38349 +125 124 125 26 28 28 4 0 0 4 0 0 4 0 0 5 5 5
38350 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 1 1 1
38351 +0 0 0 0 0 0 1 1 1 4 4 4 4 4 4 4 4 4
38352 +5 5 5 5 5 5 4 3 3 4 0 0 4 0 0 6 6 6
38353 +37 38 37 131 129 131 137 136 137 37 38 37 0 0 0 4 0 0
38354 +4 5 5 43 61 72 90 154 193 172 205 220 146 190 211 136 185 209
38355 +90 154 193 28 67 93 13 20 25 43 61 72 71 116 144 44 81 103
38356 +2 5 5 7 11 13 59 113 148 101 161 196 90 154 193 28 67 93
38357 +13 20 25 6 10 14 0 0 0 13 16 17 60 73 81 137 136 137
38358 +166 165 166 158 157 158 156 155 156 154 153 154 167 166 167 174 174 174
38359 +60 73 81 4 0 0
38360 +4 4 4 4 4 4 0 0 0 3 3 3 60 74 84 174 174 174
38361 +174 174 174 167 166 167 163 162 163 155 154 155 157 156 157 155 154 155
38362 +156 155 156 163 162 163 167 166 167 158 157 158 125 124 125 37 38 37
38363 +4 3 3 4 0 0 4 0 0 6 6 6 6 6 6 5 5 5
38364 +4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 2 3 3
38365 +10 13 16 7 11 13 1 0 0 0 0 0 2 2 1 4 4 4
38366 +4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 4 0 0
38367 +4 0 0 7 11 13 13 16 17 4 0 0 3 3 3 34 47 55
38368 +80 127 157 146 190 211 172 205 220 136 185 209 136 185 209 136 185 209
38369 +28 67 93 22 40 52 55 98 126 55 98 126 21 29 34 7 11 13
38370 +50 82 103 101 161 196 101 161 196 35 83 115 13 20 25 2 2 1
38371 +1 1 2 1 1 2 37 51 59 131 129 131 174 174 174 174 174 174
38372 +167 166 167 163 162 163 163 162 163 167 166 167 174 174 174 125 124 125
38373 +16 19 21 4 0 0
38374 +4 4 4 4 0 0 4 0 0 60 74 84 174 174 174 174 174 174
38375 +158 157 158 155 154 155 155 154 155 156 155 156 155 154 155 158 157 158
38376 +167 166 167 165 164 165 131 129 131 60 73 81 13 16 17 4 0 0
38377 +4 0 0 4 3 3 6 6 6 4 3 3 5 5 5 4 4 4
38378 +4 4 4 3 2 2 0 0 0 0 0 0 7 11 13 45 69 86
38379 +80 127 157 71 116 144 43 61 72 7 11 13 0 0 0 1 1 1
38380 +4 3 3 4 4 4 4 4 4 4 4 4 6 6 6 5 5 5
38381 +3 2 2 4 0 0 1 0 0 21 29 34 59 113 148 136 185 209
38382 +146 190 211 136 185 209 136 185 209 136 185 209 136 185 209 136 185 209
38383 +68 124 159 44 81 103 22 40 52 13 16 17 43 61 72 90 154 193
38384 +136 185 209 59 113 148 21 29 34 3 4 3 1 1 1 0 0 0
38385 +24 26 27 125 124 125 163 162 163 174 174 174 166 165 166 165 164 165
38386 +163 162 163 125 124 125 125 124 125 125 124 125 125 124 125 26 28 28
38387 +4 0 0 4 3 3
38388 +3 3 3 0 0 0 24 26 27 153 152 153 177 184 187 158 157 158
38389 +156 155 156 156 155 156 155 154 155 155 154 155 165 164 165 174 174 174
38390 +155 154 155 60 74 84 26 28 28 4 0 0 4 0 0 3 1 0
38391 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 3 3
38392 +2 0 0 0 0 0 0 0 0 32 43 50 72 125 159 101 161 196
38393 +136 185 209 101 161 196 101 161 196 79 117 143 32 43 50 0 0 0
38394 +0 0 0 2 2 2 4 4 4 4 4 4 3 3 3 1 0 0
38395 +0 0 0 4 5 5 49 76 92 101 161 196 146 190 211 146 190 211
38396 +136 185 209 136 185 209 136 185 209 136 185 209 136 185 209 90 154 193
38397 +28 67 93 13 16 17 37 51 59 80 127 157 136 185 209 90 154 193
38398 +22 40 52 6 9 11 3 4 3 2 2 1 16 19 21 60 73 81
38399 +137 136 137 163 162 163 158 157 158 166 165 166 167 166 167 153 152 153
38400 +60 74 84 37 38 37 6 6 6 13 16 17 4 0 0 1 0 0
38401 +3 2 2 4 4 4
38402 +3 2 2 4 0 0 37 38 37 137 136 137 167 166 167 158 157 158
38403 +157 156 157 154 153 154 157 156 157 167 166 167 174 174 174 125 124 125
38404 +37 38 37 4 0 0 4 0 0 4 0 0 4 3 3 4 4 4
38405 +4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
38406 +0 0 0 16 21 25 55 98 126 90 154 193 136 185 209 101 161 196
38407 +101 161 196 101 161 196 136 185 209 136 185 209 101 161 196 55 98 126
38408 +14 17 19 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
38409 +22 40 52 90 154 193 146 190 211 146 190 211 136 185 209 136 185 209
38410 +136 185 209 136 185 209 136 185 209 101 161 196 35 83 115 7 11 13
38411 +17 23 27 59 113 148 136 185 209 101 161 196 34 86 122 7 12 15
38412 +2 5 5 3 4 3 6 6 6 60 73 81 131 129 131 163 162 163
38413 +166 165 166 174 174 174 174 174 174 163 162 163 125 124 125 41 54 63
38414 +13 16 17 4 0 0 4 0 0 4 0 0 1 0 0 2 2 2
38415 +4 4 4 4 4 4
38416 +1 1 1 2 1 0 43 57 68 137 136 137 153 152 153 153 152 153
38417 +163 162 163 156 155 156 165 164 165 167 166 167 60 74 84 6 6 6
38418 +4 0 0 4 0 0 5 5 5 4 4 4 4 4 4 4 4 4
38419 +4 5 5 6 6 6 4 3 3 0 0 0 0 0 0 11 15 18
38420 +40 71 93 100 139 164 101 161 196 101 161 196 101 161 196 101 161 196
38421 +101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 136 185 209
38422 +101 161 196 45 69 86 6 6 6 0 0 0 17 23 27 55 98 126
38423 +136 185 209 146 190 211 136 185 209 136 185 209 136 185 209 136 185 209
38424 +136 185 209 136 185 209 90 154 193 22 40 52 7 11 13 50 82 103
38425 +136 185 209 136 185 209 53 118 160 22 40 52 7 11 13 2 5 5
38426 +3 4 3 37 38 37 125 124 125 157 156 157 166 165 166 167 166 167
38427 +174 174 174 174 174 174 137 136 137 60 73 81 4 0 0 4 0 0
38428 +4 0 0 4 0 0 5 5 5 3 3 3 3 3 3 4 4 4
38429 +4 4 4 4 4 4
38430 +4 0 0 4 0 0 41 54 63 137 136 137 125 124 125 131 129 131
38431 +155 154 155 167 166 167 174 174 174 60 74 84 6 6 6 4 0 0
38432 +4 3 3 6 6 6 4 4 4 4 4 4 4 4 4 5 5 5
38433 +4 4 4 1 1 1 0 0 0 3 6 7 41 65 82 72 125 159
38434 +101 161 196 101 161 196 101 161 196 90 154 193 90 154 193 101 161 196
38435 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
38436 +136 185 209 136 185 209 80 127 157 55 98 126 101 161 196 146 190 211
38437 +136 185 209 136 185 209 136 185 209 101 161 196 136 185 209 101 161 196
38438 +136 185 209 101 161 196 35 83 115 22 30 35 101 161 196 172 205 220
38439 +90 154 193 28 67 93 7 11 13 2 5 5 3 4 3 13 16 17
38440 +85 115 134 167 166 167 174 174 174 174 174 174 174 174 174 174 174 174
38441 +167 166 167 60 74 84 13 16 17 4 0 0 4 0 0 4 3 3
38442 +6 6 6 5 5 5 4 4 4 5 5 5 4 4 4 5 5 5
38443 +5 5 5 5 5 5
38444 +1 1 1 4 0 0 41 54 63 137 136 137 137 136 137 125 124 125
38445 +131 129 131 167 166 167 157 156 157 37 38 37 6 6 6 4 0 0
38446 +6 6 6 5 5 5 4 4 4 4 4 4 4 5 5 2 2 1
38447 +0 0 0 0 0 0 26 37 45 58 111 146 101 161 196 101 161 196
38448 +101 161 196 90 154 193 90 154 193 90 154 193 101 161 196 101 161 196
38449 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
38450 +101 161 196 136 185 209 136 185 209 136 185 209 146 190 211 136 185 209
38451 +136 185 209 101 161 196 136 185 209 136 185 209 101 161 196 136 185 209
38452 +101 161 196 136 185 209 136 185 209 136 185 209 136 185 209 16 89 141
38453 +7 11 13 2 5 5 2 5 5 13 16 17 60 73 81 154 154 154
38454 +174 174 174 174 174 174 174 174 174 174 174 174 163 162 163 125 124 125
38455 +24 26 27 4 0 0 4 0 0 4 0 0 5 5 5 5 5 5
38456 +4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
38457 +5 5 5 4 4 4
38458 +4 0 0 6 6 6 37 38 37 137 136 137 137 136 137 131 129 131
38459 +131 129 131 153 152 153 131 129 131 26 28 28 4 0 0 4 3 3
38460 +6 6 6 4 4 4 4 4 4 4 4 4 0 0 0 0 0 0
38461 +13 20 25 51 88 114 90 154 193 101 161 196 101 161 196 90 154 193
38462 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
38463 +101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 101 161 196
38464 +101 161 196 136 185 209 101 161 196 136 185 209 136 185 209 101 161 196
38465 +136 185 209 101 161 196 136 185 209 101 161 196 101 161 196 101 161 196
38466 +136 185 209 136 185 209 136 185 209 37 112 160 21 29 34 5 7 8
38467 +2 5 5 13 16 17 43 57 68 131 129 131 174 174 174 174 174 174
38468 +174 174 174 167 166 167 157 156 157 125 124 125 37 38 37 4 0 0
38469 +4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
38470 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38471 +4 4 4 4 4 4
38472 +1 1 1 4 0 0 41 54 63 153 152 153 137 136 137 137 136 137
38473 +137 136 137 153 152 153 125 124 125 24 26 27 4 0 0 3 2 2
38474 +4 4 4 4 4 4 4 3 3 4 0 0 3 6 7 43 61 72
38475 +64 123 161 101 161 196 90 154 193 90 154 193 90 154 193 90 154 193
38476 +90 154 193 90 154 193 90 154 193 90 154 193 101 161 196 90 154 193
38477 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
38478 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
38479 +136 185 209 101 161 196 101 161 196 136 185 209 136 185 209 101 161 196
38480 +101 161 196 90 154 193 28 67 93 13 16 17 7 11 13 3 6 7
38481 +37 51 59 125 124 125 163 162 163 174 174 174 167 166 167 166 165 166
38482 +167 166 167 131 129 131 60 73 81 4 0 0 4 0 0 4 0 0
38483 +3 3 3 5 5 5 6 6 6 4 4 4 4 4 4 4 4 4
38484 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38485 +4 4 4 4 4 4
38486 +4 0 0 4 0 0 41 54 63 137 136 137 153 152 153 137 136 137
38487 +153 152 153 157 156 157 125 124 125 24 26 27 0 0 0 2 2 2
38488 +4 4 4 4 4 4 2 0 0 0 0 0 28 67 93 90 154 193
38489 +90 154 193 90 154 193 90 154 193 90 154 193 64 123 161 90 154 193
38490 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
38491 +90 154 193 101 161 196 101 161 196 101 161 196 90 154 193 136 185 209
38492 +101 161 196 101 161 196 136 185 209 101 161 196 136 185 209 101 161 196
38493 +101 161 196 101 161 196 136 185 209 101 161 196 101 161 196 90 154 193
38494 +35 83 115 13 16 17 3 6 7 2 5 5 13 16 17 60 74 84
38495 +154 154 154 166 165 166 165 164 165 158 157 158 163 162 163 157 156 157
38496 +60 74 84 13 16 17 4 0 0 4 0 0 3 2 2 4 4 4
38497 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38498 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38499 +4 4 4 4 4 4
38500 +1 1 1 4 0 0 41 54 63 157 156 157 155 154 155 137 136 137
38501 +153 152 153 158 157 158 137 136 137 26 28 28 2 0 0 2 2 2
38502 +4 4 4 4 4 4 1 0 0 6 10 14 34 86 122 90 154 193
38503 +64 123 161 90 154 193 64 123 161 90 154 193 90 154 193 90 154 193
38504 +64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
38505 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
38506 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
38507 +136 185 209 101 161 196 136 185 209 90 154 193 26 108 161 22 40 52
38508 +13 16 17 5 7 8 2 5 5 2 5 5 37 38 37 165 164 165
38509 +174 174 174 163 162 163 154 154 154 165 164 165 167 166 167 60 73 81
38510 +6 6 6 4 0 0 4 0 0 4 4 4 4 4 4 4 4 4
38511 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38512 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38513 +4 4 4 4 4 4
38514 +4 0 0 6 6 6 41 54 63 156 155 156 158 157 158 153 152 153
38515 +156 155 156 165 164 165 137 136 137 26 28 28 0 0 0 2 2 2
38516 +4 4 5 4 4 4 2 0 0 7 12 15 31 96 139 64 123 161
38517 +90 154 193 64 123 161 90 154 193 90 154 193 64 123 161 90 154 193
38518 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
38519 +90 154 193 90 154 193 90 154 193 101 161 196 101 161 196 101 161 196
38520 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
38521 +101 161 196 136 185 209 26 108 161 22 40 52 7 11 13 5 7 8
38522 +2 5 5 2 5 5 2 5 5 2 2 1 37 38 37 158 157 158
38523 +174 174 174 154 154 154 156 155 156 167 166 167 165 164 165 37 38 37
38524 +4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38525 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38526 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38527 +4 4 4 4 4 4
38528 +3 1 0 4 0 0 60 73 81 157 156 157 163 162 163 153 152 153
38529 +158 157 158 167 166 167 137 136 137 26 28 28 2 0 0 2 2 2
38530 +4 5 5 4 4 4 4 0 0 7 12 15 24 86 132 26 108 161
38531 +37 112 160 64 123 161 90 154 193 64 123 161 90 154 193 90 154 193
38532 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
38533 +90 154 193 101 161 196 90 154 193 101 161 196 101 161 196 101 161 196
38534 +101 161 196 101 161 196 101 161 196 136 185 209 101 161 196 136 185 209
38535 +90 154 193 35 83 115 13 16 17 13 16 17 7 11 13 3 6 7
38536 +5 7 8 6 6 6 3 4 3 2 2 1 30 32 34 154 154 154
38537 +167 166 167 154 154 154 154 154 154 174 174 174 165 164 165 37 38 37
38538 +6 6 6 4 0 0 6 6 6 4 4 4 4 4 4 4 4 4
38539 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38540 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38541 +4 4 4 4 4 4
38542 +4 0 0 4 0 0 41 54 63 163 162 163 166 165 166 154 154 154
38543 +163 162 163 174 174 174 137 136 137 26 28 28 0 0 0 2 2 2
38544 +4 5 5 4 4 5 1 1 2 6 10 14 28 67 93 18 97 151
38545 +18 97 151 18 97 151 26 108 161 37 112 160 37 112 160 90 154 193
38546 +64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
38547 +90 154 193 101 161 196 101 161 196 90 154 193 101 161 196 101 161 196
38548 +101 161 196 101 161 196 101 161 196 136 185 209 90 154 193 16 89 141
38549 +13 20 25 7 11 13 5 7 8 5 7 8 2 5 5 4 5 5
38550 +3 4 3 4 5 5 3 4 3 0 0 0 37 38 37 158 157 158
38551 +174 174 174 158 157 158 158 157 158 167 166 167 174 174 174 41 54 63
38552 +4 0 0 3 2 2 5 5 5 4 4 4 4 4 4 4 4 4
38553 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38554 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38555 +4 4 4 4 4 4
38556 +1 1 1 4 0 0 60 73 81 165 164 165 174 174 174 158 157 158
38557 +167 166 167 174 174 174 153 152 153 26 28 28 2 0 0 2 2 2
38558 +4 5 5 4 4 4 4 0 0 7 12 15 10 87 144 10 87 144
38559 +18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
38560 +26 108 161 37 112 160 53 118 160 90 154 193 90 154 193 90 154 193
38561 +90 154 193 90 154 193 101 161 196 101 161 196 101 161 196 101 161 196
38562 +101 161 196 136 185 209 90 154 193 26 108 161 22 40 52 13 16 17
38563 +7 11 13 3 6 7 5 7 8 5 7 8 2 5 5 4 5 5
38564 +4 5 5 6 6 6 3 4 3 0 0 0 30 32 34 158 157 158
38565 +174 174 174 156 155 156 155 154 155 165 164 165 154 153 154 37 38 37
38566 +4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38567 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38568 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38569 +4 4 4 4 4 4
38570 +4 0 0 4 0 0 60 73 81 167 166 167 174 174 174 163 162 163
38571 +174 174 174 174 174 174 153 152 153 26 28 28 0 0 0 3 3 3
38572 +5 5 5 4 4 4 1 1 2 7 12 15 28 67 93 18 97 151
38573 +18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
38574 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
38575 +90 154 193 26 108 161 90 154 193 90 154 193 90 154 193 101 161 196
38576 +101 161 196 26 108 161 22 40 52 13 16 17 7 11 13 2 5 5
38577 +2 5 5 6 6 6 2 5 5 4 5 5 4 5 5 4 5 5
38578 +3 4 3 5 5 5 3 4 3 2 0 0 30 32 34 137 136 137
38579 +153 152 153 137 136 137 131 129 131 137 136 137 131 129 131 37 38 37
38580 +4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38581 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38582 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38583 +4 4 4 4 4 4
38584 +1 1 1 4 0 0 60 73 81 167 166 167 174 174 174 166 165 166
38585 +174 174 174 177 184 187 153 152 153 30 32 34 1 0 0 3 3 3
38586 +5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
38587 +18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
38588 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
38589 +26 108 161 26 108 161 26 108 161 90 154 193 90 154 193 26 108 161
38590 +35 83 115 13 16 17 7 11 13 5 7 8 3 6 7 5 7 8
38591 +2 5 5 6 6 6 4 5 5 4 5 5 3 4 3 4 5 5
38592 +3 4 3 6 6 6 3 4 3 0 0 0 26 28 28 125 124 125
38593 +131 129 131 125 124 125 125 124 125 131 129 131 131 129 131 37 38 37
38594 +4 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38595 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38596 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38597 +4 4 4 4 4 4
38598 +3 1 0 4 0 0 60 73 81 174 174 174 177 184 187 167 166 167
38599 +174 174 174 177 184 187 153 152 153 30 32 34 0 0 0 3 3 3
38600 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
38601 +18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
38602 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
38603 +26 108 161 90 154 193 26 108 161 26 108 161 24 86 132 13 20 25
38604 +7 11 13 13 20 25 22 40 52 5 7 8 3 4 3 3 4 3
38605 +4 5 5 3 4 3 4 5 5 3 4 3 4 5 5 3 4 3
38606 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
38607 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38608 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38609 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38610 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38611 +4 4 4 4 4 4
38612 +1 1 1 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
38613 +174 174 174 190 197 201 157 156 157 30 32 34 1 0 0 3 3 3
38614 +5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
38615 +18 97 151 19 95 150 19 95 150 18 97 151 18 97 151 26 108 161
38616 +18 97 151 26 108 161 26 108 161 26 108 161 26 108 161 90 154 193
38617 +26 108 161 26 108 161 26 108 161 22 40 52 2 5 5 3 4 3
38618 +28 67 93 37 112 160 34 86 122 2 5 5 3 4 3 3 4 3
38619 +3 4 3 3 4 3 3 4 3 2 2 1 3 4 3 4 4 4
38620 +4 5 5 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
38621 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38622 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38623 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38624 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38625 +4 4 4 4 4 4
38626 +4 0 0 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
38627 +174 174 174 190 197 201 158 157 158 30 32 34 0 0 0 2 2 2
38628 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
38629 +10 87 144 19 95 150 19 95 150 18 97 151 18 97 151 18 97 151
38630 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
38631 +18 97 151 22 40 52 2 5 5 2 2 1 22 40 52 26 108 161
38632 +90 154 193 37 112 160 22 40 52 3 4 3 13 20 25 22 30 35
38633 +3 6 7 1 1 1 2 2 2 6 9 11 5 5 5 4 3 3
38634 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
38635 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38636 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38637 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38638 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38639 +4 4 4 4 4 4
38640 +1 1 1 4 0 0 60 73 81 177 184 187 193 200 203 174 174 174
38641 +177 184 187 193 200 203 163 162 163 30 32 34 4 0 0 2 2 2
38642 +5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
38643 +10 87 144 10 87 144 19 95 150 19 95 150 19 95 150 18 97 151
38644 +26 108 161 26 108 161 26 108 161 90 154 193 26 108 161 28 67 93
38645 +6 10 14 2 5 5 13 20 25 24 86 132 37 112 160 90 154 193
38646 +10 87 144 7 12 15 2 5 5 28 67 93 37 112 160 28 67 93
38647 +2 2 1 7 12 15 35 83 115 28 67 93 3 6 7 1 0 0
38648 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
38649 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38650 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38651 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38652 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38653 +4 4 4 4 4 4
38654 +4 0 0 4 0 0 60 73 81 174 174 174 190 197 201 174 174 174
38655 +177 184 187 193 200 203 163 162 163 30 32 34 0 0 0 2 2 2
38656 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
38657 +10 87 144 16 89 141 19 95 150 10 87 144 26 108 161 26 108 161
38658 +26 108 161 26 108 161 26 108 161 28 67 93 6 10 14 1 1 2
38659 +7 12 15 28 67 93 26 108 161 16 89 141 24 86 132 21 29 34
38660 +3 4 3 21 29 34 37 112 160 37 112 160 27 99 146 21 29 34
38661 +21 29 34 26 108 161 90 154 193 35 83 115 1 1 2 2 0 0
38662 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
38663 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38664 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38665 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38666 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38667 +4 4 4 4 4 4
38668 +3 1 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
38669 +190 197 201 193 200 203 165 164 165 37 38 37 4 0 0 2 2 2
38670 +5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
38671 +10 87 144 10 87 144 16 89 141 18 97 151 18 97 151 10 87 144
38672 +24 86 132 24 86 132 13 20 25 4 5 7 4 5 7 22 40 52
38673 +18 97 151 37 112 160 26 108 161 7 12 15 1 1 1 0 0 0
38674 +28 67 93 37 112 160 26 108 161 28 67 93 22 40 52 28 67 93
38675 +26 108 161 90 154 193 26 108 161 10 87 144 0 0 0 2 0 0
38676 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
38677 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38678 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38679 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38680 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38681 +4 4 4 4 4 4
38682 +4 0 0 6 6 6 60 73 81 174 174 174 193 200 203 174 174 174
38683 +190 197 201 193 200 203 165 164 165 30 32 34 0 0 0 2 2 2
38684 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
38685 +10 87 144 10 87 144 10 87 144 18 97 151 28 67 93 6 10 14
38686 +0 0 0 1 1 2 4 5 7 13 20 25 16 89 141 26 108 161
38687 +26 108 161 26 108 161 24 86 132 6 9 11 2 3 3 22 40 52
38688 +37 112 160 16 89 141 22 40 52 28 67 93 26 108 161 26 108 161
38689 +90 154 193 26 108 161 26 108 161 28 67 93 1 1 1 4 0 0
38690 +4 4 4 5 5 5 3 3 3 4 0 0 26 28 28 124 126 130
38691 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38692 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38693 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38694 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38695 +4 4 4 4 4 4
38696 +4 0 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
38697 +193 200 203 193 200 203 167 166 167 37 38 37 4 0 0 2 2 2
38698 +5 5 5 4 4 4 4 0 0 6 10 14 28 67 93 10 87 144
38699 +10 87 144 10 87 144 18 97 151 10 87 144 13 20 25 4 5 7
38700 +1 1 2 1 1 1 22 40 52 26 108 161 26 108 161 26 108 161
38701 +26 108 161 26 108 161 26 108 161 24 86 132 22 40 52 22 40 52
38702 +22 40 52 22 40 52 10 87 144 26 108 161 26 108 161 26 108 161
38703 +26 108 161 26 108 161 90 154 193 10 87 144 0 0 0 4 0 0
38704 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
38705 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38706 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38707 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38708 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38709 +4 4 4 4 4 4
38710 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
38711 +190 197 201 205 212 215 167 166 167 30 32 34 0 0 0 2 2 2
38712 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
38713 +10 87 144 10 87 144 10 87 144 10 87 144 22 40 52 1 1 2
38714 +2 0 0 1 1 2 24 86 132 26 108 161 26 108 161 26 108 161
38715 +26 108 161 19 95 150 16 89 141 10 87 144 22 40 52 22 40 52
38716 +10 87 144 26 108 161 37 112 160 26 108 161 26 108 161 26 108 161
38717 +26 108 161 26 108 161 26 108 161 28 67 93 2 0 0 3 1 0
38718 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
38719 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38720 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38721 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38722 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38723 +4 4 4 4 4 4
38724 +4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
38725 +193 200 203 193 200 203 174 174 174 37 38 37 4 0 0 2 2 2
38726 +5 5 5 4 4 4 3 2 2 1 1 2 13 20 25 10 87 144
38727 +10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 13 20 25
38728 +13 20 25 22 40 52 10 87 144 18 97 151 18 97 151 26 108 161
38729 +10 87 144 13 20 25 6 10 14 21 29 34 24 86 132 18 97 151
38730 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
38731 +26 108 161 90 154 193 18 97 151 13 20 25 0 0 0 4 3 3
38732 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
38733 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38734 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38735 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38736 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38737 +4 4 4 4 4 4
38738 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
38739 +190 197 201 220 221 221 167 166 167 30 32 34 1 0 0 2 2 2
38740 +5 5 5 4 4 4 4 4 5 2 5 5 4 5 7 13 20 25
38741 +28 67 93 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
38742 +10 87 144 10 87 144 18 97 151 10 87 144 18 97 151 18 97 151
38743 +28 67 93 2 3 3 0 0 0 28 67 93 26 108 161 26 108 161
38744 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
38745 +26 108 161 10 87 144 13 20 25 1 1 2 3 2 2 4 4 4
38746 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
38747 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38748 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38749 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38750 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38751 +4 4 4 4 4 4
38752 +4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
38753 +193 200 203 193 200 203 174 174 174 26 28 28 4 0 0 4 3 3
38754 +5 5 5 4 4 4 4 4 4 4 4 5 1 1 2 2 5 5
38755 +4 5 7 22 40 52 10 87 144 10 87 144 18 97 151 10 87 144
38756 +10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 18 97 151
38757 +10 87 144 28 67 93 22 40 52 10 87 144 26 108 161 18 97 151
38758 +18 97 151 18 97 151 26 108 161 26 108 161 26 108 161 26 108 161
38759 +22 40 52 1 1 2 0 0 0 2 3 3 4 4 4 4 4 4
38760 +4 4 4 5 5 5 4 4 4 0 0 0 26 28 28 131 129 131
38761 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38762 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38763 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38764 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38765 +4 4 4 4 4 4
38766 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
38767 +190 197 201 220 221 221 190 197 201 41 54 63 4 0 0 2 2 2
38768 +6 6 6 4 4 4 4 4 4 4 4 5 4 4 5 3 3 3
38769 +1 1 2 1 1 2 6 10 14 22 40 52 10 87 144 18 97 151
38770 +18 97 151 10 87 144 10 87 144 10 87 144 18 97 151 10 87 144
38771 +10 87 144 18 97 151 26 108 161 18 97 151 18 97 151 10 87 144
38772 +26 108 161 26 108 161 26 108 161 10 87 144 28 67 93 6 10 14
38773 +1 1 2 1 1 2 4 3 3 4 4 5 4 4 4 4 4 4
38774 +5 5 5 5 5 5 1 1 1 4 0 0 37 51 59 137 136 137
38775 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38776 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38777 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38778 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38779 +4 4 4 4 4 4
38780 +4 0 0 4 0 0 60 73 81 220 221 221 193 200 203 174 174 174
38781 +193 200 203 193 200 203 220 221 221 137 136 137 13 16 17 4 0 0
38782 +2 2 2 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5
38783 +4 4 5 4 3 3 1 1 2 4 5 7 13 20 25 28 67 93
38784 +10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
38785 +10 87 144 18 97 151 18 97 151 10 87 144 18 97 151 26 108 161
38786 +26 108 161 18 97 151 28 67 93 6 10 14 0 0 0 0 0 0
38787 +2 3 3 4 5 5 4 4 5 4 4 4 4 4 4 5 5 5
38788 +3 3 3 1 1 1 0 0 0 16 19 21 125 124 125 137 136 137
38789 +131 129 131 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38790 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38791 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38792 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38793 +4 4 4 4 4 4
38794 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
38795 +193 200 203 190 197 201 220 221 221 220 221 221 153 152 153 30 32 34
38796 +0 0 0 0 0 0 2 2 2 4 4 4 4 4 4 4 4 4
38797 +4 4 4 4 5 5 4 5 7 1 1 2 1 1 2 4 5 7
38798 +13 20 25 28 67 93 10 87 144 18 97 151 10 87 144 10 87 144
38799 +10 87 144 10 87 144 10 87 144 18 97 151 26 108 161 18 97 151
38800 +28 67 93 7 12 15 0 0 0 0 0 0 2 2 1 4 4 4
38801 +4 5 5 4 5 5 4 4 4 4 4 4 3 3 3 0 0 0
38802 +0 0 0 0 0 0 37 38 37 125 124 125 158 157 158 131 129 131
38803 +125 124 125 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38804 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38805 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38806 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38807 +4 4 4 4 4 4
38808 +4 3 3 4 0 0 41 54 63 193 200 203 220 221 221 174 174 174
38809 +193 200 203 193 200 203 193 200 203 220 221 221 244 246 246 193 200 203
38810 +120 125 127 5 5 5 1 0 0 0 0 0 1 1 1 4 4 4
38811 +4 4 4 4 4 4 4 5 5 4 5 5 4 4 5 1 1 2
38812 +4 5 7 4 5 7 22 40 52 10 87 144 10 87 144 10 87 144
38813 +10 87 144 10 87 144 18 97 151 10 87 144 10 87 144 13 20 25
38814 +4 5 7 2 3 3 1 1 2 4 4 4 4 5 5 4 4 4
38815 +4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 1 2
38816 +24 26 27 60 74 84 153 152 153 163 162 163 137 136 137 125 124 125
38817 +125 124 125 125 124 125 125 124 125 137 136 137 125 124 125 26 28 28
38818 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38819 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38820 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38821 +4 4 4 4 4 4
38822 +4 0 0 6 6 6 26 28 28 156 155 156 220 221 221 220 221 221
38823 +174 174 174 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
38824 +220 221 221 167 166 167 60 73 81 7 11 13 0 0 0 0 0 0
38825 +3 3 3 4 4 4 4 4 4 4 4 4 4 4 5 4 4 5
38826 +4 4 5 1 1 2 1 1 2 4 5 7 22 40 52 10 87 144
38827 +10 87 144 10 87 144 10 87 144 22 40 52 4 5 7 1 1 2
38828 +1 1 2 4 4 5 4 4 4 4 4 4 4 4 4 4 4 4
38829 +5 5 5 2 2 2 0 0 0 4 0 0 16 19 21 60 73 81
38830 +137 136 137 167 166 167 158 157 158 137 136 137 131 129 131 131 129 131
38831 +125 124 125 125 124 125 131 129 131 155 154 155 60 74 84 5 7 8
38832 +0 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38833 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38834 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38835 +4 4 4 4 4 4
38836 +5 5 5 4 0 0 4 0 0 60 73 81 193 200 203 220 221 221
38837 +193 200 203 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
38838 +220 221 221 220 221 221 220 221 221 137 136 137 43 57 68 6 6 6
38839 +4 0 0 1 1 1 4 4 4 4 4 4 4 4 4 4 4 4
38840 +4 4 5 4 4 5 3 2 2 1 1 2 2 5 5 13 20 25
38841 +22 40 52 22 40 52 13 20 25 2 3 3 1 1 2 3 3 3
38842 +4 5 7 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38843 +1 1 1 0 0 0 2 3 3 41 54 63 131 129 131 166 165 166
38844 +166 165 166 155 154 155 153 152 153 137 136 137 137 136 137 125 124 125
38845 +125 124 125 137 136 137 137 136 137 125 124 125 37 38 37 4 3 3
38846 +4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
38847 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38848 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38849 +4 4 4 4 4 4
38850 +4 3 3 6 6 6 6 6 6 13 16 17 60 73 81 167 166 167
38851 +220 221 221 220 221 221 220 221 221 193 200 203 193 200 203 193 200 203
38852 +205 212 215 220 221 221 220 221 221 244 246 246 205 212 215 125 124 125
38853 +24 26 27 0 0 0 0 0 0 2 2 2 5 5 5 5 5 5
38854 +4 4 4 4 4 4 4 4 4 4 4 5 1 1 2 4 5 7
38855 +4 5 7 4 5 7 1 1 2 3 2 2 4 4 5 4 4 4
38856 +4 4 4 4 4 4 5 5 5 4 4 4 0 0 0 0 0 0
38857 +2 0 0 26 28 28 125 124 125 174 174 174 174 174 174 166 165 166
38858 +156 155 156 153 152 153 137 136 137 137 136 137 131 129 131 137 136 137
38859 +137 136 137 137 136 137 60 74 84 30 32 34 4 0 0 4 0 0
38860 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38861 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38862 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38863 +4 4 4 4 4 4
38864 +5 5 5 6 6 6 4 0 0 4 0 0 6 6 6 26 28 28
38865 +125 124 125 174 174 174 220 221 221 220 221 221 220 221 221 193 200 203
38866 +205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
38867 +193 200 203 60 74 84 13 16 17 4 0 0 0 0 0 3 3 3
38868 +5 5 5 5 5 5 4 4 4 4 4 4 4 4 5 3 3 3
38869 +1 1 2 3 3 3 4 4 5 4 4 5 4 4 4 4 4 4
38870 +5 5 5 5 5 5 2 2 2 0 0 0 0 0 0 13 16 17
38871 +60 74 84 174 174 174 193 200 203 174 174 174 167 166 167 163 162 163
38872 +153 152 153 153 152 153 137 136 137 137 136 137 153 152 153 137 136 137
38873 +125 124 125 41 54 63 24 26 27 4 0 0 4 0 0 5 5 5
38874 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38875 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38876 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38877 +4 4 4 4 4 4
38878 +4 3 3 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
38879 +6 6 6 37 38 37 131 129 131 220 221 221 220 221 221 220 221 221
38880 +193 200 203 193 200 203 220 221 221 205 212 215 220 221 221 244 246 246
38881 +244 246 246 244 246 246 174 174 174 41 54 63 0 0 0 0 0 0
38882 +0 0 0 4 4 4 5 5 5 5 5 5 4 4 4 4 4 5
38883 +4 4 5 4 4 5 4 4 4 4 4 4 6 6 6 6 6 6
38884 +3 3 3 0 0 0 2 0 0 13 16 17 60 73 81 156 155 156
38885 +220 221 221 193 200 203 174 174 174 165 164 165 163 162 163 154 153 154
38886 +153 152 153 153 152 153 158 157 158 163 162 163 137 136 137 60 73 81
38887 +13 16 17 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
38888 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38889 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38890 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38891 +4 4 4 4 4 4
38892 +5 5 5 4 3 3 4 3 3 6 6 6 6 6 6 6 6 6
38893 +6 6 6 6 6 6 6 6 6 37 38 37 167 166 167 244 246 246
38894 +244 246 246 220 221 221 205 212 215 205 212 215 220 221 221 193 200 203
38895 +220 221 221 244 246 246 244 246 246 244 246 246 137 136 137 37 38 37
38896 +3 2 2 0 0 0 1 1 1 5 5 5 5 5 5 4 4 4
38897 +4 4 4 4 4 4 4 4 4 5 5 5 4 4 4 1 1 1
38898 +0 0 0 5 5 5 43 57 68 153 152 153 193 200 203 220 221 221
38899 +177 184 187 174 174 174 167 166 167 166 165 166 158 157 158 157 156 157
38900 +158 157 158 166 165 166 156 155 156 85 115 134 13 16 17 4 0 0
38901 +4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
38902 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38903 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38904 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38905 +4 4 4 4 4 4
38906 +5 5 5 4 3 3 6 6 6 6 6 6 4 0 0 6 6 6
38907 +6 6 6 6 6 6 6 6 6 6 6 6 13 16 17 60 73 81
38908 +177 184 187 220 221 221 220 221 221 220 221 221 205 212 215 220 221 221
38909 +220 221 221 205 212 215 220 221 221 244 246 246 244 246 246 205 212 215
38910 +125 124 125 30 32 34 0 0 0 0 0 0 2 2 2 5 5 5
38911 +4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 0 0
38912 +37 38 37 131 129 131 205 212 215 220 221 221 193 200 203 174 174 174
38913 +174 174 174 174 174 174 167 166 167 165 164 165 166 165 166 167 166 167
38914 +158 157 158 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
38915 +4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
38916 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38917 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38918 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38919 +4 4 4 4 4 4
38920 +4 4 4 5 5 5 4 3 3 4 3 3 6 6 6 6 6 6
38921 +4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
38922 +26 28 28 125 124 125 205 212 215 220 221 221 220 221 221 220 221 221
38923 +205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
38924 +244 246 246 190 197 201 60 74 84 16 19 21 4 0 0 0 0 0
38925 +0 0 0 0 0 0 0 0 0 0 0 0 16 19 21 120 125 127
38926 +177 184 187 220 221 221 205 212 215 177 184 187 174 174 174 177 184 187
38927 +174 174 174 174 174 174 167 166 167 174 174 174 166 165 166 137 136 137
38928 +60 73 81 13 16 17 4 0 0 4 0 0 4 3 3 6 6 6
38929 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38930 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38931 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38932 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38933 +4 4 4 4 4 4
38934 +5 5 5 4 3 3 5 5 5 4 3 3 6 6 6 4 0 0
38935 +6 6 6 6 6 6 4 0 0 6 6 6 4 0 0 6 6 6
38936 +6 6 6 6 6 6 37 38 37 137 136 137 193 200 203 220 221 221
38937 +220 221 221 205 212 215 220 221 221 205 212 215 205 212 215 220 221 221
38938 +220 221 221 220 221 221 244 246 246 166 165 166 43 57 68 2 2 2
38939 +0 0 0 4 0 0 16 19 21 60 73 81 157 156 157 202 210 214
38940 +220 221 221 193 200 203 177 184 187 177 184 187 177 184 187 174 174 174
38941 +174 174 174 174 174 174 174 174 174 157 156 157 60 74 84 24 26 27
38942 +4 0 0 4 0 0 4 0 0 6 6 6 4 4 4 4 4 4
38943 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38944 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38945 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38946 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38947 +4 4 4 4 4 4
38948 +4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
38949 +6 6 6 4 0 0 6 6 6 6 6 6 6 6 6 4 0 0
38950 +4 0 0 4 0 0 6 6 6 24 26 27 60 73 81 167 166 167
38951 +220 221 221 220 221 221 220 221 221 205 212 215 205 212 215 205 212 215
38952 +205 212 215 220 221 221 220 221 221 220 221 221 205 212 215 137 136 137
38953 +60 74 84 125 124 125 137 136 137 190 197 201 220 221 221 193 200 203
38954 +177 184 187 177 184 187 177 184 187 174 174 174 174 174 174 177 184 187
38955 +190 197 201 174 174 174 125 124 125 37 38 37 6 6 6 4 0 0
38956 +4 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38957 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38958 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38959 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38960 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38961 +4 4 4 4 4 4
38962 +4 4 4 4 4 4 5 5 5 5 5 5 4 3 3 6 6 6
38963 +4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 6 6 6
38964 +6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
38965 +125 124 125 193 200 203 244 246 246 220 221 221 205 212 215 205 212 215
38966 +205 212 215 193 200 203 205 212 215 205 212 215 220 221 221 220 221 221
38967 +193 200 203 193 200 203 205 212 215 193 200 203 193 200 203 177 184 187
38968 +190 197 201 190 197 201 174 174 174 190 197 201 193 200 203 190 197 201
38969 +153 152 153 60 73 81 4 0 0 4 0 0 4 0 0 3 2 2
38970 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38971 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38972 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38973 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38974 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38975 +4 4 4 4 4 4
38976 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
38977 +6 6 6 4 3 3 4 3 3 4 3 3 6 6 6 6 6 6
38978 +4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 4 0 0
38979 +4 0 0 26 28 28 131 129 131 220 221 221 244 246 246 220 221 221
38980 +205 212 215 193 200 203 205 212 215 193 200 203 193 200 203 205 212 215
38981 +220 221 221 193 200 203 193 200 203 193 200 203 190 197 201 174 174 174
38982 +174 174 174 190 197 201 193 200 203 193 200 203 167 166 167 125 124 125
38983 +6 6 6 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
38984 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38985 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38986 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38987 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38988 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38989 +4 4 4 4 4 4
38990 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
38991 +5 5 5 4 3 3 5 5 5 6 6 6 4 3 3 5 5 5
38992 +6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
38993 +4 0 0 4 0 0 6 6 6 41 54 63 158 157 158 220 221 221
38994 +220 221 221 220 221 221 193 200 203 193 200 203 193 200 203 190 197 201
38995 +190 197 201 190 197 201 190 197 201 190 197 201 174 174 174 193 200 203
38996 +193 200 203 220 221 221 174 174 174 125 124 125 37 38 37 4 0 0
38997 +4 0 0 4 3 3 6 6 6 4 4 4 4 4 4 4 4 4
38998 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38999 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39000 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39001 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39002 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39003 +4 4 4 4 4 4
39004 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39005 +4 4 4 5 5 5 4 3 3 4 3 3 4 3 3 5 5 5
39006 +4 3 3 6 6 6 5 5 5 4 3 3 6 6 6 6 6 6
39007 +6 6 6 6 6 6 4 0 0 4 0 0 13 16 17 60 73 81
39008 +174 174 174 220 221 221 220 221 221 205 212 215 190 197 201 174 174 174
39009 +193 200 203 174 174 174 190 197 201 174 174 174 193 200 203 220 221 221
39010 +193 200 203 131 129 131 37 38 37 6 6 6 4 0 0 4 0 0
39011 +6 6 6 6 6 6 4 3 3 5 5 5 4 4 4 4 4 4
39012 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39013 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39014 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39015 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39016 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39017 +4 4 4 4 4 4
39018 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39019 +4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
39020 +5 5 5 4 3 3 4 3 3 5 5 5 4 3 3 4 3 3
39021 +5 5 5 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
39022 +6 6 6 125 124 125 174 174 174 220 221 221 220 221 221 193 200 203
39023 +193 200 203 193 200 203 193 200 203 193 200 203 220 221 221 158 157 158
39024 +60 73 81 6 6 6 4 0 0 4 0 0 5 5 5 6 6 6
39025 +5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
39026 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39027 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39028 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39029 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39030 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39031 +4 4 4 4 4 4
39032 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39033 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39034 +4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
39035 +5 5 5 5 5 5 6 6 6 6 6 6 4 0 0 4 0 0
39036 +4 0 0 4 0 0 26 28 28 125 124 125 174 174 174 193 200 203
39037 +193 200 203 174 174 174 193 200 203 167 166 167 125 124 125 6 6 6
39038 +6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 5 5 5
39039 +4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
39040 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39041 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39042 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39043 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39044 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39045 +4 4 4 4 4 4
39046 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39047 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39048 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
39049 +4 3 3 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
39050 +6 6 6 4 0 0 4 0 0 6 6 6 37 38 37 125 124 125
39051 +153 152 153 131 129 131 125 124 125 37 38 37 6 6 6 6 6 6
39052 +6 6 6 4 0 0 6 6 6 6 6 6 4 3 3 5 5 5
39053 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39054 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39055 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39056 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39057 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39058 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39059 +4 4 4 4 4 4
39060 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39061 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39062 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39063 +4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
39064 +6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
39065 +24 26 27 24 26 27 6 6 6 6 6 6 6 6 6 4 0 0
39066 +6 6 6 6 6 6 4 0 0 6 6 6 5 5 5 4 3 3
39067 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39068 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39069 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39070 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39071 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39072 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39073 +4 4 4 4 4 4
39074 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39075 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39076 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39077 +4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
39078 +4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
39079 +6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
39080 +4 0 0 6 6 6 6 6 6 4 3 3 5 5 5 4 4 4
39081 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39082 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39083 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39084 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39085 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39086 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39087 +4 4 4 4 4 4
39088 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39089 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39090 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39091 +4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 5 5 5
39092 +5 5 5 5 5 5 4 0 0 6 6 6 4 0 0 6 6 6
39093 +6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 4 0 0
39094 +6 6 6 4 3 3 5 5 5 4 3 3 5 5 5 4 4 4
39095 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39096 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39097 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39098 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39099 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39100 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39101 +4 4 4 4 4 4
39102 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39103 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39104 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39105 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
39106 +4 3 3 6 6 6 4 3 3 6 6 6 6 6 6 6 6 6
39107 +4 0 0 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
39108 +6 6 6 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
39109 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39110 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39111 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39112 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39113 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39114 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39115 +4 4 4 4 4 4
39116 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39117 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39118 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39119 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39120 +4 4 4 5 5 5 4 3 3 5 5 5 4 0 0 6 6 6
39121 +6 6 6 4 0 0 6 6 6 6 6 6 4 0 0 6 6 6
39122 +4 3 3 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
39123 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39124 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39125 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39126 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39127 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39128 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39129 +4 4 4 4 4 4
39130 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39131 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39132 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39133 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39134 +4 4 4 5 5 5 4 3 3 5 5 5 6 6 6 4 3 3
39135 +4 3 3 6 6 6 6 6 6 4 3 3 6 6 6 4 3 3
39136 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39137 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39138 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39139 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39140 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39141 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39142 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39143 +4 4 4 4 4 4
39144 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39145 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39146 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39147 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39148 +4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 6 6 6
39149 +5 5 5 4 3 3 4 3 3 4 3 3 5 5 5 5 5 5
39150 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39151 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39152 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39153 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39154 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39155 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39156 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39157 +4 4 4 4 4 4
39158 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39159 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39160 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39161 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39162 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
39163 +5 5 5 4 3 3 5 5 5 5 5 5 4 4 4 4 4 4
39164 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39165 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39166 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39167 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39168 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39169 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39170 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39171 +4 4 4 4 4 4
39172 diff --git a/drivers/video/udlfb.c b/drivers/video/udlfb.c
39173 index 3473e75..c930142 100644
39174 --- a/drivers/video/udlfb.c
39175 +++ b/drivers/video/udlfb.c
39176 @@ -619,11 +619,11 @@ int dlfb_handle_damage(struct dlfb_data *dev, int x, int y,
39177 dlfb_urb_completion(urb);
39178
39179 error:
39180 - atomic_add(bytes_sent, &dev->bytes_sent);
39181 - atomic_add(bytes_identical, &dev->bytes_identical);
39182 - atomic_add(width*height*2, &dev->bytes_rendered);
39183 + atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
39184 + atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
39185 + atomic_add_unchecked(width*height*2, &dev->bytes_rendered);
39186 end_cycles = get_cycles();
39187 - atomic_add(((unsigned int) ((end_cycles - start_cycles)
39188 + atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
39189 >> 10)), /* Kcycles */
39190 &dev->cpu_kcycles_used);
39191
39192 @@ -744,11 +744,11 @@ static void dlfb_dpy_deferred_io(struct fb_info *info,
39193 dlfb_urb_completion(urb);
39194
39195 error:
39196 - atomic_add(bytes_sent, &dev->bytes_sent);
39197 - atomic_add(bytes_identical, &dev->bytes_identical);
39198 - atomic_add(bytes_rendered, &dev->bytes_rendered);
39199 + atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
39200 + atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
39201 + atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered);
39202 end_cycles = get_cycles();
39203 - atomic_add(((unsigned int) ((end_cycles - start_cycles)
39204 + atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
39205 >> 10)), /* Kcycles */
39206 &dev->cpu_kcycles_used);
39207 }
39208 @@ -1368,7 +1368,7 @@ static ssize_t metrics_bytes_rendered_show(struct device *fbdev,
39209 struct fb_info *fb_info = dev_get_drvdata(fbdev);
39210 struct dlfb_data *dev = fb_info->par;
39211 return snprintf(buf, PAGE_SIZE, "%u\n",
39212 - atomic_read(&dev->bytes_rendered));
39213 + atomic_read_unchecked(&dev->bytes_rendered));
39214 }
39215
39216 static ssize_t metrics_bytes_identical_show(struct device *fbdev,
39217 @@ -1376,7 +1376,7 @@ static ssize_t metrics_bytes_identical_show(struct device *fbdev,
39218 struct fb_info *fb_info = dev_get_drvdata(fbdev);
39219 struct dlfb_data *dev = fb_info->par;
39220 return snprintf(buf, PAGE_SIZE, "%u\n",
39221 - atomic_read(&dev->bytes_identical));
39222 + atomic_read_unchecked(&dev->bytes_identical));
39223 }
39224
39225 static ssize_t metrics_bytes_sent_show(struct device *fbdev,
39226 @@ -1384,7 +1384,7 @@ static ssize_t metrics_bytes_sent_show(struct device *fbdev,
39227 struct fb_info *fb_info = dev_get_drvdata(fbdev);
39228 struct dlfb_data *dev = fb_info->par;
39229 return snprintf(buf, PAGE_SIZE, "%u\n",
39230 - atomic_read(&dev->bytes_sent));
39231 + atomic_read_unchecked(&dev->bytes_sent));
39232 }
39233
39234 static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
39235 @@ -1392,7 +1392,7 @@ static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
39236 struct fb_info *fb_info = dev_get_drvdata(fbdev);
39237 struct dlfb_data *dev = fb_info->par;
39238 return snprintf(buf, PAGE_SIZE, "%u\n",
39239 - atomic_read(&dev->cpu_kcycles_used));
39240 + atomic_read_unchecked(&dev->cpu_kcycles_used));
39241 }
39242
39243 static ssize_t edid_show(
39244 @@ -1449,10 +1449,10 @@ static ssize_t metrics_reset_store(struct device *fbdev,
39245 struct fb_info *fb_info = dev_get_drvdata(fbdev);
39246 struct dlfb_data *dev = fb_info->par;
39247
39248 - atomic_set(&dev->bytes_rendered, 0);
39249 - atomic_set(&dev->bytes_identical, 0);
39250 - atomic_set(&dev->bytes_sent, 0);
39251 - atomic_set(&dev->cpu_kcycles_used, 0);
39252 + atomic_set_unchecked(&dev->bytes_rendered, 0);
39253 + atomic_set_unchecked(&dev->bytes_identical, 0);
39254 + atomic_set_unchecked(&dev->bytes_sent, 0);
39255 + atomic_set_unchecked(&dev->cpu_kcycles_used, 0);
39256
39257 return count;
39258 }
39259 diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
39260 index 7f8472c..9842e87 100644
39261 --- a/drivers/video/uvesafb.c
39262 +++ b/drivers/video/uvesafb.c
39263 @@ -19,6 +19,7 @@
39264 #include <linux/io.h>
39265 #include <linux/mutex.h>
39266 #include <linux/slab.h>
39267 +#include <linux/moduleloader.h>
39268 #include <video/edid.h>
39269 #include <video/uvesafb.h>
39270 #ifdef CONFIG_X86
39271 @@ -121,7 +122,7 @@ static int uvesafb_helper_start(void)
39272 NULL,
39273 };
39274
39275 - return call_usermodehelper(v86d_path, argv, envp, 1);
39276 + return call_usermodehelper(v86d_path, argv, envp, UMH_WAIT_PROC);
39277 }
39278
39279 /*
39280 @@ -569,10 +570,32 @@ static int __devinit uvesafb_vbe_getpmi(struct uvesafb_ktask *task,
39281 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
39282 par->pmi_setpal = par->ypan = 0;
39283 } else {
39284 +
39285 +#ifdef CONFIG_PAX_KERNEXEC
39286 +#ifdef CONFIG_MODULES
39287 + par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
39288 +#endif
39289 + if (!par->pmi_code) {
39290 + par->pmi_setpal = par->ypan = 0;
39291 + return 0;
39292 + }
39293 +#endif
39294 +
39295 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
39296 + task->t.regs.edi);
39297 +
39298 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39299 + pax_open_kernel();
39300 + memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
39301 + pax_close_kernel();
39302 +
39303 + par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
39304 + par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
39305 +#else
39306 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
39307 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
39308 +#endif
39309 +
39310 printk(KERN_INFO "uvesafb: protected mode interface info at "
39311 "%04x:%04x\n",
39312 (u16)task->t.regs.es, (u16)task->t.regs.edi);
39313 @@ -1821,6 +1844,11 @@ out:
39314 if (par->vbe_modes)
39315 kfree(par->vbe_modes);
39316
39317 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39318 + if (par->pmi_code)
39319 + module_free_exec(NULL, par->pmi_code);
39320 +#endif
39321 +
39322 framebuffer_release(info);
39323 return err;
39324 }
39325 @@ -1847,6 +1875,12 @@ static int uvesafb_remove(struct platform_device *dev)
39326 kfree(par->vbe_state_orig);
39327 if (par->vbe_state_saved)
39328 kfree(par->vbe_state_saved);
39329 +
39330 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39331 + if (par->pmi_code)
39332 + module_free_exec(NULL, par->pmi_code);
39333 +#endif
39334 +
39335 }
39336
39337 framebuffer_release(info);
39338 diff --git a/drivers/video/vesafb.c b/drivers/video/vesafb.c
39339 index 501b340..86bd4cf 100644
39340 --- a/drivers/video/vesafb.c
39341 +++ b/drivers/video/vesafb.c
39342 @@ -9,6 +9,7 @@
39343 */
39344
39345 #include <linux/module.h>
39346 +#include <linux/moduleloader.h>
39347 #include <linux/kernel.h>
39348 #include <linux/errno.h>
39349 #include <linux/string.h>
39350 @@ -52,8 +53,8 @@ static int vram_remap __initdata; /* Set amount of memory to be used */
39351 static int vram_total __initdata; /* Set total amount of memory */
39352 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
39353 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
39354 -static void (*pmi_start)(void) __read_mostly;
39355 -static void (*pmi_pal) (void) __read_mostly;
39356 +static void (*pmi_start)(void) __read_only;
39357 +static void (*pmi_pal) (void) __read_only;
39358 static int depth __read_mostly;
39359 static int vga_compat __read_mostly;
39360 /* --------------------------------------------------------------------- */
39361 @@ -233,6 +234,7 @@ static int __init vesafb_probe(struct platform_device *dev)
39362 unsigned int size_vmode;
39363 unsigned int size_remap;
39364 unsigned int size_total;
39365 + void *pmi_code = NULL;
39366
39367 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
39368 return -ENODEV;
39369 @@ -275,10 +277,6 @@ static int __init vesafb_probe(struct platform_device *dev)
39370 size_remap = size_total;
39371 vesafb_fix.smem_len = size_remap;
39372
39373 -#ifndef __i386__
39374 - screen_info.vesapm_seg = 0;
39375 -#endif
39376 -
39377 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
39378 printk(KERN_WARNING
39379 "vesafb: cannot reserve video memory at 0x%lx\n",
39380 @@ -307,9 +305,21 @@ static int __init vesafb_probe(struct platform_device *dev)
39381 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
39382 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
39383
39384 +#ifdef __i386__
39385 +
39386 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39387 + pmi_code = module_alloc_exec(screen_info.vesapm_size);
39388 + if (!pmi_code)
39389 +#elif !defined(CONFIG_PAX_KERNEXEC)
39390 + if (0)
39391 +#endif
39392 +
39393 +#endif
39394 + screen_info.vesapm_seg = 0;
39395 +
39396 if (screen_info.vesapm_seg) {
39397 - printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
39398 - screen_info.vesapm_seg,screen_info.vesapm_off);
39399 + printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
39400 + screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
39401 }
39402
39403 if (screen_info.vesapm_seg < 0xc000)
39404 @@ -317,9 +327,25 @@ static int __init vesafb_probe(struct platform_device *dev)
39405
39406 if (ypan || pmi_setpal) {
39407 unsigned short *pmi_base;
39408 +
39409 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
39410 - pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
39411 - pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
39412 +
39413 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39414 + pax_open_kernel();
39415 + memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
39416 +#else
39417 + pmi_code = pmi_base;
39418 +#endif
39419 +
39420 + pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
39421 + pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
39422 +
39423 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39424 + pmi_start = ktva_ktla(pmi_start);
39425 + pmi_pal = ktva_ktla(pmi_pal);
39426 + pax_close_kernel();
39427 +#endif
39428 +
39429 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
39430 if (pmi_base[3]) {
39431 printk(KERN_INFO "vesafb: pmi: ports = ");
39432 @@ -488,6 +514,11 @@ static int __init vesafb_probe(struct platform_device *dev)
39433 info->node, info->fix.id);
39434 return 0;
39435 err:
39436 +
39437 +#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39438 + module_free_exec(NULL, pmi_code);
39439 +#endif
39440 +
39441 if (info->screen_base)
39442 iounmap(info->screen_base);
39443 framebuffer_release(info);
39444 diff --git a/drivers/video/via/via_clock.h b/drivers/video/via/via_clock.h
39445 index 88714ae..16c2e11 100644
39446 --- a/drivers/video/via/via_clock.h
39447 +++ b/drivers/video/via/via_clock.h
39448 @@ -56,7 +56,7 @@ struct via_clock {
39449
39450 void (*set_engine_pll_state)(u8 state);
39451 void (*set_engine_pll)(struct via_pll_config config);
39452 -};
39453 +} __no_const;
39454
39455
39456 static inline u32 get_pll_internal_frequency(u32 ref_freq,
39457 diff --git a/drivers/xen/xen-pciback/conf_space.h b/drivers/xen/xen-pciback/conf_space.h
39458 index e56c934..fc22f4b 100644
39459 --- a/drivers/xen/xen-pciback/conf_space.h
39460 +++ b/drivers/xen/xen-pciback/conf_space.h
39461 @@ -44,15 +44,15 @@ struct config_field {
39462 struct {
39463 conf_dword_write write;
39464 conf_dword_read read;
39465 - } dw;
39466 + } __no_const dw;
39467 struct {
39468 conf_word_write write;
39469 conf_word_read read;
39470 - } w;
39471 + } __no_const w;
39472 struct {
39473 conf_byte_write write;
39474 conf_byte_read read;
39475 - } b;
39476 + } __no_const b;
39477 } u;
39478 struct list_head list;
39479 };
39480 diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
39481 index 879ed88..bc03a01 100644
39482 --- a/fs/9p/vfs_inode.c
39483 +++ b/fs/9p/vfs_inode.c
39484 @@ -1286,7 +1286,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
39485 void
39486 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
39487 {
39488 - char *s = nd_get_link(nd);
39489 + const char *s = nd_get_link(nd);
39490
39491 P9_DPRINTK(P9_DEBUG_VFS, " %s %s\n", dentry->d_name.name,
39492 IS_ERR(s) ? "<error>" : s);
39493 diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
39494 index 79e2ca7..5828ad1 100644
39495 --- a/fs/Kconfig.binfmt
39496 +++ b/fs/Kconfig.binfmt
39497 @@ -86,7 +86,7 @@ config HAVE_AOUT
39498
39499 config BINFMT_AOUT
39500 tristate "Kernel support for a.out and ECOFF binaries"
39501 - depends on HAVE_AOUT
39502 + depends on HAVE_AOUT && BROKEN
39503 ---help---
39504 A.out (Assembler.OUTput) is a set of formats for libraries and
39505 executables used in the earliest versions of UNIX. Linux used
39506 diff --git a/fs/aio.c b/fs/aio.c
39507 index 969beb0..09fab51 100644
39508 --- a/fs/aio.c
39509 +++ b/fs/aio.c
39510 @@ -119,7 +119,7 @@ static int aio_setup_ring(struct kioctx *ctx)
39511 size += sizeof(struct io_event) * nr_events;
39512 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
39513
39514 - if (nr_pages < 0)
39515 + if (nr_pages <= 0)
39516 return -EINVAL;
39517
39518 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
39519 @@ -1461,22 +1461,27 @@ static ssize_t aio_fsync(struct kiocb *iocb)
39520 static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
39521 {
39522 ssize_t ret;
39523 + struct iovec iovstack;
39524
39525 #ifdef CONFIG_COMPAT
39526 if (compat)
39527 ret = compat_rw_copy_check_uvector(type,
39528 (struct compat_iovec __user *)kiocb->ki_buf,
39529 - kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
39530 + kiocb->ki_nbytes, 1, &iovstack,
39531 &kiocb->ki_iovec, 1);
39532 else
39533 #endif
39534 ret = rw_copy_check_uvector(type,
39535 (struct iovec __user *)kiocb->ki_buf,
39536 - kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
39537 + kiocb->ki_nbytes, 1, &iovstack,
39538 &kiocb->ki_iovec, 1);
39539 if (ret < 0)
39540 goto out;
39541
39542 + if (kiocb->ki_iovec == &iovstack) {
39543 + kiocb->ki_inline_vec = iovstack;
39544 + kiocb->ki_iovec = &kiocb->ki_inline_vec;
39545 + }
39546 kiocb->ki_nr_segs = kiocb->ki_nbytes;
39547 kiocb->ki_cur_seg = 0;
39548 /* ki_nbytes/left now reflect bytes instead of segs */
39549 diff --git a/fs/attr.c b/fs/attr.c
39550 index 7ee7ba4..0c61a60 100644
39551 --- a/fs/attr.c
39552 +++ b/fs/attr.c
39553 @@ -99,6 +99,7 @@ int inode_newsize_ok(const struct inode *inode, loff_t offset)
39554 unsigned long limit;
39555
39556 limit = rlimit(RLIMIT_FSIZE);
39557 + gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
39558 if (limit != RLIM_INFINITY && offset > limit)
39559 goto out_sig;
39560 if (offset > inode->i_sb->s_maxbytes)
39561 diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
39562 index e1fbdee..cd5ea56 100644
39563 --- a/fs/autofs4/waitq.c
39564 +++ b/fs/autofs4/waitq.c
39565 @@ -60,7 +60,7 @@ static int autofs4_write(struct file *file, const void *addr, int bytes)
39566 {
39567 unsigned long sigpipe, flags;
39568 mm_segment_t fs;
39569 - const char *data = (const char *)addr;
39570 + const char __user *data = (const char __force_user *)addr;
39571 ssize_t wr = 0;
39572
39573 /** WARNING: this is not safe for writing more than PIPE_BUF bytes! **/
39574 diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
39575 index 8342ca6..82fd192 100644
39576 --- a/fs/befs/linuxvfs.c
39577 +++ b/fs/befs/linuxvfs.c
39578 @@ -503,7 +503,7 @@ static void befs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
39579 {
39580 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
39581 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
39582 - char *link = nd_get_link(nd);
39583 + const char *link = nd_get_link(nd);
39584 if (!IS_ERR(link))
39585 kfree(link);
39586 }
39587 diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
39588 index a6395bd..a5b24c4 100644
39589 --- a/fs/binfmt_aout.c
39590 +++ b/fs/binfmt_aout.c
39591 @@ -16,6 +16,7 @@
39592 #include <linux/string.h>
39593 #include <linux/fs.h>
39594 #include <linux/file.h>
39595 +#include <linux/security.h>
39596 #include <linux/stat.h>
39597 #include <linux/fcntl.h>
39598 #include <linux/ptrace.h>
39599 @@ -86,6 +87,8 @@ static int aout_core_dump(struct coredump_params *cprm)
39600 #endif
39601 # define START_STACK(u) ((void __user *)u.start_stack)
39602
39603 + memset(&dump, 0, sizeof(dump));
39604 +
39605 fs = get_fs();
39606 set_fs(KERNEL_DS);
39607 has_dumped = 1;
39608 @@ -97,10 +100,12 @@ static int aout_core_dump(struct coredump_params *cprm)
39609
39610 /* If the size of the dump file exceeds the rlimit, then see what would happen
39611 if we wrote the stack, but not the data area. */
39612 + gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
39613 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit)
39614 dump.u_dsize = 0;
39615
39616 /* Make sure we have enough room to write the stack and data areas. */
39617 + gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
39618 if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
39619 dump.u_ssize = 0;
39620
39621 @@ -234,6 +239,8 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
39622 rlim = rlimit(RLIMIT_DATA);
39623 if (rlim >= RLIM_INFINITY)
39624 rlim = ~0;
39625 +
39626 + gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
39627 if (ex.a_data + ex.a_bss > rlim)
39628 return -ENOMEM;
39629
39630 @@ -262,6 +269,27 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
39631 install_exec_creds(bprm);
39632 current->flags &= ~PF_FORKNOEXEC;
39633
39634 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
39635 + current->mm->pax_flags = 0UL;
39636 +#endif
39637 +
39638 +#ifdef CONFIG_PAX_PAGEEXEC
39639 + if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
39640 + current->mm->pax_flags |= MF_PAX_PAGEEXEC;
39641 +
39642 +#ifdef CONFIG_PAX_EMUTRAMP
39643 + if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
39644 + current->mm->pax_flags |= MF_PAX_EMUTRAMP;
39645 +#endif
39646 +
39647 +#ifdef CONFIG_PAX_MPROTECT
39648 + if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
39649 + current->mm->pax_flags |= MF_PAX_MPROTECT;
39650 +#endif
39651 +
39652 + }
39653 +#endif
39654 +
39655 if (N_MAGIC(ex) == OMAGIC) {
39656 unsigned long text_addr, map_size;
39657 loff_t pos;
39658 @@ -334,7 +362,7 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
39659
39660 down_write(&current->mm->mmap_sem);
39661 error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
39662 - PROT_READ | PROT_WRITE | PROT_EXEC,
39663 + PROT_READ | PROT_WRITE,
39664 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
39665 fd_offset + ex.a_text);
39666 up_write(&current->mm->mmap_sem);
39667 diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
39668 index 21ac5ee..c1090ea 100644
39669 --- a/fs/binfmt_elf.c
39670 +++ b/fs/binfmt_elf.c
39671 @@ -32,6 +32,7 @@
39672 #include <linux/elf.h>
39673 #include <linux/utsname.h>
39674 #include <linux/coredump.h>
39675 +#include <linux/xattr.h>
39676 #include <asm/uaccess.h>
39677 #include <asm/param.h>
39678 #include <asm/page.h>
39679 @@ -51,6 +52,10 @@ static int elf_core_dump(struct coredump_params *cprm);
39680 #define elf_core_dump NULL
39681 #endif
39682
39683 +#ifdef CONFIG_PAX_MPROTECT
39684 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
39685 +#endif
39686 +
39687 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
39688 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
39689 #else
39690 @@ -70,6 +75,11 @@ static struct linux_binfmt elf_format = {
39691 .load_binary = load_elf_binary,
39692 .load_shlib = load_elf_library,
39693 .core_dump = elf_core_dump,
39694 +
39695 +#ifdef CONFIG_PAX_MPROTECT
39696 + .handle_mprotect= elf_handle_mprotect,
39697 +#endif
39698 +
39699 .min_coredump = ELF_EXEC_PAGESIZE,
39700 };
39701
39702 @@ -77,6 +87,8 @@ static struct linux_binfmt elf_format = {
39703
39704 static int set_brk(unsigned long start, unsigned long end)
39705 {
39706 + unsigned long e = end;
39707 +
39708 start = ELF_PAGEALIGN(start);
39709 end = ELF_PAGEALIGN(end);
39710 if (end > start) {
39711 @@ -87,7 +99,7 @@ static int set_brk(unsigned long start, unsigned long end)
39712 if (BAD_ADDR(addr))
39713 return addr;
39714 }
39715 - current->mm->start_brk = current->mm->brk = end;
39716 + current->mm->start_brk = current->mm->brk = e;
39717 return 0;
39718 }
39719
39720 @@ -148,12 +160,13 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
39721 elf_addr_t __user *u_rand_bytes;
39722 const char *k_platform = ELF_PLATFORM;
39723 const char *k_base_platform = ELF_BASE_PLATFORM;
39724 - unsigned char k_rand_bytes[16];
39725 + u32 k_rand_bytes[4];
39726 int items;
39727 elf_addr_t *elf_info;
39728 int ei_index = 0;
39729 const struct cred *cred = current_cred();
39730 struct vm_area_struct *vma;
39731 + unsigned long saved_auxv[AT_VECTOR_SIZE];
39732
39733 /*
39734 * In some cases (e.g. Hyper-Threading), we want to avoid L1
39735 @@ -195,8 +208,12 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
39736 * Generate 16 random bytes for userspace PRNG seeding.
39737 */
39738 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
39739 - u_rand_bytes = (elf_addr_t __user *)
39740 - STACK_ALLOC(p, sizeof(k_rand_bytes));
39741 + srandom32(k_rand_bytes[0] ^ random32());
39742 + srandom32(k_rand_bytes[1] ^ random32());
39743 + srandom32(k_rand_bytes[2] ^ random32());
39744 + srandom32(k_rand_bytes[3] ^ random32());
39745 + p = STACK_ROUND(p, sizeof(k_rand_bytes));
39746 + u_rand_bytes = (elf_addr_t __user *) p;
39747 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
39748 return -EFAULT;
39749
39750 @@ -308,9 +325,11 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
39751 return -EFAULT;
39752 current->mm->env_end = p;
39753
39754 + memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
39755 +
39756 /* Put the elf_info on the stack in the right place. */
39757 sp = (elf_addr_t __user *)envp + 1;
39758 - if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
39759 + if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
39760 return -EFAULT;
39761 return 0;
39762 }
39763 @@ -381,10 +400,10 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
39764 {
39765 struct elf_phdr *elf_phdata;
39766 struct elf_phdr *eppnt;
39767 - unsigned long load_addr = 0;
39768 + unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
39769 int load_addr_set = 0;
39770 unsigned long last_bss = 0, elf_bss = 0;
39771 - unsigned long error = ~0UL;
39772 + unsigned long error = -EINVAL;
39773 unsigned long total_size;
39774 int retval, i, size;
39775
39776 @@ -430,6 +449,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
39777 goto out_close;
39778 }
39779
39780 +#ifdef CONFIG_PAX_SEGMEXEC
39781 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
39782 + pax_task_size = SEGMEXEC_TASK_SIZE;
39783 +#endif
39784 +
39785 eppnt = elf_phdata;
39786 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
39787 if (eppnt->p_type == PT_LOAD) {
39788 @@ -473,8 +497,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
39789 k = load_addr + eppnt->p_vaddr;
39790 if (BAD_ADDR(k) ||
39791 eppnt->p_filesz > eppnt->p_memsz ||
39792 - eppnt->p_memsz > TASK_SIZE ||
39793 - TASK_SIZE - eppnt->p_memsz < k) {
39794 + eppnt->p_memsz > pax_task_size ||
39795 + pax_task_size - eppnt->p_memsz < k) {
39796 error = -ENOMEM;
39797 goto out_close;
39798 }
39799 @@ -528,6 +552,348 @@ out:
39800 return error;
39801 }
39802
39803 +static unsigned long pax_parse_pt_pax_softmode(const struct elf_phdr * const elf_phdata)
39804 +{
39805 + unsigned long pax_flags = 0UL;
39806 +
39807 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
39808 +
39809 +#ifdef CONFIG_PAX_PAGEEXEC
39810 + if (elf_phdata->p_flags & PF_PAGEEXEC)
39811 + pax_flags |= MF_PAX_PAGEEXEC;
39812 +#endif
39813 +
39814 +#ifdef CONFIG_PAX_SEGMEXEC
39815 + if (elf_phdata->p_flags & PF_SEGMEXEC)
39816 + pax_flags |= MF_PAX_SEGMEXEC;
39817 +#endif
39818 +
39819 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
39820 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
39821 + if ((__supported_pte_mask & _PAGE_NX))
39822 + pax_flags &= ~MF_PAX_SEGMEXEC;
39823 + else
39824 + pax_flags &= ~MF_PAX_PAGEEXEC;
39825 + }
39826 +#endif
39827 +
39828 +#ifdef CONFIG_PAX_EMUTRAMP
39829 + if (elf_phdata->p_flags & PF_EMUTRAMP)
39830 + pax_flags |= MF_PAX_EMUTRAMP;
39831 +#endif
39832 +
39833 +#ifdef CONFIG_PAX_MPROTECT
39834 + if (elf_phdata->p_flags & PF_MPROTECT)
39835 + pax_flags |= MF_PAX_MPROTECT;
39836 +#endif
39837 +
39838 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
39839 + if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
39840 + pax_flags |= MF_PAX_RANDMMAP;
39841 +#endif
39842 +
39843 +#endif
39844 +
39845 + return pax_flags;
39846 +}
39847 +
39848 +static unsigned long pax_parse_pt_pax_hardmode(const struct elf_phdr * const elf_phdata)
39849 +{
39850 + unsigned long pax_flags = 0UL;
39851 +
39852 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
39853 +
39854 +#ifdef CONFIG_PAX_PAGEEXEC
39855 + if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
39856 + pax_flags |= MF_PAX_PAGEEXEC;
39857 +#endif
39858 +
39859 +#ifdef CONFIG_PAX_SEGMEXEC
39860 + if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
39861 + pax_flags |= MF_PAX_SEGMEXEC;
39862 +#endif
39863 +
39864 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
39865 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
39866 + if ((__supported_pte_mask & _PAGE_NX))
39867 + pax_flags &= ~MF_PAX_SEGMEXEC;
39868 + else
39869 + pax_flags &= ~MF_PAX_PAGEEXEC;
39870 + }
39871 +#endif
39872 +
39873 +#ifdef CONFIG_PAX_EMUTRAMP
39874 + if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
39875 + pax_flags |= MF_PAX_EMUTRAMP;
39876 +#endif
39877 +
39878 +#ifdef CONFIG_PAX_MPROTECT
39879 + if (!(elf_phdata->p_flags & PF_NOMPROTECT))
39880 + pax_flags |= MF_PAX_MPROTECT;
39881 +#endif
39882 +
39883 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
39884 + if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
39885 + pax_flags |= MF_PAX_RANDMMAP;
39886 +#endif
39887 +
39888 +#endif
39889 +
39890 + return pax_flags;
39891 +}
39892 +
39893 +static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
39894 +{
39895 + unsigned long pax_flags = 0UL;
39896 +
39897 +#ifdef CONFIG_PAX_EI_PAX
39898 +
39899 +#ifdef CONFIG_PAX_PAGEEXEC
39900 + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
39901 + pax_flags |= MF_PAX_PAGEEXEC;
39902 +#endif
39903 +
39904 +#ifdef CONFIG_PAX_SEGMEXEC
39905 + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
39906 + pax_flags |= MF_PAX_SEGMEXEC;
39907 +#endif
39908 +
39909 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
39910 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
39911 + if ((__supported_pte_mask & _PAGE_NX))
39912 + pax_flags &= ~MF_PAX_SEGMEXEC;
39913 + else
39914 + pax_flags &= ~MF_PAX_PAGEEXEC;
39915 + }
39916 +#endif
39917 +
39918 +#ifdef CONFIG_PAX_EMUTRAMP
39919 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
39920 + pax_flags |= MF_PAX_EMUTRAMP;
39921 +#endif
39922 +
39923 +#ifdef CONFIG_PAX_MPROTECT
39924 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
39925 + pax_flags |= MF_PAX_MPROTECT;
39926 +#endif
39927 +
39928 +#ifdef CONFIG_PAX_ASLR
39929 + if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
39930 + pax_flags |= MF_PAX_RANDMMAP;
39931 +#endif
39932 +
39933 +#else
39934 +
39935 +#ifdef CONFIG_PAX_PAGEEXEC
39936 + pax_flags |= MF_PAX_PAGEEXEC;
39937 +#endif
39938 +
39939 +#ifdef CONFIG_PAX_MPROTECT
39940 + pax_flags |= MF_PAX_MPROTECT;
39941 +#endif
39942 +
39943 +#ifdef CONFIG_PAX_RANDMMAP
39944 + pax_flags |= MF_PAX_RANDMMAP;
39945 +#endif
39946 +
39947 +#ifdef CONFIG_PAX_SEGMEXEC
39948 + if (!(__supported_pte_mask & _PAGE_NX)) {
39949 + pax_flags &= ~MF_PAX_PAGEEXEC;
39950 + pax_flags |= MF_PAX_SEGMEXEC;
39951 + }
39952 +#endif
39953 +
39954 +#endif
39955 +
39956 + return pax_flags;
39957 +}
39958 +
39959 +static unsigned long pax_parse_pt_pax(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
39960 +{
39961 +
39962 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
39963 + unsigned long i;
39964 +
39965 + for (i = 0UL; i < elf_ex->e_phnum; i++)
39966 + if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
39967 + if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
39968 + ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
39969 + ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
39970 + ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
39971 + ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
39972 + return ~0UL;
39973 +
39974 +#ifdef CONFIG_PAX_SOFTMODE
39975 + if (pax_softmode)
39976 + return pax_parse_pt_pax_softmode(&elf_phdata[i]);
39977 + else
39978 +#endif
39979 +
39980 + return pax_parse_pt_pax_hardmode(&elf_phdata[i]);
39981 + break;
39982 + }
39983 +#endif
39984 +
39985 + return ~0UL;
39986 +}
39987 +
39988 +static unsigned long pax_parse_xattr_pax_softmode(unsigned long pax_flags_softmode)
39989 +{
39990 + unsigned long pax_flags = 0UL;
39991 +
39992 +#ifdef CONFIG_PAX_PAGEEXEC
39993 + if (pax_flags_softmode & MF_PAX_PAGEEXEC)
39994 + pax_flags |= MF_PAX_PAGEEXEC;
39995 +#endif
39996 +
39997 +#ifdef CONFIG_PAX_SEGMEXEC
39998 + if (pax_flags_softmode & MF_PAX_SEGMEXEC)
39999 + pax_flags |= MF_PAX_SEGMEXEC;
40000 +#endif
40001 +
40002 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
40003 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
40004 + if ((__supported_pte_mask & _PAGE_NX))
40005 + pax_flags &= ~MF_PAX_SEGMEXEC;
40006 + else
40007 + pax_flags &= ~MF_PAX_PAGEEXEC;
40008 + }
40009 +#endif
40010 +
40011 +#ifdef CONFIG_PAX_EMUTRAMP
40012 + if (pax_flags_softmode & MF_PAX_EMUTRAMP)
40013 + pax_flags |= MF_PAX_EMUTRAMP;
40014 +#endif
40015 +
40016 +#ifdef CONFIG_PAX_MPROTECT
40017 + if (pax_flags_softmode & MF_PAX_MPROTECT)
40018 + pax_flags |= MF_PAX_MPROTECT;
40019 +#endif
40020 +
40021 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
40022 + if (randomize_va_space && (pax_flags_softmode & MF_PAX_RANDMMAP))
40023 + pax_flags |= MF_PAX_RANDMMAP;
40024 +#endif
40025 +
40026 + return pax_flags;
40027 +}
40028 +
40029 +static unsigned long pax_parse_xattr_pax_hardmode(unsigned long pax_flags_hardmode)
40030 +{
40031 + unsigned long pax_flags = 0UL;
40032 +
40033 +#ifdef CONFIG_PAX_PAGEEXEC
40034 + if (!(pax_flags_hardmode & MF_PAX_PAGEEXEC))
40035 + pax_flags |= MF_PAX_PAGEEXEC;
40036 +#endif
40037 +
40038 +#ifdef CONFIG_PAX_SEGMEXEC
40039 + if (!(pax_flags_hardmode & MF_PAX_SEGMEXEC))
40040 + pax_flags |= MF_PAX_SEGMEXEC;
40041 +#endif
40042 +
40043 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
40044 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
40045 + if ((__supported_pte_mask & _PAGE_NX))
40046 + pax_flags &= ~MF_PAX_SEGMEXEC;
40047 + else
40048 + pax_flags &= ~MF_PAX_PAGEEXEC;
40049 + }
40050 +#endif
40051 +
40052 +#ifdef CONFIG_PAX_EMUTRAMP
40053 + if (!(pax_flags_hardmode & MF_PAX_EMUTRAMP))
40054 + pax_flags |= MF_PAX_EMUTRAMP;
40055 +#endif
40056 +
40057 +#ifdef CONFIG_PAX_MPROTECT
40058 + if (!(pax_flags_hardmode & MF_PAX_MPROTECT))
40059 + pax_flags |= MF_PAX_MPROTECT;
40060 +#endif
40061 +
40062 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
40063 + if (randomize_va_space && !(pax_flags_hardmode & MF_PAX_RANDMMAP))
40064 + pax_flags |= MF_PAX_RANDMMAP;
40065 +#endif
40066 +
40067 + return pax_flags;
40068 +}
40069 +
40070 +static unsigned long pax_parse_xattr_pax(struct file * const file)
40071 +{
40072 +
40073 +#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
40074 + ssize_t xattr_size, i;
40075 + unsigned char xattr_value[5];
40076 + unsigned long pax_flags_hardmode = 0UL, pax_flags_softmode = 0UL;
40077 +
40078 + xattr_size = vfs_getxattr(file->f_path.dentry, XATTR_NAME_PAX_FLAGS, xattr_value, sizeof xattr_value);
40079 + if (xattr_size <= 0)
40080 + return ~0UL;
40081 +
40082 + for (i = 0; i < xattr_size; i++)
40083 + switch (xattr_value[i]) {
40084 + default:
40085 + return ~0UL;
40086 +
40087 +#define parse_flag(option1, option2, flag) \
40088 + case option1: \
40089 + pax_flags_hardmode |= MF_PAX_##flag; \
40090 + break; \
40091 + case option2: \
40092 + pax_flags_softmode |= MF_PAX_##flag; \
40093 + break;
40094 +
40095 + parse_flag('p', 'P', PAGEEXEC);
40096 + parse_flag('e', 'E', EMUTRAMP);
40097 + parse_flag('m', 'M', MPROTECT);
40098 + parse_flag('r', 'R', RANDMMAP);
40099 + parse_flag('s', 'S', SEGMEXEC);
40100 +
40101 +#undef parse_flag
40102 + }
40103 +
40104 + if (pax_flags_hardmode & pax_flags_softmode)
40105 + return ~0UL;
40106 +
40107 +#ifdef CONFIG_PAX_SOFTMODE
40108 + if (pax_softmode)
40109 + return pax_parse_xattr_pax_softmode(pax_flags_softmode);
40110 + else
40111 +#endif
40112 +
40113 + return pax_parse_xattr_pax_hardmode(pax_flags_hardmode);
40114 +#else
40115 + return ~0UL;
40116 +#endif
40117 +}
40118 +
40119 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_XATTR_PAX_FLAGS)
40120 +static long pax_parse_pax_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata, struct file * const file)
40121 +{
40122 + unsigned long pax_flags, pt_pax_flags, xattr_pax_flags;
40123 +
40124 + pax_flags = pax_parse_ei_pax(elf_ex);
40125 + pt_pax_flags = pax_parse_pt_pax(elf_ex, elf_phdata);
40126 + xattr_pax_flags = pax_parse_xattr_pax(file);
40127 +
40128 + if (pt_pax_flags == ~0UL)
40129 + pt_pax_flags = xattr_pax_flags;
40130 + else if (xattr_pax_flags == ~0UL)
40131 + xattr_pax_flags = pt_pax_flags;
40132 + if (pt_pax_flags != xattr_pax_flags)
40133 + return -EINVAL;
40134 + if (pt_pax_flags != ~0UL)
40135 + pax_flags = pt_pax_flags;
40136 +
40137 + if (0 > pax_check_flags(&pax_flags))
40138 + return -EINVAL;
40139 +
40140 + current->mm->pax_flags = pax_flags;
40141 + return 0;
40142 +}
40143 +#endif
40144 +
40145 /*
40146 * These are the functions used to load ELF style executables and shared
40147 * libraries. There is no binary dependent code anywhere else.
40148 @@ -544,6 +910,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
40149 {
40150 unsigned int random_variable = 0;
40151
40152 +#ifdef CONFIG_PAX_RANDUSTACK
40153 + if (randomize_va_space)
40154 + return stack_top - current->mm->delta_stack;
40155 +#endif
40156 +
40157 if ((current->flags & PF_RANDOMIZE) &&
40158 !(current->personality & ADDR_NO_RANDOMIZE)) {
40159 random_variable = get_random_int() & STACK_RND_MASK;
40160 @@ -562,7 +933,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
40161 unsigned long load_addr = 0, load_bias = 0;
40162 int load_addr_set = 0;
40163 char * elf_interpreter = NULL;
40164 - unsigned long error;
40165 + unsigned long error = 0;
40166 struct elf_phdr *elf_ppnt, *elf_phdata;
40167 unsigned long elf_bss, elf_brk;
40168 int retval, i;
40169 @@ -572,11 +943,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
40170 unsigned long start_code, end_code, start_data, end_data;
40171 unsigned long reloc_func_desc __maybe_unused = 0;
40172 int executable_stack = EXSTACK_DEFAULT;
40173 - unsigned long def_flags = 0;
40174 struct {
40175 struct elfhdr elf_ex;
40176 struct elfhdr interp_elf_ex;
40177 } *loc;
40178 + unsigned long pax_task_size = TASK_SIZE;
40179
40180 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
40181 if (!loc) {
40182 @@ -713,11 +1084,81 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
40183
40184 /* OK, This is the point of no return */
40185 current->flags &= ~PF_FORKNOEXEC;
40186 - current->mm->def_flags = def_flags;
40187 +
40188 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
40189 + current->mm->pax_flags = 0UL;
40190 +#endif
40191 +
40192 +#ifdef CONFIG_PAX_DLRESOLVE
40193 + current->mm->call_dl_resolve = 0UL;
40194 +#endif
40195 +
40196 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
40197 + current->mm->call_syscall = 0UL;
40198 +#endif
40199 +
40200 +#ifdef CONFIG_PAX_ASLR
40201 + current->mm->delta_mmap = 0UL;
40202 + current->mm->delta_stack = 0UL;
40203 +#endif
40204 +
40205 + current->mm->def_flags = 0;
40206 +
40207 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_XATTR_PAX_FLAGS)
40208 + if (0 > pax_parse_pax_flags(&loc->elf_ex, elf_phdata, bprm->file)) {
40209 + send_sig(SIGKILL, current, 0);
40210 + goto out_free_dentry;
40211 + }
40212 +#endif
40213 +
40214 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
40215 + pax_set_initial_flags(bprm);
40216 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
40217 + if (pax_set_initial_flags_func)
40218 + (pax_set_initial_flags_func)(bprm);
40219 +#endif
40220 +
40221 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
40222 + if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) {
40223 + current->mm->context.user_cs_limit = PAGE_SIZE;
40224 + current->mm->def_flags |= VM_PAGEEXEC;
40225 + }
40226 +#endif
40227 +
40228 +#ifdef CONFIG_PAX_SEGMEXEC
40229 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
40230 + current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
40231 + current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
40232 + pax_task_size = SEGMEXEC_TASK_SIZE;
40233 + current->mm->def_flags |= VM_NOHUGEPAGE;
40234 + }
40235 +#endif
40236 +
40237 +#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
40238 + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
40239 + set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
40240 + put_cpu();
40241 + }
40242 +#endif
40243
40244 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
40245 may depend on the personality. */
40246 SET_PERSONALITY(loc->elf_ex);
40247 +
40248 +#ifdef CONFIG_PAX_ASLR
40249 + if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
40250 + current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
40251 + current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
40252 + }
40253 +#endif
40254 +
40255 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
40256 + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
40257 + executable_stack = EXSTACK_DISABLE_X;
40258 + current->personality &= ~READ_IMPLIES_EXEC;
40259 + } else
40260 +#endif
40261 +
40262 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
40263 current->personality |= READ_IMPLIES_EXEC;
40264
40265 @@ -808,6 +1249,20 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
40266 #else
40267 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
40268 #endif
40269 +
40270 +#ifdef CONFIG_PAX_RANDMMAP
40271 + /* PaX: randomize base address at the default exe base if requested */
40272 + if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
40273 +#ifdef CONFIG_SPARC64
40274 + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
40275 +#else
40276 + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
40277 +#endif
40278 + load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
40279 + elf_flags |= MAP_FIXED;
40280 + }
40281 +#endif
40282 +
40283 }
40284
40285 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
40286 @@ -840,9 +1295,9 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
40287 * allowed task size. Note that p_filesz must always be
40288 * <= p_memsz so it is only necessary to check p_memsz.
40289 */
40290 - if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
40291 - elf_ppnt->p_memsz > TASK_SIZE ||
40292 - TASK_SIZE - elf_ppnt->p_memsz < k) {
40293 + if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
40294 + elf_ppnt->p_memsz > pax_task_size ||
40295 + pax_task_size - elf_ppnt->p_memsz < k) {
40296 /* set_brk can never work. Avoid overflows. */
40297 send_sig(SIGKILL, current, 0);
40298 retval = -EINVAL;
40299 @@ -870,6 +1325,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
40300 start_data += load_bias;
40301 end_data += load_bias;
40302
40303 +#ifdef CONFIG_PAX_RANDMMAP
40304 + if (current->mm->pax_flags & MF_PAX_RANDMMAP)
40305 + elf_brk += PAGE_SIZE + ((pax_get_random_long() & ~PAGE_MASK) << 4);
40306 +#endif
40307 +
40308 /* Calling set_brk effectively mmaps the pages that we need
40309 * for the bss and break sections. We must do this before
40310 * mapping in the interpreter, to make sure it doesn't wind
40311 @@ -881,9 +1341,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
40312 goto out_free_dentry;
40313 }
40314 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
40315 - send_sig(SIGSEGV, current, 0);
40316 - retval = -EFAULT; /* Nobody gets to see this, but.. */
40317 - goto out_free_dentry;
40318 + /*
40319 + * This bss-zeroing can fail if the ELF
40320 + * file specifies odd protections. So
40321 + * we don't check the return value
40322 + */
40323 }
40324
40325 if (elf_interpreter) {
40326 @@ -1098,7 +1560,7 @@ out:
40327 * Decide what to dump of a segment, part, all or none.
40328 */
40329 static unsigned long vma_dump_size(struct vm_area_struct *vma,
40330 - unsigned long mm_flags)
40331 + unsigned long mm_flags, long signr)
40332 {
40333 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
40334
40335 @@ -1132,7 +1594,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
40336 if (vma->vm_file == NULL)
40337 return 0;
40338
40339 - if (FILTER(MAPPED_PRIVATE))
40340 + if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
40341 goto whole;
40342
40343 /*
40344 @@ -1354,9 +1816,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
40345 {
40346 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
40347 int i = 0;
40348 - do
40349 + do {
40350 i += 2;
40351 - while (auxv[i - 2] != AT_NULL);
40352 + } while (auxv[i - 2] != AT_NULL);
40353 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
40354 }
40355
40356 @@ -1862,14 +2324,14 @@ static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
40357 }
40358
40359 static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
40360 - unsigned long mm_flags)
40361 + struct coredump_params *cprm)
40362 {
40363 struct vm_area_struct *vma;
40364 size_t size = 0;
40365
40366 for (vma = first_vma(current, gate_vma); vma != NULL;
40367 vma = next_vma(vma, gate_vma))
40368 - size += vma_dump_size(vma, mm_flags);
40369 + size += vma_dump_size(vma, cprm->mm_flags, cprm->signr);
40370 return size;
40371 }
40372
40373 @@ -1963,7 +2425,7 @@ static int elf_core_dump(struct coredump_params *cprm)
40374
40375 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
40376
40377 - offset += elf_core_vma_data_size(gate_vma, cprm->mm_flags);
40378 + offset += elf_core_vma_data_size(gate_vma, cprm);
40379 offset += elf_core_extra_data_size();
40380 e_shoff = offset;
40381
40382 @@ -1977,10 +2439,12 @@ static int elf_core_dump(struct coredump_params *cprm)
40383 offset = dataoff;
40384
40385 size += sizeof(*elf);
40386 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
40387 if (size > cprm->limit || !dump_write(cprm->file, elf, sizeof(*elf)))
40388 goto end_coredump;
40389
40390 size += sizeof(*phdr4note);
40391 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
40392 if (size > cprm->limit
40393 || !dump_write(cprm->file, phdr4note, sizeof(*phdr4note)))
40394 goto end_coredump;
40395 @@ -1994,7 +2458,7 @@ static int elf_core_dump(struct coredump_params *cprm)
40396 phdr.p_offset = offset;
40397 phdr.p_vaddr = vma->vm_start;
40398 phdr.p_paddr = 0;
40399 - phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags);
40400 + phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags, cprm->signr);
40401 phdr.p_memsz = vma->vm_end - vma->vm_start;
40402 offset += phdr.p_filesz;
40403 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
40404 @@ -2005,6 +2469,7 @@ static int elf_core_dump(struct coredump_params *cprm)
40405 phdr.p_align = ELF_EXEC_PAGESIZE;
40406
40407 size += sizeof(phdr);
40408 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
40409 if (size > cprm->limit
40410 || !dump_write(cprm->file, &phdr, sizeof(phdr)))
40411 goto end_coredump;
40412 @@ -2029,7 +2494,7 @@ static int elf_core_dump(struct coredump_params *cprm)
40413 unsigned long addr;
40414 unsigned long end;
40415
40416 - end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags);
40417 + end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags, cprm->signr);
40418
40419 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
40420 struct page *page;
40421 @@ -2038,6 +2503,7 @@ static int elf_core_dump(struct coredump_params *cprm)
40422 page = get_dump_page(addr);
40423 if (page) {
40424 void *kaddr = kmap(page);
40425 + gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
40426 stop = ((size += PAGE_SIZE) > cprm->limit) ||
40427 !dump_write(cprm->file, kaddr,
40428 PAGE_SIZE);
40429 @@ -2055,6 +2521,7 @@ static int elf_core_dump(struct coredump_params *cprm)
40430
40431 if (e_phnum == PN_XNUM) {
40432 size += sizeof(*shdr4extnum);
40433 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
40434 if (size > cprm->limit
40435 || !dump_write(cprm->file, shdr4extnum,
40436 sizeof(*shdr4extnum)))
40437 @@ -2075,6 +2542,97 @@ out:
40438
40439 #endif /* CONFIG_ELF_CORE */
40440
40441 +#ifdef CONFIG_PAX_MPROTECT
40442 +/* PaX: non-PIC ELF libraries need relocations on their executable segments
40443 + * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
40444 + * we'll remove VM_MAYWRITE for good on RELRO segments.
40445 + *
40446 + * The checks favour ld-linux.so behaviour which operates on a per ELF segment
40447 + * basis because we want to allow the common case and not the special ones.
40448 + */
40449 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
40450 +{
40451 + struct elfhdr elf_h;
40452 + struct elf_phdr elf_p;
40453 + unsigned long i;
40454 + unsigned long oldflags;
40455 + bool is_textrel_rw, is_textrel_rx, is_relro;
40456 +
40457 + if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
40458 + return;
40459 +
40460 + oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
40461 + newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
40462 +
40463 +#ifdef CONFIG_PAX_ELFRELOCS
40464 + /* possible TEXTREL */
40465 + is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
40466 + is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
40467 +#else
40468 + is_textrel_rw = false;
40469 + is_textrel_rx = false;
40470 +#endif
40471 +
40472 + /* possible RELRO */
40473 + is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
40474 +
40475 + if (!is_textrel_rw && !is_textrel_rx && !is_relro)
40476 + return;
40477 +
40478 + if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
40479 + memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
40480 +
40481 +#ifdef CONFIG_PAX_ETEXECRELOCS
40482 + ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
40483 +#else
40484 + ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
40485 +#endif
40486 +
40487 + (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
40488 + !elf_check_arch(&elf_h) ||
40489 + elf_h.e_phentsize != sizeof(struct elf_phdr) ||
40490 + elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
40491 + return;
40492 +
40493 + for (i = 0UL; i < elf_h.e_phnum; i++) {
40494 + if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
40495 + return;
40496 + switch (elf_p.p_type) {
40497 + case PT_DYNAMIC:
40498 + if (!is_textrel_rw && !is_textrel_rx)
40499 + continue;
40500 + i = 0UL;
40501 + while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
40502 + elf_dyn dyn;
40503 +
40504 + if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
40505 + return;
40506 + if (dyn.d_tag == DT_NULL)
40507 + return;
40508 + if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
40509 + gr_log_textrel(vma);
40510 + if (is_textrel_rw)
40511 + vma->vm_flags |= VM_MAYWRITE;
40512 + else
40513 + /* PaX: disallow write access after relocs are done, hopefully noone else needs it... */
40514 + vma->vm_flags &= ~VM_MAYWRITE;
40515 + return;
40516 + }
40517 + i++;
40518 + }
40519 + return;
40520 +
40521 + case PT_GNU_RELRO:
40522 + if (!is_relro)
40523 + continue;
40524 + if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
40525 + vma->vm_flags &= ~VM_MAYWRITE;
40526 + return;
40527 + }
40528 + }
40529 +}
40530 +#endif
40531 +
40532 static int __init init_elf_binfmt(void)
40533 {
40534 return register_binfmt(&elf_format);
40535 diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
40536 index 1bffbe0..c8c283e 100644
40537 --- a/fs/binfmt_flat.c
40538 +++ b/fs/binfmt_flat.c
40539 @@ -567,7 +567,9 @@ static int load_flat_file(struct linux_binprm * bprm,
40540 realdatastart = (unsigned long) -ENOMEM;
40541 printk("Unable to allocate RAM for process data, errno %d\n",
40542 (int)-realdatastart);
40543 + down_write(&current->mm->mmap_sem);
40544 do_munmap(current->mm, textpos, text_len);
40545 + up_write(&current->mm->mmap_sem);
40546 ret = realdatastart;
40547 goto err;
40548 }
40549 @@ -591,8 +593,10 @@ static int load_flat_file(struct linux_binprm * bprm,
40550 }
40551 if (IS_ERR_VALUE(result)) {
40552 printk("Unable to read data+bss, errno %d\n", (int)-result);
40553 + down_write(&current->mm->mmap_sem);
40554 do_munmap(current->mm, textpos, text_len);
40555 do_munmap(current->mm, realdatastart, len);
40556 + up_write(&current->mm->mmap_sem);
40557 ret = result;
40558 goto err;
40559 }
40560 @@ -661,8 +665,10 @@ static int load_flat_file(struct linux_binprm * bprm,
40561 }
40562 if (IS_ERR_VALUE(result)) {
40563 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
40564 + down_write(&current->mm->mmap_sem);
40565 do_munmap(current->mm, textpos, text_len + data_len + extra +
40566 MAX_SHARED_LIBS * sizeof(unsigned long));
40567 + up_write(&current->mm->mmap_sem);
40568 ret = result;
40569 goto err;
40570 }
40571 diff --git a/fs/bio.c b/fs/bio.c
40572 index b1fe82c..84da0a9 100644
40573 --- a/fs/bio.c
40574 +++ b/fs/bio.c
40575 @@ -1233,7 +1233,7 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
40576 const int read = bio_data_dir(bio) == READ;
40577 struct bio_map_data *bmd = bio->bi_private;
40578 int i;
40579 - char *p = bmd->sgvecs[0].iov_base;
40580 + char *p = (char __force_kernel *)bmd->sgvecs[0].iov_base;
40581
40582 __bio_for_each_segment(bvec, bio, i, 0) {
40583 char *addr = page_address(bvec->bv_page);
40584 diff --git a/fs/block_dev.c b/fs/block_dev.c
40585 index b07f1da..9efcb92 100644
40586 --- a/fs/block_dev.c
40587 +++ b/fs/block_dev.c
40588 @@ -681,7 +681,7 @@ static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
40589 else if (bdev->bd_contains == bdev)
40590 return true; /* is a whole device which isn't held */
40591
40592 - else if (whole->bd_holder == bd_may_claim)
40593 + else if (whole->bd_holder == (void *)bd_may_claim)
40594 return true; /* is a partition of a device that is being partitioned */
40595 else if (whole->bd_holder != NULL)
40596 return false; /* is a partition of a held device */
40597 diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
40598 index dede441..f2a2507 100644
40599 --- a/fs/btrfs/ctree.c
40600 +++ b/fs/btrfs/ctree.c
40601 @@ -488,9 +488,12 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
40602 free_extent_buffer(buf);
40603 add_root_to_dirty_list(root);
40604 } else {
40605 - if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
40606 - parent_start = parent->start;
40607 - else
40608 + if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
40609 + if (parent)
40610 + parent_start = parent->start;
40611 + else
40612 + parent_start = 0;
40613 + } else
40614 parent_start = 0;
40615
40616 WARN_ON(trans->transid != btrfs_header_generation(parent));
40617 diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
40618 index fd1a06d..6e9033d 100644
40619 --- a/fs/btrfs/inode.c
40620 +++ b/fs/btrfs/inode.c
40621 @@ -6895,7 +6895,7 @@ fail:
40622 return -ENOMEM;
40623 }
40624
40625 -static int btrfs_getattr(struct vfsmount *mnt,
40626 +int btrfs_getattr(struct vfsmount *mnt,
40627 struct dentry *dentry, struct kstat *stat)
40628 {
40629 struct inode *inode = dentry->d_inode;
40630 @@ -6909,6 +6909,14 @@ static int btrfs_getattr(struct vfsmount *mnt,
40631 return 0;
40632 }
40633
40634 +EXPORT_SYMBOL(btrfs_getattr);
40635 +
40636 +dev_t get_btrfs_dev_from_inode(struct inode *inode)
40637 +{
40638 + return BTRFS_I(inode)->root->anon_dev;
40639 +}
40640 +EXPORT_SYMBOL(get_btrfs_dev_from_inode);
40641 +
40642 /*
40643 * If a file is moved, it will inherit the cow and compression flags of the new
40644 * directory.
40645 diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
40646 index c04f02c..f5c9e2e 100644
40647 --- a/fs/btrfs/ioctl.c
40648 +++ b/fs/btrfs/ioctl.c
40649 @@ -2733,9 +2733,12 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
40650 for (i = 0; i < num_types; i++) {
40651 struct btrfs_space_info *tmp;
40652
40653 + /* Don't copy in more than we allocated */
40654 if (!slot_count)
40655 break;
40656
40657 + slot_count--;
40658 +
40659 info = NULL;
40660 rcu_read_lock();
40661 list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
40662 @@ -2757,15 +2760,12 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
40663 memcpy(dest, &space, sizeof(space));
40664 dest++;
40665 space_args.total_spaces++;
40666 - slot_count--;
40667 }
40668 - if (!slot_count)
40669 - break;
40670 }
40671 up_read(&info->groups_sem);
40672 }
40673
40674 - user_dest = (struct btrfs_ioctl_space_info *)
40675 + user_dest = (struct btrfs_ioctl_space_info __user *)
40676 (arg + sizeof(struct btrfs_ioctl_space_args));
40677
40678 if (copy_to_user(user_dest, dest_orig, alloc_size))
40679 diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
40680 index cfb5543..1ae7347 100644
40681 --- a/fs/btrfs/relocation.c
40682 +++ b/fs/btrfs/relocation.c
40683 @@ -1244,7 +1244,7 @@ static int __update_reloc_root(struct btrfs_root *root, int del)
40684 }
40685 spin_unlock(&rc->reloc_root_tree.lock);
40686
40687 - BUG_ON((struct btrfs_root *)node->data != root);
40688 + BUG_ON(!node || (struct btrfs_root *)node->data != root);
40689
40690 if (!del) {
40691 spin_lock(&rc->reloc_root_tree.lock);
40692 diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
40693 index 622f469..e8d2d55 100644
40694 --- a/fs/cachefiles/bind.c
40695 +++ b/fs/cachefiles/bind.c
40696 @@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
40697 args);
40698
40699 /* start by checking things over */
40700 - ASSERT(cache->fstop_percent >= 0 &&
40701 - cache->fstop_percent < cache->fcull_percent &&
40702 + ASSERT(cache->fstop_percent < cache->fcull_percent &&
40703 cache->fcull_percent < cache->frun_percent &&
40704 cache->frun_percent < 100);
40705
40706 - ASSERT(cache->bstop_percent >= 0 &&
40707 - cache->bstop_percent < cache->bcull_percent &&
40708 + ASSERT(cache->bstop_percent < cache->bcull_percent &&
40709 cache->bcull_percent < cache->brun_percent &&
40710 cache->brun_percent < 100);
40711
40712 diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
40713 index 0a1467b..6a53245 100644
40714 --- a/fs/cachefiles/daemon.c
40715 +++ b/fs/cachefiles/daemon.c
40716 @@ -196,7 +196,7 @@ static ssize_t cachefiles_daemon_read(struct file *file, char __user *_buffer,
40717 if (n > buflen)
40718 return -EMSGSIZE;
40719
40720 - if (copy_to_user(_buffer, buffer, n) != 0)
40721 + if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0)
40722 return -EFAULT;
40723
40724 return n;
40725 @@ -222,7 +222,7 @@ static ssize_t cachefiles_daemon_write(struct file *file,
40726 if (test_bit(CACHEFILES_DEAD, &cache->flags))
40727 return -EIO;
40728
40729 - if (datalen < 0 || datalen > PAGE_SIZE - 1)
40730 + if (datalen > PAGE_SIZE - 1)
40731 return -EOPNOTSUPP;
40732
40733 /* drag the command string into the kernel so we can parse it */
40734 @@ -386,7 +386,7 @@ static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
40735 if (args[0] != '%' || args[1] != '\0')
40736 return -EINVAL;
40737
40738 - if (fstop < 0 || fstop >= cache->fcull_percent)
40739 + if (fstop >= cache->fcull_percent)
40740 return cachefiles_daemon_range_error(cache, args);
40741
40742 cache->fstop_percent = fstop;
40743 @@ -458,7 +458,7 @@ static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args)
40744 if (args[0] != '%' || args[1] != '\0')
40745 return -EINVAL;
40746
40747 - if (bstop < 0 || bstop >= cache->bcull_percent)
40748 + if (bstop >= cache->bcull_percent)
40749 return cachefiles_daemon_range_error(cache, args);
40750
40751 cache->bstop_percent = bstop;
40752 diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
40753 index bd6bc1b..b627b53 100644
40754 --- a/fs/cachefiles/internal.h
40755 +++ b/fs/cachefiles/internal.h
40756 @@ -57,7 +57,7 @@ struct cachefiles_cache {
40757 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
40758 struct rb_root active_nodes; /* active nodes (can't be culled) */
40759 rwlock_t active_lock; /* lock for active_nodes */
40760 - atomic_t gravecounter; /* graveyard uniquifier */
40761 + atomic_unchecked_t gravecounter; /* graveyard uniquifier */
40762 unsigned frun_percent; /* when to stop culling (% files) */
40763 unsigned fcull_percent; /* when to start culling (% files) */
40764 unsigned fstop_percent; /* when to stop allocating (% files) */
40765 @@ -169,19 +169,19 @@ extern int cachefiles_check_in_use(struct cachefiles_cache *cache,
40766 * proc.c
40767 */
40768 #ifdef CONFIG_CACHEFILES_HISTOGRAM
40769 -extern atomic_t cachefiles_lookup_histogram[HZ];
40770 -extern atomic_t cachefiles_mkdir_histogram[HZ];
40771 -extern atomic_t cachefiles_create_histogram[HZ];
40772 +extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
40773 +extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
40774 +extern atomic_unchecked_t cachefiles_create_histogram[HZ];
40775
40776 extern int __init cachefiles_proc_init(void);
40777 extern void cachefiles_proc_cleanup(void);
40778 static inline
40779 -void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
40780 +void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
40781 {
40782 unsigned long jif = jiffies - start_jif;
40783 if (jif >= HZ)
40784 jif = HZ - 1;
40785 - atomic_inc(&histogram[jif]);
40786 + atomic_inc_unchecked(&histogram[jif]);
40787 }
40788
40789 #else
40790 diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
40791 index a0358c2..d6137f2 100644
40792 --- a/fs/cachefiles/namei.c
40793 +++ b/fs/cachefiles/namei.c
40794 @@ -318,7 +318,7 @@ try_again:
40795 /* first step is to make up a grave dentry in the graveyard */
40796 sprintf(nbuffer, "%08x%08x",
40797 (uint32_t) get_seconds(),
40798 - (uint32_t) atomic_inc_return(&cache->gravecounter));
40799 + (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
40800
40801 /* do the multiway lock magic */
40802 trap = lock_rename(cache->graveyard, dir);
40803 diff --git a/fs/cachefiles/proc.c b/fs/cachefiles/proc.c
40804 index eccd339..4c1d995 100644
40805 --- a/fs/cachefiles/proc.c
40806 +++ b/fs/cachefiles/proc.c
40807 @@ -14,9 +14,9 @@
40808 #include <linux/seq_file.h>
40809 #include "internal.h"
40810
40811 -atomic_t cachefiles_lookup_histogram[HZ];
40812 -atomic_t cachefiles_mkdir_histogram[HZ];
40813 -atomic_t cachefiles_create_histogram[HZ];
40814 +atomic_unchecked_t cachefiles_lookup_histogram[HZ];
40815 +atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
40816 +atomic_unchecked_t cachefiles_create_histogram[HZ];
40817
40818 /*
40819 * display the latency histogram
40820 @@ -35,9 +35,9 @@ static int cachefiles_histogram_show(struct seq_file *m, void *v)
40821 return 0;
40822 default:
40823 index = (unsigned long) v - 3;
40824 - x = atomic_read(&cachefiles_lookup_histogram[index]);
40825 - y = atomic_read(&cachefiles_mkdir_histogram[index]);
40826 - z = atomic_read(&cachefiles_create_histogram[index]);
40827 + x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
40828 + y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
40829 + z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
40830 if (x == 0 && y == 0 && z == 0)
40831 return 0;
40832
40833 diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
40834 index 0e3c092..818480e 100644
40835 --- a/fs/cachefiles/rdwr.c
40836 +++ b/fs/cachefiles/rdwr.c
40837 @@ -945,7 +945,7 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
40838 old_fs = get_fs();
40839 set_fs(KERNEL_DS);
40840 ret = file->f_op->write(
40841 - file, (const void __user *) data, len, &pos);
40842 + file, (const void __force_user *) data, len, &pos);
40843 set_fs(old_fs);
40844 kunmap(page);
40845 if (ret != len)
40846 diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
40847 index 9895400..fa40a7d 100644
40848 --- a/fs/ceph/dir.c
40849 +++ b/fs/ceph/dir.c
40850 @@ -244,7 +244,7 @@ static int ceph_readdir(struct file *filp, void *dirent, filldir_t filldir)
40851 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
40852 struct ceph_mds_client *mdsc = fsc->mdsc;
40853 unsigned frag = fpos_frag(filp->f_pos);
40854 - int off = fpos_off(filp->f_pos);
40855 + unsigned int off = fpos_off(filp->f_pos);
40856 int err;
40857 u32 ftype;
40858 struct ceph_mds_reply_info_parsed *rinfo;
40859 diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
40860 index 84e8c07..6170d31 100644
40861 --- a/fs/cifs/cifs_debug.c
40862 +++ b/fs/cifs/cifs_debug.c
40863 @@ -265,8 +265,8 @@ static ssize_t cifs_stats_proc_write(struct file *file,
40864
40865 if (c == '1' || c == 'y' || c == 'Y' || c == '0') {
40866 #ifdef CONFIG_CIFS_STATS2
40867 - atomic_set(&totBufAllocCount, 0);
40868 - atomic_set(&totSmBufAllocCount, 0);
40869 + atomic_set_unchecked(&totBufAllocCount, 0);
40870 + atomic_set_unchecked(&totSmBufAllocCount, 0);
40871 #endif /* CONFIG_CIFS_STATS2 */
40872 spin_lock(&cifs_tcp_ses_lock);
40873 list_for_each(tmp1, &cifs_tcp_ses_list) {
40874 @@ -279,25 +279,25 @@ static ssize_t cifs_stats_proc_write(struct file *file,
40875 tcon = list_entry(tmp3,
40876 struct cifs_tcon,
40877 tcon_list);
40878 - atomic_set(&tcon->num_smbs_sent, 0);
40879 - atomic_set(&tcon->num_writes, 0);
40880 - atomic_set(&tcon->num_reads, 0);
40881 - atomic_set(&tcon->num_oplock_brks, 0);
40882 - atomic_set(&tcon->num_opens, 0);
40883 - atomic_set(&tcon->num_posixopens, 0);
40884 - atomic_set(&tcon->num_posixmkdirs, 0);
40885 - atomic_set(&tcon->num_closes, 0);
40886 - atomic_set(&tcon->num_deletes, 0);
40887 - atomic_set(&tcon->num_mkdirs, 0);
40888 - atomic_set(&tcon->num_rmdirs, 0);
40889 - atomic_set(&tcon->num_renames, 0);
40890 - atomic_set(&tcon->num_t2renames, 0);
40891 - atomic_set(&tcon->num_ffirst, 0);
40892 - atomic_set(&tcon->num_fnext, 0);
40893 - atomic_set(&tcon->num_fclose, 0);
40894 - atomic_set(&tcon->num_hardlinks, 0);
40895 - atomic_set(&tcon->num_symlinks, 0);
40896 - atomic_set(&tcon->num_locks, 0);
40897 + atomic_set_unchecked(&tcon->num_smbs_sent, 0);
40898 + atomic_set_unchecked(&tcon->num_writes, 0);
40899 + atomic_set_unchecked(&tcon->num_reads, 0);
40900 + atomic_set_unchecked(&tcon->num_oplock_brks, 0);
40901 + atomic_set_unchecked(&tcon->num_opens, 0);
40902 + atomic_set_unchecked(&tcon->num_posixopens, 0);
40903 + atomic_set_unchecked(&tcon->num_posixmkdirs, 0);
40904 + atomic_set_unchecked(&tcon->num_closes, 0);
40905 + atomic_set_unchecked(&tcon->num_deletes, 0);
40906 + atomic_set_unchecked(&tcon->num_mkdirs, 0);
40907 + atomic_set_unchecked(&tcon->num_rmdirs, 0);
40908 + atomic_set_unchecked(&tcon->num_renames, 0);
40909 + atomic_set_unchecked(&tcon->num_t2renames, 0);
40910 + atomic_set_unchecked(&tcon->num_ffirst, 0);
40911 + atomic_set_unchecked(&tcon->num_fnext, 0);
40912 + atomic_set_unchecked(&tcon->num_fclose, 0);
40913 + atomic_set_unchecked(&tcon->num_hardlinks, 0);
40914 + atomic_set_unchecked(&tcon->num_symlinks, 0);
40915 + atomic_set_unchecked(&tcon->num_locks, 0);
40916 }
40917 }
40918 }
40919 @@ -327,8 +327,8 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
40920 smBufAllocCount.counter, cifs_min_small);
40921 #ifdef CONFIG_CIFS_STATS2
40922 seq_printf(m, "Total Large %d Small %d Allocations\n",
40923 - atomic_read(&totBufAllocCount),
40924 - atomic_read(&totSmBufAllocCount));
40925 + atomic_read_unchecked(&totBufAllocCount),
40926 + atomic_read_unchecked(&totSmBufAllocCount));
40927 #endif /* CONFIG_CIFS_STATS2 */
40928
40929 seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount));
40930 @@ -357,41 +357,41 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
40931 if (tcon->need_reconnect)
40932 seq_puts(m, "\tDISCONNECTED ");
40933 seq_printf(m, "\nSMBs: %d Oplock Breaks: %d",
40934 - atomic_read(&tcon->num_smbs_sent),
40935 - atomic_read(&tcon->num_oplock_brks));
40936 + atomic_read_unchecked(&tcon->num_smbs_sent),
40937 + atomic_read_unchecked(&tcon->num_oplock_brks));
40938 seq_printf(m, "\nReads: %d Bytes: %lld",
40939 - atomic_read(&tcon->num_reads),
40940 + atomic_read_unchecked(&tcon->num_reads),
40941 (long long)(tcon->bytes_read));
40942 seq_printf(m, "\nWrites: %d Bytes: %lld",
40943 - atomic_read(&tcon->num_writes),
40944 + atomic_read_unchecked(&tcon->num_writes),
40945 (long long)(tcon->bytes_written));
40946 seq_printf(m, "\nFlushes: %d",
40947 - atomic_read(&tcon->num_flushes));
40948 + atomic_read_unchecked(&tcon->num_flushes));
40949 seq_printf(m, "\nLocks: %d HardLinks: %d "
40950 "Symlinks: %d",
40951 - atomic_read(&tcon->num_locks),
40952 - atomic_read(&tcon->num_hardlinks),
40953 - atomic_read(&tcon->num_symlinks));
40954 + atomic_read_unchecked(&tcon->num_locks),
40955 + atomic_read_unchecked(&tcon->num_hardlinks),
40956 + atomic_read_unchecked(&tcon->num_symlinks));
40957 seq_printf(m, "\nOpens: %d Closes: %d "
40958 "Deletes: %d",
40959 - atomic_read(&tcon->num_opens),
40960 - atomic_read(&tcon->num_closes),
40961 - atomic_read(&tcon->num_deletes));
40962 + atomic_read_unchecked(&tcon->num_opens),
40963 + atomic_read_unchecked(&tcon->num_closes),
40964 + atomic_read_unchecked(&tcon->num_deletes));
40965 seq_printf(m, "\nPosix Opens: %d "
40966 "Posix Mkdirs: %d",
40967 - atomic_read(&tcon->num_posixopens),
40968 - atomic_read(&tcon->num_posixmkdirs));
40969 + atomic_read_unchecked(&tcon->num_posixopens),
40970 + atomic_read_unchecked(&tcon->num_posixmkdirs));
40971 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
40972 - atomic_read(&tcon->num_mkdirs),
40973 - atomic_read(&tcon->num_rmdirs));
40974 + atomic_read_unchecked(&tcon->num_mkdirs),
40975 + atomic_read_unchecked(&tcon->num_rmdirs));
40976 seq_printf(m, "\nRenames: %d T2 Renames %d",
40977 - atomic_read(&tcon->num_renames),
40978 - atomic_read(&tcon->num_t2renames));
40979 + atomic_read_unchecked(&tcon->num_renames),
40980 + atomic_read_unchecked(&tcon->num_t2renames));
40981 seq_printf(m, "\nFindFirst: %d FNext %d "
40982 "FClose %d",
40983 - atomic_read(&tcon->num_ffirst),
40984 - atomic_read(&tcon->num_fnext),
40985 - atomic_read(&tcon->num_fclose));
40986 + atomic_read_unchecked(&tcon->num_ffirst),
40987 + atomic_read_unchecked(&tcon->num_fnext),
40988 + atomic_read_unchecked(&tcon->num_fclose));
40989 }
40990 }
40991 }
40992 diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
40993 index 8f1fe32..38f9e27 100644
40994 --- a/fs/cifs/cifsfs.c
40995 +++ b/fs/cifs/cifsfs.c
40996 @@ -989,7 +989,7 @@ cifs_init_request_bufs(void)
40997 cifs_req_cachep = kmem_cache_create("cifs_request",
40998 CIFSMaxBufSize +
40999 MAX_CIFS_HDR_SIZE, 0,
41000 - SLAB_HWCACHE_ALIGN, NULL);
41001 + SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
41002 if (cifs_req_cachep == NULL)
41003 return -ENOMEM;
41004
41005 @@ -1016,7 +1016,7 @@ cifs_init_request_bufs(void)
41006 efficient to alloc 1 per page off the slab compared to 17K (5page)
41007 alloc of large cifs buffers even when page debugging is on */
41008 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
41009 - MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
41010 + MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
41011 NULL);
41012 if (cifs_sm_req_cachep == NULL) {
41013 mempool_destroy(cifs_req_poolp);
41014 @@ -1101,8 +1101,8 @@ init_cifs(void)
41015 atomic_set(&bufAllocCount, 0);
41016 atomic_set(&smBufAllocCount, 0);
41017 #ifdef CONFIG_CIFS_STATS2
41018 - atomic_set(&totBufAllocCount, 0);
41019 - atomic_set(&totSmBufAllocCount, 0);
41020 + atomic_set_unchecked(&totBufAllocCount, 0);
41021 + atomic_set_unchecked(&totSmBufAllocCount, 0);
41022 #endif /* CONFIG_CIFS_STATS2 */
41023
41024 atomic_set(&midCount, 0);
41025 diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
41026 index 8238aa1..0347196 100644
41027 --- a/fs/cifs/cifsglob.h
41028 +++ b/fs/cifs/cifsglob.h
41029 @@ -392,28 +392,28 @@ struct cifs_tcon {
41030 __u16 Flags; /* optional support bits */
41031 enum statusEnum tidStatus;
41032 #ifdef CONFIG_CIFS_STATS
41033 - atomic_t num_smbs_sent;
41034 - atomic_t num_writes;
41035 - atomic_t num_reads;
41036 - atomic_t num_flushes;
41037 - atomic_t num_oplock_brks;
41038 - atomic_t num_opens;
41039 - atomic_t num_closes;
41040 - atomic_t num_deletes;
41041 - atomic_t num_mkdirs;
41042 - atomic_t num_posixopens;
41043 - atomic_t num_posixmkdirs;
41044 - atomic_t num_rmdirs;
41045 - atomic_t num_renames;
41046 - atomic_t num_t2renames;
41047 - atomic_t num_ffirst;
41048 - atomic_t num_fnext;
41049 - atomic_t num_fclose;
41050 - atomic_t num_hardlinks;
41051 - atomic_t num_symlinks;
41052 - atomic_t num_locks;
41053 - atomic_t num_acl_get;
41054 - atomic_t num_acl_set;
41055 + atomic_unchecked_t num_smbs_sent;
41056 + atomic_unchecked_t num_writes;
41057 + atomic_unchecked_t num_reads;
41058 + atomic_unchecked_t num_flushes;
41059 + atomic_unchecked_t num_oplock_brks;
41060 + atomic_unchecked_t num_opens;
41061 + atomic_unchecked_t num_closes;
41062 + atomic_unchecked_t num_deletes;
41063 + atomic_unchecked_t num_mkdirs;
41064 + atomic_unchecked_t num_posixopens;
41065 + atomic_unchecked_t num_posixmkdirs;
41066 + atomic_unchecked_t num_rmdirs;
41067 + atomic_unchecked_t num_renames;
41068 + atomic_unchecked_t num_t2renames;
41069 + atomic_unchecked_t num_ffirst;
41070 + atomic_unchecked_t num_fnext;
41071 + atomic_unchecked_t num_fclose;
41072 + atomic_unchecked_t num_hardlinks;
41073 + atomic_unchecked_t num_symlinks;
41074 + atomic_unchecked_t num_locks;
41075 + atomic_unchecked_t num_acl_get;
41076 + atomic_unchecked_t num_acl_set;
41077 #ifdef CONFIG_CIFS_STATS2
41078 unsigned long long time_writes;
41079 unsigned long long time_reads;
41080 @@ -628,7 +628,7 @@ convert_delimiter(char *path, char delim)
41081 }
41082
41083 #ifdef CONFIG_CIFS_STATS
41084 -#define cifs_stats_inc atomic_inc
41085 +#define cifs_stats_inc atomic_inc_unchecked
41086
41087 static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon,
41088 unsigned int bytes)
41089 @@ -985,8 +985,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnectCount;
41090 /* Various Debug counters */
41091 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
41092 #ifdef CONFIG_CIFS_STATS2
41093 -GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
41094 -GLOBAL_EXTERN atomic_t totSmBufAllocCount;
41095 +GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
41096 +GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
41097 #endif
41098 GLOBAL_EXTERN atomic_t smBufAllocCount;
41099 GLOBAL_EXTERN atomic_t midCount;
41100 diff --git a/fs/cifs/link.c b/fs/cifs/link.c
41101 index 6b0e064..94e6c3c 100644
41102 --- a/fs/cifs/link.c
41103 +++ b/fs/cifs/link.c
41104 @@ -600,7 +600,7 @@ symlink_exit:
41105
41106 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
41107 {
41108 - char *p = nd_get_link(nd);
41109 + const char *p = nd_get_link(nd);
41110 if (!IS_ERR(p))
41111 kfree(p);
41112 }
41113 diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
41114 index 703ef5c..2a44ed5 100644
41115 --- a/fs/cifs/misc.c
41116 +++ b/fs/cifs/misc.c
41117 @@ -156,7 +156,7 @@ cifs_buf_get(void)
41118 memset(ret_buf, 0, sizeof(struct smb_hdr) + 3);
41119 atomic_inc(&bufAllocCount);
41120 #ifdef CONFIG_CIFS_STATS2
41121 - atomic_inc(&totBufAllocCount);
41122 + atomic_inc_unchecked(&totBufAllocCount);
41123 #endif /* CONFIG_CIFS_STATS2 */
41124 }
41125
41126 @@ -191,7 +191,7 @@ cifs_small_buf_get(void)
41127 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
41128 atomic_inc(&smBufAllocCount);
41129 #ifdef CONFIG_CIFS_STATS2
41130 - atomic_inc(&totSmBufAllocCount);
41131 + atomic_inc_unchecked(&totSmBufAllocCount);
41132 #endif /* CONFIG_CIFS_STATS2 */
41133
41134 }
41135 diff --git a/fs/coda/cache.c b/fs/coda/cache.c
41136 index 6901578..d402eb5 100644
41137 --- a/fs/coda/cache.c
41138 +++ b/fs/coda/cache.c
41139 @@ -24,7 +24,7 @@
41140 #include "coda_linux.h"
41141 #include "coda_cache.h"
41142
41143 -static atomic_t permission_epoch = ATOMIC_INIT(0);
41144 +static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
41145
41146 /* replace or extend an acl cache hit */
41147 void coda_cache_enter(struct inode *inode, int mask)
41148 @@ -32,7 +32,7 @@ void coda_cache_enter(struct inode *inode, int mask)
41149 struct coda_inode_info *cii = ITOC(inode);
41150
41151 spin_lock(&cii->c_lock);
41152 - cii->c_cached_epoch = atomic_read(&permission_epoch);
41153 + cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
41154 if (cii->c_uid != current_fsuid()) {
41155 cii->c_uid = current_fsuid();
41156 cii->c_cached_perm = mask;
41157 @@ -46,14 +46,14 @@ void coda_cache_clear_inode(struct inode *inode)
41158 {
41159 struct coda_inode_info *cii = ITOC(inode);
41160 spin_lock(&cii->c_lock);
41161 - cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
41162 + cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
41163 spin_unlock(&cii->c_lock);
41164 }
41165
41166 /* remove all acl caches */
41167 void coda_cache_clear_all(struct super_block *sb)
41168 {
41169 - atomic_inc(&permission_epoch);
41170 + atomic_inc_unchecked(&permission_epoch);
41171 }
41172
41173
41174 @@ -66,7 +66,7 @@ int coda_cache_check(struct inode *inode, int mask)
41175 spin_lock(&cii->c_lock);
41176 hit = (mask & cii->c_cached_perm) == mask &&
41177 cii->c_uid == current_fsuid() &&
41178 - cii->c_cached_epoch == atomic_read(&permission_epoch);
41179 + cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
41180 spin_unlock(&cii->c_lock);
41181
41182 return hit;
41183 diff --git a/fs/compat.c b/fs/compat.c
41184 index c987875..08771ca 100644
41185 --- a/fs/compat.c
41186 +++ b/fs/compat.c
41187 @@ -132,8 +132,8 @@ asmlinkage long compat_sys_utimes(const char __user *filename, struct compat_tim
41188 static int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf)
41189 {
41190 compat_ino_t ino = stat->ino;
41191 - typeof(ubuf->st_uid) uid = 0;
41192 - typeof(ubuf->st_gid) gid = 0;
41193 + typeof(((struct compat_stat *)0)->st_uid) uid = 0;
41194 + typeof(((struct compat_stat *)0)->st_gid) gid = 0;
41195 int err;
41196
41197 SET_UID(uid, stat->uid);
41198 @@ -504,7 +504,7 @@ compat_sys_io_setup(unsigned nr_reqs, u32 __user *ctx32p)
41199
41200 set_fs(KERNEL_DS);
41201 /* The __user pointer cast is valid because of the set_fs() */
41202 - ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
41203 + ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64);
41204 set_fs(oldfs);
41205 /* truncating is ok because it's a user address */
41206 if (!ret)
41207 @@ -562,7 +562,7 @@ ssize_t compat_rw_copy_check_uvector(int type,
41208 goto out;
41209
41210 ret = -EINVAL;
41211 - if (nr_segs > UIO_MAXIOV || nr_segs < 0)
41212 + if (nr_segs > UIO_MAXIOV)
41213 goto out;
41214 if (nr_segs > fast_segs) {
41215 ret = -ENOMEM;
41216 @@ -845,6 +845,7 @@ struct compat_old_linux_dirent {
41217
41218 struct compat_readdir_callback {
41219 struct compat_old_linux_dirent __user *dirent;
41220 + struct file * file;
41221 int result;
41222 };
41223
41224 @@ -862,6 +863,10 @@ static int compat_fillonedir(void *__buf, const char *name, int namlen,
41225 buf->result = -EOVERFLOW;
41226 return -EOVERFLOW;
41227 }
41228 +
41229 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
41230 + return 0;
41231 +
41232 buf->result++;
41233 dirent = buf->dirent;
41234 if (!access_ok(VERIFY_WRITE, dirent,
41235 @@ -894,6 +899,7 @@ asmlinkage long compat_sys_old_readdir(unsigned int fd,
41236
41237 buf.result = 0;
41238 buf.dirent = dirent;
41239 + buf.file = file;
41240
41241 error = vfs_readdir(file, compat_fillonedir, &buf);
41242 if (buf.result)
41243 @@ -914,6 +920,7 @@ struct compat_linux_dirent {
41244 struct compat_getdents_callback {
41245 struct compat_linux_dirent __user *current_dir;
41246 struct compat_linux_dirent __user *previous;
41247 + struct file * file;
41248 int count;
41249 int error;
41250 };
41251 @@ -935,6 +942,10 @@ static int compat_filldir(void *__buf, const char *name, int namlen,
41252 buf->error = -EOVERFLOW;
41253 return -EOVERFLOW;
41254 }
41255 +
41256 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
41257 + return 0;
41258 +
41259 dirent = buf->previous;
41260 if (dirent) {
41261 if (__put_user(offset, &dirent->d_off))
41262 @@ -982,6 +993,7 @@ asmlinkage long compat_sys_getdents(unsigned int fd,
41263 buf.previous = NULL;
41264 buf.count = count;
41265 buf.error = 0;
41266 + buf.file = file;
41267
41268 error = vfs_readdir(file, compat_filldir, &buf);
41269 if (error >= 0)
41270 @@ -1003,6 +1015,7 @@ out:
41271 struct compat_getdents_callback64 {
41272 struct linux_dirent64 __user *current_dir;
41273 struct linux_dirent64 __user *previous;
41274 + struct file * file;
41275 int count;
41276 int error;
41277 };
41278 @@ -1019,6 +1032,10 @@ static int compat_filldir64(void * __buf, const char * name, int namlen, loff_t
41279 buf->error = -EINVAL; /* only used if we fail.. */
41280 if (reclen > buf->count)
41281 return -EINVAL;
41282 +
41283 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
41284 + return 0;
41285 +
41286 dirent = buf->previous;
41287
41288 if (dirent) {
41289 @@ -1070,13 +1087,14 @@ asmlinkage long compat_sys_getdents64(unsigned int fd,
41290 buf.previous = NULL;
41291 buf.count = count;
41292 buf.error = 0;
41293 + buf.file = file;
41294
41295 error = vfs_readdir(file, compat_filldir64, &buf);
41296 if (error >= 0)
41297 error = buf.error;
41298 lastdirent = buf.previous;
41299 if (lastdirent) {
41300 - typeof(lastdirent->d_off) d_off = file->f_pos;
41301 + typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
41302 if (__put_user_unaligned(d_off, &lastdirent->d_off))
41303 error = -EFAULT;
41304 else
41305 diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c
41306 index 112e45a..b59845b 100644
41307 --- a/fs/compat_binfmt_elf.c
41308 +++ b/fs/compat_binfmt_elf.c
41309 @@ -30,11 +30,13 @@
41310 #undef elf_phdr
41311 #undef elf_shdr
41312 #undef elf_note
41313 +#undef elf_dyn
41314 #undef elf_addr_t
41315 #define elfhdr elf32_hdr
41316 #define elf_phdr elf32_phdr
41317 #define elf_shdr elf32_shdr
41318 #define elf_note elf32_note
41319 +#define elf_dyn Elf32_Dyn
41320 #define elf_addr_t Elf32_Addr
41321
41322 /*
41323 diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
41324 index 51352de..93292ff 100644
41325 --- a/fs/compat_ioctl.c
41326 +++ b/fs/compat_ioctl.c
41327 @@ -210,6 +210,8 @@ static int do_video_set_spu_palette(unsigned int fd, unsigned int cmd,
41328
41329 err = get_user(palp, &up->palette);
41330 err |= get_user(length, &up->length);
41331 + if (err)
41332 + return -EFAULT;
41333
41334 up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
41335 err = put_user(compat_ptr(palp), &up_native->palette);
41336 @@ -621,7 +623,7 @@ static int serial_struct_ioctl(unsigned fd, unsigned cmd,
41337 return -EFAULT;
41338 if (__get_user(udata, &ss32->iomem_base))
41339 return -EFAULT;
41340 - ss.iomem_base = compat_ptr(udata);
41341 + ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata);
41342 if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
41343 __get_user(ss.port_high, &ss32->port_high))
41344 return -EFAULT;
41345 @@ -796,7 +798,7 @@ static int compat_ioctl_preallocate(struct file *file,
41346 copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
41347 copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
41348 copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
41349 - copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
41350 + copy_in_user(p->l_pad, &p32->l_pad, 4*sizeof(u32)))
41351 return -EFAULT;
41352
41353 return ioctl_preallocate(file, p);
41354 @@ -1644,8 +1646,8 @@ asmlinkage long compat_sys_ioctl(unsigned int fd, unsigned int cmd,
41355 static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
41356 {
41357 unsigned int a, b;
41358 - a = *(unsigned int *)p;
41359 - b = *(unsigned int *)q;
41360 + a = *(const unsigned int *)p;
41361 + b = *(const unsigned int *)q;
41362 if (a > b)
41363 return 1;
41364 if (a < b)
41365 diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
41366 index 9a37a9b..35792b6 100644
41367 --- a/fs/configfs/dir.c
41368 +++ b/fs/configfs/dir.c
41369 @@ -1575,7 +1575,8 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
41370 }
41371 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
41372 struct configfs_dirent *next;
41373 - const char * name;
41374 + const unsigned char * name;
41375 + char d_name[sizeof(next->s_dentry->d_iname)];
41376 int len;
41377 struct inode *inode = NULL;
41378
41379 @@ -1585,7 +1586,12 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
41380 continue;
41381
41382 name = configfs_get_name(next);
41383 - len = strlen(name);
41384 + if (next->s_dentry && name == next->s_dentry->d_iname) {
41385 + len = next->s_dentry->d_name.len;
41386 + memcpy(d_name, name, len);
41387 + name = d_name;
41388 + } else
41389 + len = strlen(name);
41390
41391 /*
41392 * We'll have a dentry and an inode for
41393 diff --git a/fs/dcache.c b/fs/dcache.c
41394 index f7908ae..920a680 100644
41395 --- a/fs/dcache.c
41396 +++ b/fs/dcache.c
41397 @@ -3042,7 +3042,7 @@ void __init vfs_caches_init(unsigned long mempages)
41398 mempages -= reserve;
41399
41400 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
41401 - SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
41402 + SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
41403
41404 dcache_init();
41405 inode_init();
41406 diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c
41407 index 2a83425..b082cec 100644
41408 --- a/fs/ecryptfs/crypto.c
41409 +++ b/fs/ecryptfs/crypto.c
41410 @@ -417,17 +417,6 @@ static int ecryptfs_encrypt_extent(struct page *enc_extent_page,
41411 (unsigned long long)(extent_base + extent_offset), rc);
41412 goto out;
41413 }
41414 - if (unlikely(ecryptfs_verbosity > 0)) {
41415 - ecryptfs_printk(KERN_DEBUG, "Encrypting extent "
41416 - "with iv:\n");
41417 - ecryptfs_dump_hex(extent_iv, crypt_stat->iv_bytes);
41418 - ecryptfs_printk(KERN_DEBUG, "First 8 bytes before "
41419 - "encryption:\n");
41420 - ecryptfs_dump_hex((char *)
41421 - (page_address(page)
41422 - + (extent_offset * crypt_stat->extent_size)),
41423 - 8);
41424 - }
41425 rc = ecryptfs_encrypt_page_offset(crypt_stat, enc_extent_page, 0,
41426 page, (extent_offset
41427 * crypt_stat->extent_size),
41428 @@ -440,14 +429,6 @@ static int ecryptfs_encrypt_extent(struct page *enc_extent_page,
41429 goto out;
41430 }
41431 rc = 0;
41432 - if (unlikely(ecryptfs_verbosity > 0)) {
41433 - ecryptfs_printk(KERN_DEBUG, "Encrypt extent [0x%.16llx]; "
41434 - "rc = [%d]\n",
41435 - (unsigned long long)(extent_base + extent_offset), rc);
41436 - ecryptfs_printk(KERN_DEBUG, "First 8 bytes after "
41437 - "encryption:\n");
41438 - ecryptfs_dump_hex((char *)(page_address(enc_extent_page)), 8);
41439 - }
41440 out:
41441 return rc;
41442 }
41443 @@ -543,17 +524,6 @@ static int ecryptfs_decrypt_extent(struct page *page,
41444 (unsigned long long)(extent_base + extent_offset), rc);
41445 goto out;
41446 }
41447 - if (unlikely(ecryptfs_verbosity > 0)) {
41448 - ecryptfs_printk(KERN_DEBUG, "Decrypting extent "
41449 - "with iv:\n");
41450 - ecryptfs_dump_hex(extent_iv, crypt_stat->iv_bytes);
41451 - ecryptfs_printk(KERN_DEBUG, "First 8 bytes before "
41452 - "decryption:\n");
41453 - ecryptfs_dump_hex((char *)
41454 - (page_address(enc_extent_page)
41455 - + (extent_offset * crypt_stat->extent_size)),
41456 - 8);
41457 - }
41458 rc = ecryptfs_decrypt_page_offset(crypt_stat, page,
41459 (extent_offset
41460 * crypt_stat->extent_size),
41461 @@ -567,16 +537,6 @@ static int ecryptfs_decrypt_extent(struct page *page,
41462 goto out;
41463 }
41464 rc = 0;
41465 - if (unlikely(ecryptfs_verbosity > 0)) {
41466 - ecryptfs_printk(KERN_DEBUG, "Decrypt extent [0x%.16llx]; "
41467 - "rc = [%d]\n",
41468 - (unsigned long long)(extent_base + extent_offset), rc);
41469 - ecryptfs_printk(KERN_DEBUG, "First 8 bytes after "
41470 - "decryption:\n");
41471 - ecryptfs_dump_hex((char *)(page_address(page)
41472 - + (extent_offset
41473 - * crypt_stat->extent_size)), 8);
41474 - }
41475 out:
41476 return rc;
41477 }
41478 diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
41479 index 32f90a3..a766407 100644
41480 --- a/fs/ecryptfs/inode.c
41481 +++ b/fs/ecryptfs/inode.c
41482 @@ -691,7 +691,7 @@ static int ecryptfs_readlink_lower(struct dentry *dentry, char **buf,
41483 old_fs = get_fs();
41484 set_fs(get_ds());
41485 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
41486 - (char __user *)lower_buf,
41487 + (char __force_user *)lower_buf,
41488 lower_bufsiz);
41489 set_fs(old_fs);
41490 if (rc < 0)
41491 @@ -737,7 +737,7 @@ static void *ecryptfs_follow_link(struct dentry *dentry, struct nameidata *nd)
41492 }
41493 old_fs = get_fs();
41494 set_fs(get_ds());
41495 - rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len);
41496 + rc = dentry->d_inode->i_op->readlink(dentry, (char __force_user *)buf, len);
41497 set_fs(old_fs);
41498 if (rc < 0) {
41499 kfree(buf);
41500 @@ -752,7 +752,7 @@ out:
41501 static void
41502 ecryptfs_put_link(struct dentry *dentry, struct nameidata *nd, void *ptr)
41503 {
41504 - char *buf = nd_get_link(nd);
41505 + const char *buf = nd_get_link(nd);
41506 if (!IS_ERR(buf)) {
41507 /* Free the char* */
41508 kfree(buf);
41509 @@ -841,18 +841,6 @@ static int truncate_upper(struct dentry *dentry, struct iattr *ia,
41510 size_t num_zeros = (PAGE_CACHE_SIZE
41511 - (ia->ia_size & ~PAGE_CACHE_MASK));
41512
41513 -
41514 - /*
41515 - * XXX(truncate) this should really happen at the begginning
41516 - * of ->setattr. But the code is too messy to that as part
41517 - * of a larger patch. ecryptfs is also totally missing out
41518 - * on the inode_change_ok check at the beginning of
41519 - * ->setattr while would include this.
41520 - */
41521 - rc = inode_newsize_ok(inode, ia->ia_size);
41522 - if (rc)
41523 - goto out;
41524 -
41525 if (!(crypt_stat->flags & ECRYPTFS_ENCRYPTED)) {
41526 truncate_setsize(inode, ia->ia_size);
41527 lower_ia->ia_size = ia->ia_size;
41528 @@ -902,6 +890,28 @@ out:
41529 return rc;
41530 }
41531
41532 +static int ecryptfs_inode_newsize_ok(struct inode *inode, loff_t offset)
41533 +{
41534 + struct ecryptfs_crypt_stat *crypt_stat;
41535 + loff_t lower_oldsize, lower_newsize;
41536 +
41537 + crypt_stat = &ecryptfs_inode_to_private(inode)->crypt_stat;
41538 + lower_oldsize = upper_size_to_lower_size(crypt_stat,
41539 + i_size_read(inode));
41540 + lower_newsize = upper_size_to_lower_size(crypt_stat, offset);
41541 + if (lower_newsize > lower_oldsize) {
41542 + /*
41543 + * The eCryptfs inode and the new *lower* size are mixed here
41544 + * because we may not have the lower i_mutex held and/or it may
41545 + * not be appropriate to call inode_newsize_ok() with inodes
41546 + * from other filesystems.
41547 + */
41548 + return inode_newsize_ok(inode, lower_newsize);
41549 + }
41550 +
41551 + return 0;
41552 +}
41553 +
41554 /**
41555 * ecryptfs_truncate
41556 * @dentry: The ecryptfs layer dentry
41557 @@ -918,6 +928,10 @@ int ecryptfs_truncate(struct dentry *dentry, loff_t new_length)
41558 struct iattr lower_ia = { .ia_valid = 0 };
41559 int rc;
41560
41561 + rc = ecryptfs_inode_newsize_ok(dentry->d_inode, new_length);
41562 + if (rc)
41563 + return rc;
41564 +
41565 rc = truncate_upper(dentry, &ia, &lower_ia);
41566 if (!rc && lower_ia.ia_valid & ATTR_SIZE) {
41567 struct dentry *lower_dentry = ecryptfs_dentry_to_lower(dentry);
41568 @@ -997,6 +1011,16 @@ static int ecryptfs_setattr(struct dentry *dentry, struct iattr *ia)
41569 }
41570 }
41571 mutex_unlock(&crypt_stat->cs_mutex);
41572 +
41573 + rc = inode_change_ok(inode, ia);
41574 + if (rc)
41575 + goto out;
41576 + if (ia->ia_valid & ATTR_SIZE) {
41577 + rc = ecryptfs_inode_newsize_ok(inode, ia->ia_size);
41578 + if (rc)
41579 + goto out;
41580 + }
41581 +
41582 if (S_ISREG(inode->i_mode)) {
41583 rc = filemap_write_and_wait(inode->i_mapping);
41584 if (rc)
41585 diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c
41586 index 940a82e..d3cdeea 100644
41587 --- a/fs/ecryptfs/miscdev.c
41588 +++ b/fs/ecryptfs/miscdev.c
41589 @@ -328,7 +328,7 @@ check_list:
41590 goto out_unlock_msg_ctx;
41591 i = 5;
41592 if (msg_ctx->msg) {
41593 - if (copy_to_user(&buf[i], packet_length, packet_length_size))
41594 + if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size))
41595 goto out_unlock_msg_ctx;
41596 i += packet_length_size;
41597 if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
41598 @@ -409,11 +409,47 @@ ecryptfs_miscdev_write(struct file *file, const char __user *buf,
41599 ssize_t sz = 0;
41600 char *data;
41601 uid_t euid = current_euid();
41602 + unsigned char packet_size_peek[3];
41603 int rc;
41604
41605 - if (count == 0)
41606 + if (count == 0) {
41607 goto out;
41608 + } else if (count == (1 + 4)) {
41609 + /* Likely a harmless MSG_HELO or MSG_QUIT - no packet length */
41610 + goto memdup;
41611 + } else if (count < (1 + 4 + 1)
41612 + || count > (1 + 4 + 2 + sizeof(struct ecryptfs_message) + 4
41613 + + ECRYPTFS_MAX_ENCRYPTED_KEY_BYTES)) {
41614 + printk(KERN_WARNING "%s: Acceptable packet size range is "
41615 + "[%d-%lu], but amount of data written is [%zu].",
41616 + __func__, (1 + 4 + 1),
41617 + (1 + 4 + 2 + sizeof(struct ecryptfs_message) + 4
41618 + + ECRYPTFS_MAX_ENCRYPTED_KEY_BYTES), count);
41619 + return -EINVAL;
41620 + }
41621
41622 + if (copy_from_user(packet_size_peek, (buf + 1 + 4),
41623 + sizeof(packet_size_peek))) {
41624 + printk(KERN_WARNING "%s: Error while inspecting packet size\n",
41625 + __func__);
41626 + return -EFAULT;
41627 + }
41628 +
41629 + rc = ecryptfs_parse_packet_length(packet_size_peek, &packet_size,
41630 + &packet_size_length);
41631 + if (rc) {
41632 + printk(KERN_WARNING "%s: Error parsing packet length; "
41633 + "rc = [%d]\n", __func__, rc);
41634 + return rc;
41635 + }
41636 +
41637 + if ((1 + 4 + packet_size_length + packet_size) != count) {
41638 + printk(KERN_WARNING "%s: Invalid packet size [%zu]\n", __func__,
41639 + packet_size);
41640 + return -EINVAL;
41641 + }
41642 +
41643 +memdup:
41644 data = memdup_user(buf, count);
41645 if (IS_ERR(data)) {
41646 printk(KERN_ERR "%s: memdup_user returned error [%ld]\n",
41647 @@ -435,23 +471,7 @@ ecryptfs_miscdev_write(struct file *file, const char __user *buf,
41648 }
41649 memcpy(&counter_nbo, &data[i], 4);
41650 seq = be32_to_cpu(counter_nbo);
41651 - i += 4;
41652 - rc = ecryptfs_parse_packet_length(&data[i], &packet_size,
41653 - &packet_size_length);
41654 - if (rc) {
41655 - printk(KERN_WARNING "%s: Error parsing packet length; "
41656 - "rc = [%d]\n", __func__, rc);
41657 - goto out_free;
41658 - }
41659 - i += packet_size_length;
41660 - if ((1 + 4 + packet_size_length + packet_size) != count) {
41661 - printk(KERN_WARNING "%s: (1 + packet_size_length([%zd])"
41662 - " + packet_size([%zd]))([%zd]) != "
41663 - "count([%zd]). Invalid packet format.\n",
41664 - __func__, packet_size_length, packet_size,
41665 - (1 + packet_size_length + packet_size), count);
41666 - goto out_free;
41667 - }
41668 + i += 4 + packet_size_length;
41669 rc = ecryptfs_miscdev_response(&data[i], packet_size,
41670 euid, current_user_ns(),
41671 task_pid(current), seq);
41672 diff --git a/fs/ecryptfs/read_write.c b/fs/ecryptfs/read_write.c
41673 index 3745f7c..7d040a8 100644
41674 --- a/fs/ecryptfs/read_write.c
41675 +++ b/fs/ecryptfs/read_write.c
41676 @@ -48,7 +48,7 @@ int ecryptfs_write_lower(struct inode *ecryptfs_inode, char *data,
41677 return -EIO;
41678 fs_save = get_fs();
41679 set_fs(get_ds());
41680 - rc = vfs_write(lower_file, data, size, &offset);
41681 + rc = vfs_write(lower_file, (const char __force_user *)data, size, &offset);
41682 set_fs(fs_save);
41683 mark_inode_dirty_sync(ecryptfs_inode);
41684 return rc;
41685 @@ -130,13 +130,18 @@ int ecryptfs_write(struct inode *ecryptfs_inode, char *data, loff_t offset,
41686 pgoff_t ecryptfs_page_idx = (pos >> PAGE_CACHE_SHIFT);
41687 size_t start_offset_in_page = (pos & ~PAGE_CACHE_MASK);
41688 size_t num_bytes = (PAGE_CACHE_SIZE - start_offset_in_page);
41689 - size_t total_remaining_bytes = ((offset + size) - pos);
41690 + loff_t total_remaining_bytes = ((offset + size) - pos);
41691 +
41692 + if (fatal_signal_pending(current)) {
41693 + rc = -EINTR;
41694 + break;
41695 + }
41696
41697 if (num_bytes > total_remaining_bytes)
41698 num_bytes = total_remaining_bytes;
41699 if (pos < offset) {
41700 /* remaining zeros to write, up to destination offset */
41701 - size_t total_remaining_zeros = (offset - pos);
41702 + loff_t total_remaining_zeros = (offset - pos);
41703
41704 if (num_bytes > total_remaining_zeros)
41705 num_bytes = total_remaining_zeros;
41706 @@ -193,15 +198,19 @@ int ecryptfs_write(struct inode *ecryptfs_inode, char *data, loff_t offset,
41707 }
41708 pos += num_bytes;
41709 }
41710 - if ((offset + size) > ecryptfs_file_size) {
41711 - i_size_write(ecryptfs_inode, (offset + size));
41712 + if (pos > ecryptfs_file_size) {
41713 + i_size_write(ecryptfs_inode, pos);
41714 if (crypt_stat->flags & ECRYPTFS_ENCRYPTED) {
41715 - rc = ecryptfs_write_inode_size_to_metadata(
41716 + int rc2;
41717 +
41718 + rc2 = ecryptfs_write_inode_size_to_metadata(
41719 ecryptfs_inode);
41720 - if (rc) {
41721 + if (rc2) {
41722 printk(KERN_ERR "Problem with "
41723 "ecryptfs_write_inode_size_to_metadata; "
41724 - "rc = [%d]\n", rc);
41725 + "rc = [%d]\n", rc2);
41726 + if (!rc)
41727 + rc = rc2;
41728 goto out;
41729 }
41730 }
41731 @@ -235,7 +244,7 @@ int ecryptfs_read_lower(char *data, loff_t offset, size_t size,
41732 return -EIO;
41733 fs_save = get_fs();
41734 set_fs(get_ds());
41735 - rc = vfs_read(lower_file, data, size, &offset);
41736 + rc = vfs_read(lower_file, (char __force_user *)data, size, &offset);
41737 set_fs(fs_save);
41738 return rc;
41739 }
41740 diff --git a/fs/exec.c b/fs/exec.c
41741 index 3625464..fac01f4 100644
41742 --- a/fs/exec.c
41743 +++ b/fs/exec.c
41744 @@ -55,12 +55,28 @@
41745 #include <linux/pipe_fs_i.h>
41746 #include <linux/oom.h>
41747 #include <linux/compat.h>
41748 +#include <linux/random.h>
41749 +#include <linux/seq_file.h>
41750 +
41751 +#ifdef CONFIG_PAX_REFCOUNT
41752 +#include <linux/kallsyms.h>
41753 +#include <linux/kdebug.h>
41754 +#endif
41755
41756 #include <asm/uaccess.h>
41757 #include <asm/mmu_context.h>
41758 #include <asm/tlb.h>
41759 #include "internal.h"
41760
41761 +#ifndef CONFIG_PAX_HAVE_ACL_FLAGS
41762 +void __weak pax_set_initial_flags(struct linux_binprm *bprm) {}
41763 +#endif
41764 +
41765 +#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
41766 +void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
41767 +EXPORT_SYMBOL(pax_set_initial_flags_func);
41768 +#endif
41769 +
41770 int core_uses_pid;
41771 char core_pattern[CORENAME_MAX_SIZE] = "core";
41772 unsigned int core_pipe_limit;
41773 @@ -70,7 +86,7 @@ struct core_name {
41774 char *corename;
41775 int used, size;
41776 };
41777 -static atomic_t call_count = ATOMIC_INIT(1);
41778 +static atomic_unchecked_t call_count = ATOMIC_INIT(1);
41779
41780 /* The maximal length of core_pattern is also specified in sysctl.c */
41781
41782 @@ -188,18 +204,10 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
41783 int write)
41784 {
41785 struct page *page;
41786 - int ret;
41787
41788 -#ifdef CONFIG_STACK_GROWSUP
41789 - if (write) {
41790 - ret = expand_downwards(bprm->vma, pos);
41791 - if (ret < 0)
41792 - return NULL;
41793 - }
41794 -#endif
41795 - ret = get_user_pages(current, bprm->mm, pos,
41796 - 1, write, 1, &page, NULL);
41797 - if (ret <= 0)
41798 + if (0 > expand_downwards(bprm->vma, pos))
41799 + return NULL;
41800 + if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
41801 return NULL;
41802
41803 if (write) {
41804 @@ -274,6 +282,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
41805 vma->vm_end = STACK_TOP_MAX;
41806 vma->vm_start = vma->vm_end - PAGE_SIZE;
41807 vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
41808 +
41809 +#ifdef CONFIG_PAX_SEGMEXEC
41810 + vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
41811 +#endif
41812 +
41813 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
41814 INIT_LIST_HEAD(&vma->anon_vma_chain);
41815
41816 @@ -288,6 +301,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
41817 mm->stack_vm = mm->total_vm = 1;
41818 up_write(&mm->mmap_sem);
41819 bprm->p = vma->vm_end - sizeof(void *);
41820 +
41821 +#ifdef CONFIG_PAX_RANDUSTACK
41822 + if (randomize_va_space)
41823 + bprm->p ^= (pax_get_random_long() & ~15) & ~PAGE_MASK;
41824 +#endif
41825 +
41826 return 0;
41827 err:
41828 up_write(&mm->mmap_sem);
41829 @@ -396,19 +415,7 @@ err:
41830 return err;
41831 }
41832
41833 -struct user_arg_ptr {
41834 -#ifdef CONFIG_COMPAT
41835 - bool is_compat;
41836 -#endif
41837 - union {
41838 - const char __user *const __user *native;
41839 -#ifdef CONFIG_COMPAT
41840 - compat_uptr_t __user *compat;
41841 -#endif
41842 - } ptr;
41843 -};
41844 -
41845 -static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
41846 +const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
41847 {
41848 const char __user *native;
41849
41850 @@ -417,14 +424,14 @@ static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
41851 compat_uptr_t compat;
41852
41853 if (get_user(compat, argv.ptr.compat + nr))
41854 - return ERR_PTR(-EFAULT);
41855 + return (const char __force_user *)ERR_PTR(-EFAULT);
41856
41857 return compat_ptr(compat);
41858 }
41859 #endif
41860
41861 if (get_user(native, argv.ptr.native + nr))
41862 - return ERR_PTR(-EFAULT);
41863 + return (const char __force_user *)ERR_PTR(-EFAULT);
41864
41865 return native;
41866 }
41867 @@ -443,7 +450,7 @@ static int count(struct user_arg_ptr argv, int max)
41868 if (!p)
41869 break;
41870
41871 - if (IS_ERR(p))
41872 + if (IS_ERR((const char __force_kernel *)p))
41873 return -EFAULT;
41874
41875 if (i++ >= max)
41876 @@ -477,7 +484,7 @@ static int copy_strings(int argc, struct user_arg_ptr argv,
41877
41878 ret = -EFAULT;
41879 str = get_user_arg_ptr(argv, argc);
41880 - if (IS_ERR(str))
41881 + if (IS_ERR((const char __force_kernel *)str))
41882 goto out;
41883
41884 len = strnlen_user(str, MAX_ARG_STRLEN);
41885 @@ -559,7 +566,7 @@ int copy_strings_kernel(int argc, const char *const *__argv,
41886 int r;
41887 mm_segment_t oldfs = get_fs();
41888 struct user_arg_ptr argv = {
41889 - .ptr.native = (const char __user *const __user *)__argv,
41890 + .ptr.native = (const char __force_user *const __force_user *)__argv,
41891 };
41892
41893 set_fs(KERNEL_DS);
41894 @@ -594,7 +601,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
41895 unsigned long new_end = old_end - shift;
41896 struct mmu_gather tlb;
41897
41898 - BUG_ON(new_start > new_end);
41899 + if (new_start >= new_end || new_start < mmap_min_addr)
41900 + return -ENOMEM;
41901
41902 /*
41903 * ensure there are no vmas between where we want to go
41904 @@ -603,6 +611,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
41905 if (vma != find_vma(mm, new_start))
41906 return -EFAULT;
41907
41908 +#ifdef CONFIG_PAX_SEGMEXEC
41909 + BUG_ON(pax_find_mirror_vma(vma));
41910 +#endif
41911 +
41912 /*
41913 * cover the whole range: [new_start, old_end)
41914 */
41915 @@ -683,10 +695,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
41916 stack_top = arch_align_stack(stack_top);
41917 stack_top = PAGE_ALIGN(stack_top);
41918
41919 - if (unlikely(stack_top < mmap_min_addr) ||
41920 - unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
41921 - return -ENOMEM;
41922 -
41923 stack_shift = vma->vm_end - stack_top;
41924
41925 bprm->p -= stack_shift;
41926 @@ -698,8 +706,28 @@ int setup_arg_pages(struct linux_binprm *bprm,
41927 bprm->exec -= stack_shift;
41928
41929 down_write(&mm->mmap_sem);
41930 +
41931 + /* Move stack pages down in memory. */
41932 + if (stack_shift) {
41933 + ret = shift_arg_pages(vma, stack_shift);
41934 + if (ret)
41935 + goto out_unlock;
41936 + }
41937 +
41938 vm_flags = VM_STACK_FLAGS;
41939
41940 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
41941 + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
41942 + vm_flags &= ~VM_EXEC;
41943 +
41944 +#ifdef CONFIG_PAX_MPROTECT
41945 + if (mm->pax_flags & MF_PAX_MPROTECT)
41946 + vm_flags &= ~VM_MAYEXEC;
41947 +#endif
41948 +
41949 + }
41950 +#endif
41951 +
41952 /*
41953 * Adjust stack execute permissions; explicitly enable for
41954 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
41955 @@ -718,13 +746,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
41956 goto out_unlock;
41957 BUG_ON(prev != vma);
41958
41959 - /* Move stack pages down in memory. */
41960 - if (stack_shift) {
41961 - ret = shift_arg_pages(vma, stack_shift);
41962 - if (ret)
41963 - goto out_unlock;
41964 - }
41965 -
41966 /* mprotect_fixup is overkill to remove the temporary stack flags */
41967 vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
41968
41969 @@ -805,7 +826,7 @@ int kernel_read(struct file *file, loff_t offset,
41970 old_fs = get_fs();
41971 set_fs(get_ds());
41972 /* The cast to a user pointer is valid due to the set_fs() */
41973 - result = vfs_read(file, (void __user *)addr, count, &pos);
41974 + result = vfs_read(file, (void __force_user *)addr, count, &pos);
41975 set_fs(old_fs);
41976 return result;
41977 }
41978 @@ -1247,7 +1268,7 @@ int check_unsafe_exec(struct linux_binprm *bprm)
41979 }
41980 rcu_read_unlock();
41981
41982 - if (p->fs->users > n_fs) {
41983 + if (atomic_read(&p->fs->users) > n_fs) {
41984 bprm->unsafe |= LSM_UNSAFE_SHARE;
41985 } else {
41986 res = -EAGAIN;
41987 @@ -1450,6 +1471,11 @@ static int do_execve_common(const char *filename,
41988 struct user_arg_ptr envp,
41989 struct pt_regs *regs)
41990 {
41991 +#ifdef CONFIG_GRKERNSEC
41992 + struct file *old_exec_file;
41993 + struct acl_subject_label *old_acl;
41994 + struct rlimit old_rlim[RLIM_NLIMITS];
41995 +#endif
41996 struct linux_binprm *bprm;
41997 struct file *file;
41998 struct files_struct *displaced;
41999 @@ -1457,6 +1483,8 @@ static int do_execve_common(const char *filename,
42000 int retval;
42001 const struct cred *cred = current_cred();
42002
42003 + gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
42004 +
42005 /*
42006 * We move the actual failure in case of RLIMIT_NPROC excess from
42007 * set*uid() to execve() because too many poorly written programs
42008 @@ -1497,12 +1525,27 @@ static int do_execve_common(const char *filename,
42009 if (IS_ERR(file))
42010 goto out_unmark;
42011
42012 + if (gr_ptrace_readexec(file, bprm->unsafe)) {
42013 + retval = -EPERM;
42014 + goto out_file;
42015 + }
42016 +
42017 sched_exec();
42018
42019 bprm->file = file;
42020 bprm->filename = filename;
42021 bprm->interp = filename;
42022
42023 + if (gr_process_user_ban()) {
42024 + retval = -EPERM;
42025 + goto out_file;
42026 + }
42027 +
42028 + if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
42029 + retval = -EACCES;
42030 + goto out_file;
42031 + }
42032 +
42033 retval = bprm_mm_init(bprm);
42034 if (retval)
42035 goto out_file;
42036 @@ -1532,9 +1575,40 @@ static int do_execve_common(const char *filename,
42037 if (retval < 0)
42038 goto out;
42039
42040 + if (!gr_tpe_allow(file)) {
42041 + retval = -EACCES;
42042 + goto out;
42043 + }
42044 +
42045 + if (gr_check_crash_exec(file)) {
42046 + retval = -EACCES;
42047 + goto out;
42048 + }
42049 +
42050 + gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
42051 +
42052 + gr_handle_exec_args(bprm, argv);
42053 +
42054 +#ifdef CONFIG_GRKERNSEC
42055 + old_acl = current->acl;
42056 + memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
42057 + old_exec_file = current->exec_file;
42058 + get_file(file);
42059 + current->exec_file = file;
42060 +#endif
42061 +
42062 + retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
42063 + bprm->unsafe);
42064 + if (retval < 0)
42065 + goto out_fail;
42066 +
42067 retval = search_binary_handler(bprm,regs);
42068 if (retval < 0)
42069 - goto out;
42070 + goto out_fail;
42071 +#ifdef CONFIG_GRKERNSEC
42072 + if (old_exec_file)
42073 + fput(old_exec_file);
42074 +#endif
42075
42076 /* execve succeeded */
42077 current->fs->in_exec = 0;
42078 @@ -1545,6 +1619,14 @@ static int do_execve_common(const char *filename,
42079 put_files_struct(displaced);
42080 return retval;
42081
42082 +out_fail:
42083 +#ifdef CONFIG_GRKERNSEC
42084 + current->acl = old_acl;
42085 + memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
42086 + fput(current->exec_file);
42087 + current->exec_file = old_exec_file;
42088 +#endif
42089 +
42090 out:
42091 if (bprm->mm) {
42092 acct_arg_size(bprm, 0);
42093 @@ -1618,7 +1700,7 @@ static int expand_corename(struct core_name *cn)
42094 {
42095 char *old_corename = cn->corename;
42096
42097 - cn->size = CORENAME_MAX_SIZE * atomic_inc_return(&call_count);
42098 + cn->size = CORENAME_MAX_SIZE * atomic_inc_return_unchecked(&call_count);
42099 cn->corename = krealloc(old_corename, cn->size, GFP_KERNEL);
42100
42101 if (!cn->corename) {
42102 @@ -1715,7 +1797,7 @@ static int format_corename(struct core_name *cn, long signr)
42103 int pid_in_pattern = 0;
42104 int err = 0;
42105
42106 - cn->size = CORENAME_MAX_SIZE * atomic_read(&call_count);
42107 + cn->size = CORENAME_MAX_SIZE * atomic_read_unchecked(&call_count);
42108 cn->corename = kmalloc(cn->size, GFP_KERNEL);
42109 cn->used = 0;
42110
42111 @@ -1812,6 +1894,218 @@ out:
42112 return ispipe;
42113 }
42114
42115 +int pax_check_flags(unsigned long *flags)
42116 +{
42117 + int retval = 0;
42118 +
42119 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
42120 + if (*flags & MF_PAX_SEGMEXEC)
42121 + {
42122 + *flags &= ~MF_PAX_SEGMEXEC;
42123 + retval = -EINVAL;
42124 + }
42125 +#endif
42126 +
42127 + if ((*flags & MF_PAX_PAGEEXEC)
42128 +
42129 +#ifdef CONFIG_PAX_PAGEEXEC
42130 + && (*flags & MF_PAX_SEGMEXEC)
42131 +#endif
42132 +
42133 + )
42134 + {
42135 + *flags &= ~MF_PAX_PAGEEXEC;
42136 + retval = -EINVAL;
42137 + }
42138 +
42139 + if ((*flags & MF_PAX_MPROTECT)
42140 +
42141 +#ifdef CONFIG_PAX_MPROTECT
42142 + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
42143 +#endif
42144 +
42145 + )
42146 + {
42147 + *flags &= ~MF_PAX_MPROTECT;
42148 + retval = -EINVAL;
42149 + }
42150 +
42151 + if ((*flags & MF_PAX_EMUTRAMP)
42152 +
42153 +#ifdef CONFIG_PAX_EMUTRAMP
42154 + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
42155 +#endif
42156 +
42157 + )
42158 + {
42159 + *flags &= ~MF_PAX_EMUTRAMP;
42160 + retval = -EINVAL;
42161 + }
42162 +
42163 + return retval;
42164 +}
42165 +
42166 +EXPORT_SYMBOL(pax_check_flags);
42167 +
42168 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
42169 +void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
42170 +{
42171 + struct task_struct *tsk = current;
42172 + struct mm_struct *mm = current->mm;
42173 + char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
42174 + char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
42175 + char *path_exec = NULL;
42176 + char *path_fault = NULL;
42177 + unsigned long start = 0UL, end = 0UL, offset = 0UL;
42178 +
42179 + if (buffer_exec && buffer_fault) {
42180 + struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
42181 +
42182 + down_read(&mm->mmap_sem);
42183 + vma = mm->mmap;
42184 + while (vma && (!vma_exec || !vma_fault)) {
42185 + if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
42186 + vma_exec = vma;
42187 + if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
42188 + vma_fault = vma;
42189 + vma = vma->vm_next;
42190 + }
42191 + if (vma_exec) {
42192 + path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
42193 + if (IS_ERR(path_exec))
42194 + path_exec = "<path too long>";
42195 + else {
42196 + path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
42197 + if (path_exec) {
42198 + *path_exec = 0;
42199 + path_exec = buffer_exec;
42200 + } else
42201 + path_exec = "<path too long>";
42202 + }
42203 + }
42204 + if (vma_fault) {
42205 + start = vma_fault->vm_start;
42206 + end = vma_fault->vm_end;
42207 + offset = vma_fault->vm_pgoff << PAGE_SHIFT;
42208 + if (vma_fault->vm_file) {
42209 + path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
42210 + if (IS_ERR(path_fault))
42211 + path_fault = "<path too long>";
42212 + else {
42213 + path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
42214 + if (path_fault) {
42215 + *path_fault = 0;
42216 + path_fault = buffer_fault;
42217 + } else
42218 + path_fault = "<path too long>";
42219 + }
42220 + } else
42221 + path_fault = "<anonymous mapping>";
42222 + }
42223 + up_read(&mm->mmap_sem);
42224 + }
42225 + if (tsk->signal->curr_ip)
42226 + printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
42227 + else
42228 + printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
42229 + printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
42230 + "PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
42231 + task_uid(tsk), task_euid(tsk), pc, sp);
42232 + free_page((unsigned long)buffer_exec);
42233 + free_page((unsigned long)buffer_fault);
42234 + pax_report_insns(regs, pc, sp);
42235 + do_coredump(SIGKILL, SIGKILL, regs);
42236 +}
42237 +#endif
42238 +
42239 +#ifdef CONFIG_PAX_REFCOUNT
42240 +void pax_report_refcount_overflow(struct pt_regs *regs)
42241 +{
42242 + if (current->signal->curr_ip)
42243 + printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
42244 + &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
42245 + else
42246 + printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
42247 + current->comm, task_pid_nr(current), current_uid(), current_euid());
42248 + print_symbol(KERN_ERR "PAX: refcount overflow occured at: %s\n", instruction_pointer(regs));
42249 + show_regs(regs);
42250 + force_sig_info(SIGKILL, SEND_SIG_FORCED, current);
42251 +}
42252 +#endif
42253 +
42254 +#ifdef CONFIG_PAX_USERCOPY
42255 +/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
42256 +int object_is_on_stack(const void *obj, unsigned long len)
42257 +{
42258 + const void * const stack = task_stack_page(current);
42259 + const void * const stackend = stack + THREAD_SIZE;
42260 +
42261 +#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
42262 + const void *frame = NULL;
42263 + const void *oldframe;
42264 +#endif
42265 +
42266 + if (obj + len < obj)
42267 + return -1;
42268 +
42269 + if (obj + len <= stack || stackend <= obj)
42270 + return 0;
42271 +
42272 + if (obj < stack || stackend < obj + len)
42273 + return -1;
42274 +
42275 +#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
42276 + oldframe = __builtin_frame_address(1);
42277 + if (oldframe)
42278 + frame = __builtin_frame_address(2);
42279 + /*
42280 + low ----------------------------------------------> high
42281 + [saved bp][saved ip][args][local vars][saved bp][saved ip]
42282 + ^----------------^
42283 + allow copies only within here
42284 + */
42285 + while (stack <= frame && frame < stackend) {
42286 + /* if obj + len extends past the last frame, this
42287 + check won't pass and the next frame will be 0,
42288 + causing us to bail out and correctly report
42289 + the copy as invalid
42290 + */
42291 + if (obj + len <= frame)
42292 + return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
42293 + oldframe = frame;
42294 + frame = *(const void * const *)frame;
42295 + }
42296 + return -1;
42297 +#else
42298 + return 1;
42299 +#endif
42300 +}
42301 +
42302 +NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
42303 +{
42304 + if (current->signal->curr_ip)
42305 + printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
42306 + &current->signal->curr_ip, to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
42307 + else
42308 + printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
42309 + to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
42310 + dump_stack();
42311 + gr_handle_kernel_exploit();
42312 + do_group_exit(SIGKILL);
42313 +}
42314 +#endif
42315 +
42316 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
42317 +void pax_track_stack(void)
42318 +{
42319 + unsigned long sp = (unsigned long)&sp;
42320 + if (sp < current_thread_info()->lowest_stack &&
42321 + sp > (unsigned long)task_stack_page(current))
42322 + current_thread_info()->lowest_stack = sp;
42323 +}
42324 +EXPORT_SYMBOL(pax_track_stack);
42325 +#endif
42326 +
42327 static int zap_process(struct task_struct *start, int exit_code)
42328 {
42329 struct task_struct *t;
42330 @@ -2023,17 +2317,17 @@ static void wait_for_dump_helpers(struct file *file)
42331 pipe = file->f_path.dentry->d_inode->i_pipe;
42332
42333 pipe_lock(pipe);
42334 - pipe->readers++;
42335 - pipe->writers--;
42336 + atomic_inc(&pipe->readers);
42337 + atomic_dec(&pipe->writers);
42338
42339 - while ((pipe->readers > 1) && (!signal_pending(current))) {
42340 + while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
42341 wake_up_interruptible_sync(&pipe->wait);
42342 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
42343 pipe_wait(pipe);
42344 }
42345
42346 - pipe->readers--;
42347 - pipe->writers++;
42348 + atomic_dec(&pipe->readers);
42349 + atomic_inc(&pipe->writers);
42350 pipe_unlock(pipe);
42351
42352 }
42353 @@ -2094,7 +2388,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
42354 int retval = 0;
42355 int flag = 0;
42356 int ispipe;
42357 - static atomic_t core_dump_count = ATOMIC_INIT(0);
42358 + static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
42359 struct coredump_params cprm = {
42360 .signr = signr,
42361 .regs = regs,
42362 @@ -2109,6 +2403,9 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
42363
42364 audit_core_dumps(signr);
42365
42366 + if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
42367 + gr_handle_brute_attach(current, cprm.mm_flags);
42368 +
42369 binfmt = mm->binfmt;
42370 if (!binfmt || !binfmt->core_dump)
42371 goto fail;
42372 @@ -2176,7 +2473,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
42373 }
42374 cprm.limit = RLIM_INFINITY;
42375
42376 - dump_count = atomic_inc_return(&core_dump_count);
42377 + dump_count = atomic_inc_return_unchecked(&core_dump_count);
42378 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
42379 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
42380 task_tgid_vnr(current), current->comm);
42381 @@ -2203,6 +2500,8 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
42382 } else {
42383 struct inode *inode;
42384
42385 + gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
42386 +
42387 if (cprm.limit < binfmt->min_coredump)
42388 goto fail_unlock;
42389
42390 @@ -2246,7 +2545,7 @@ close_fail:
42391 filp_close(cprm.file, NULL);
42392 fail_dropcount:
42393 if (ispipe)
42394 - atomic_dec(&core_dump_count);
42395 + atomic_dec_unchecked(&core_dump_count);
42396 fail_unlock:
42397 kfree(cn.corename);
42398 fail_corename:
42399 @@ -2265,7 +2564,7 @@ fail:
42400 */
42401 int dump_write(struct file *file, const void *addr, int nr)
42402 {
42403 - return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, addr, nr, &file->f_pos) == nr;
42404 + return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, (const char __force_user *)addr, nr, &file->f_pos) == nr;
42405 }
42406 EXPORT_SYMBOL(dump_write);
42407
42408 diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
42409 index a8cbe1b..fed04cb 100644
42410 --- a/fs/ext2/balloc.c
42411 +++ b/fs/ext2/balloc.c
42412 @@ -1192,7 +1192,7 @@ static int ext2_has_free_blocks(struct ext2_sb_info *sbi)
42413
42414 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
42415 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
42416 - if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
42417 + if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
42418 sbi->s_resuid != current_fsuid() &&
42419 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
42420 return 0;
42421 diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
42422 index a203892..4e64db5 100644
42423 --- a/fs/ext3/balloc.c
42424 +++ b/fs/ext3/balloc.c
42425 @@ -1446,9 +1446,10 @@ static int ext3_has_free_blocks(struct ext3_sb_info *sbi, int use_reservation)
42426
42427 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
42428 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
42429 - if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
42430 + if (free_blocks < root_blocks + 1 &&
42431 !use_reservation && sbi->s_resuid != current_fsuid() &&
42432 - (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
42433 + (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid)) &&
42434 + !capable_nolog(CAP_SYS_RESOURCE)) {
42435 return 0;
42436 }
42437 return 1;
42438 diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
42439 index 12ccacd..a6035fce0 100644
42440 --- a/fs/ext4/balloc.c
42441 +++ b/fs/ext4/balloc.c
42442 @@ -436,8 +436,8 @@ static int ext4_has_free_clusters(struct ext4_sb_info *sbi,
42443 /* Hm, nope. Are (enough) root reserved clusters available? */
42444 if (sbi->s_resuid == current_fsuid() ||
42445 ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
42446 - capable(CAP_SYS_RESOURCE) ||
42447 - (flags & EXT4_MB_USE_ROOT_BLOCKS)) {
42448 + (flags & EXT4_MB_USE_ROOT_BLOCKS) ||
42449 + capable_nolog(CAP_SYS_RESOURCE)) {
42450
42451 if (free_clusters >= (nclusters + dirty_clusters))
42452 return 1;
42453 diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
42454 index 5b0e26a..0aa002d 100644
42455 --- a/fs/ext4/ext4.h
42456 +++ b/fs/ext4/ext4.h
42457 @@ -1208,19 +1208,19 @@ struct ext4_sb_info {
42458 unsigned long s_mb_last_start;
42459
42460 /* stats for buddy allocator */
42461 - atomic_t s_bal_reqs; /* number of reqs with len > 1 */
42462 - atomic_t s_bal_success; /* we found long enough chunks */
42463 - atomic_t s_bal_allocated; /* in blocks */
42464 - atomic_t s_bal_ex_scanned; /* total extents scanned */
42465 - atomic_t s_bal_goals; /* goal hits */
42466 - atomic_t s_bal_breaks; /* too long searches */
42467 - atomic_t s_bal_2orders; /* 2^order hits */
42468 + atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
42469 + atomic_unchecked_t s_bal_success; /* we found long enough chunks */
42470 + atomic_unchecked_t s_bal_allocated; /* in blocks */
42471 + atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
42472 + atomic_unchecked_t s_bal_goals; /* goal hits */
42473 + atomic_unchecked_t s_bal_breaks; /* too long searches */
42474 + atomic_unchecked_t s_bal_2orders; /* 2^order hits */
42475 spinlock_t s_bal_lock;
42476 unsigned long s_mb_buddies_generated;
42477 unsigned long long s_mb_generation_time;
42478 - atomic_t s_mb_lost_chunks;
42479 - atomic_t s_mb_preallocated;
42480 - atomic_t s_mb_discarded;
42481 + atomic_unchecked_t s_mb_lost_chunks;
42482 + atomic_unchecked_t s_mb_preallocated;
42483 + atomic_unchecked_t s_mb_discarded;
42484 atomic_t s_lock_busy;
42485
42486 /* locality groups */
42487 diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
42488 index e2d8be8..c7f0ce9 100644
42489 --- a/fs/ext4/mballoc.c
42490 +++ b/fs/ext4/mballoc.c
42491 @@ -1794,7 +1794,7 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
42492 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
42493
42494 if (EXT4_SB(sb)->s_mb_stats)
42495 - atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
42496 + atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
42497
42498 break;
42499 }
42500 @@ -2088,7 +2088,7 @@ repeat:
42501 ac->ac_status = AC_STATUS_CONTINUE;
42502 ac->ac_flags |= EXT4_MB_HINT_FIRST;
42503 cr = 3;
42504 - atomic_inc(&sbi->s_mb_lost_chunks);
42505 + atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
42506 goto repeat;
42507 }
42508 }
42509 @@ -2592,25 +2592,25 @@ int ext4_mb_release(struct super_block *sb)
42510 if (sbi->s_mb_stats) {
42511 ext4_msg(sb, KERN_INFO,
42512 "mballoc: %u blocks %u reqs (%u success)",
42513 - atomic_read(&sbi->s_bal_allocated),
42514 - atomic_read(&sbi->s_bal_reqs),
42515 - atomic_read(&sbi->s_bal_success));
42516 + atomic_read_unchecked(&sbi->s_bal_allocated),
42517 + atomic_read_unchecked(&sbi->s_bal_reqs),
42518 + atomic_read_unchecked(&sbi->s_bal_success));
42519 ext4_msg(sb, KERN_INFO,
42520 "mballoc: %u extents scanned, %u goal hits, "
42521 "%u 2^N hits, %u breaks, %u lost",
42522 - atomic_read(&sbi->s_bal_ex_scanned),
42523 - atomic_read(&sbi->s_bal_goals),
42524 - atomic_read(&sbi->s_bal_2orders),
42525 - atomic_read(&sbi->s_bal_breaks),
42526 - atomic_read(&sbi->s_mb_lost_chunks));
42527 + atomic_read_unchecked(&sbi->s_bal_ex_scanned),
42528 + atomic_read_unchecked(&sbi->s_bal_goals),
42529 + atomic_read_unchecked(&sbi->s_bal_2orders),
42530 + atomic_read_unchecked(&sbi->s_bal_breaks),
42531 + atomic_read_unchecked(&sbi->s_mb_lost_chunks));
42532 ext4_msg(sb, KERN_INFO,
42533 "mballoc: %lu generated and it took %Lu",
42534 sbi->s_mb_buddies_generated,
42535 sbi->s_mb_generation_time);
42536 ext4_msg(sb, KERN_INFO,
42537 "mballoc: %u preallocated, %u discarded",
42538 - atomic_read(&sbi->s_mb_preallocated),
42539 - atomic_read(&sbi->s_mb_discarded));
42540 + atomic_read_unchecked(&sbi->s_mb_preallocated),
42541 + atomic_read_unchecked(&sbi->s_mb_discarded));
42542 }
42543
42544 free_percpu(sbi->s_locality_groups);
42545 @@ -3096,16 +3096,16 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
42546 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
42547
42548 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
42549 - atomic_inc(&sbi->s_bal_reqs);
42550 - atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
42551 + atomic_inc_unchecked(&sbi->s_bal_reqs);
42552 + atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
42553 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
42554 - atomic_inc(&sbi->s_bal_success);
42555 - atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
42556 + atomic_inc_unchecked(&sbi->s_bal_success);
42557 + atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
42558 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
42559 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
42560 - atomic_inc(&sbi->s_bal_goals);
42561 + atomic_inc_unchecked(&sbi->s_bal_goals);
42562 if (ac->ac_found > sbi->s_mb_max_to_scan)
42563 - atomic_inc(&sbi->s_bal_breaks);
42564 + atomic_inc_unchecked(&sbi->s_bal_breaks);
42565 }
42566
42567 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
42568 @@ -3509,7 +3509,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
42569 trace_ext4_mb_new_inode_pa(ac, pa);
42570
42571 ext4_mb_use_inode_pa(ac, pa);
42572 - atomic_add(pa->pa_free, &sbi->s_mb_preallocated);
42573 + atomic_add_unchecked(pa->pa_free, &sbi->s_mb_preallocated);
42574
42575 ei = EXT4_I(ac->ac_inode);
42576 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
42577 @@ -3569,7 +3569,7 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
42578 trace_ext4_mb_new_group_pa(ac, pa);
42579
42580 ext4_mb_use_group_pa(ac, pa);
42581 - atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
42582 + atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
42583
42584 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
42585 lg = ac->ac_lg;
42586 @@ -3658,7 +3658,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
42587 * from the bitmap and continue.
42588 */
42589 }
42590 - atomic_add(free, &sbi->s_mb_discarded);
42591 + atomic_add_unchecked(free, &sbi->s_mb_discarded);
42592
42593 return err;
42594 }
42595 @@ -3676,7 +3676,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
42596 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
42597 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
42598 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
42599 - atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
42600 + atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
42601 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
42602
42603 return 0;
42604 diff --git a/fs/fcntl.c b/fs/fcntl.c
42605 index 22764c7..86372c9 100644
42606 --- a/fs/fcntl.c
42607 +++ b/fs/fcntl.c
42608 @@ -224,6 +224,11 @@ int __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
42609 if (err)
42610 return err;
42611
42612 + if (gr_handle_chroot_fowner(pid, type))
42613 + return -ENOENT;
42614 + if (gr_check_protected_task_fowner(pid, type))
42615 + return -EACCES;
42616 +
42617 f_modown(filp, pid, type, force);
42618 return 0;
42619 }
42620 @@ -266,7 +271,7 @@ pid_t f_getown(struct file *filp)
42621
42622 static int f_setown_ex(struct file *filp, unsigned long arg)
42623 {
42624 - struct f_owner_ex * __user owner_p = (void * __user)arg;
42625 + struct f_owner_ex __user *owner_p = (void __user *)arg;
42626 struct f_owner_ex owner;
42627 struct pid *pid;
42628 int type;
42629 @@ -306,7 +311,7 @@ static int f_setown_ex(struct file *filp, unsigned long arg)
42630
42631 static int f_getown_ex(struct file *filp, unsigned long arg)
42632 {
42633 - struct f_owner_ex * __user owner_p = (void * __user)arg;
42634 + struct f_owner_ex __user *owner_p = (void __user *)arg;
42635 struct f_owner_ex owner;
42636 int ret = 0;
42637
42638 @@ -348,6 +353,7 @@ static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
42639 switch (cmd) {
42640 case F_DUPFD:
42641 case F_DUPFD_CLOEXEC:
42642 + gr_learn_resource(current, RLIMIT_NOFILE, arg, 0);
42643 if (arg >= rlimit(RLIMIT_NOFILE))
42644 break;
42645 err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0);
42646 diff --git a/fs/fifo.c b/fs/fifo.c
42647 index b1a524d..4ee270e 100644
42648 --- a/fs/fifo.c
42649 +++ b/fs/fifo.c
42650 @@ -58,10 +58,10 @@ static int fifo_open(struct inode *inode, struct file *filp)
42651 */
42652 filp->f_op = &read_pipefifo_fops;
42653 pipe->r_counter++;
42654 - if (pipe->readers++ == 0)
42655 + if (atomic_inc_return(&pipe->readers) == 1)
42656 wake_up_partner(inode);
42657
42658 - if (!pipe->writers) {
42659 + if (!atomic_read(&pipe->writers)) {
42660 if ((filp->f_flags & O_NONBLOCK)) {
42661 /* suppress POLLHUP until we have
42662 * seen a writer */
42663 @@ -81,15 +81,15 @@ static int fifo_open(struct inode *inode, struct file *filp)
42664 * errno=ENXIO when there is no process reading the FIFO.
42665 */
42666 ret = -ENXIO;
42667 - if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
42668 + if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
42669 goto err;
42670
42671 filp->f_op = &write_pipefifo_fops;
42672 pipe->w_counter++;
42673 - if (!pipe->writers++)
42674 + if (atomic_inc_return(&pipe->writers) == 1)
42675 wake_up_partner(inode);
42676
42677 - if (!pipe->readers) {
42678 + if (!atomic_read(&pipe->readers)) {
42679 wait_for_partner(inode, &pipe->r_counter);
42680 if (signal_pending(current))
42681 goto err_wr;
42682 @@ -105,11 +105,11 @@ static int fifo_open(struct inode *inode, struct file *filp)
42683 */
42684 filp->f_op = &rdwr_pipefifo_fops;
42685
42686 - pipe->readers++;
42687 - pipe->writers++;
42688 + atomic_inc(&pipe->readers);
42689 + atomic_inc(&pipe->writers);
42690 pipe->r_counter++;
42691 pipe->w_counter++;
42692 - if (pipe->readers == 1 || pipe->writers == 1)
42693 + if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
42694 wake_up_partner(inode);
42695 break;
42696
42697 @@ -123,19 +123,19 @@ static int fifo_open(struct inode *inode, struct file *filp)
42698 return 0;
42699
42700 err_rd:
42701 - if (!--pipe->readers)
42702 + if (atomic_dec_and_test(&pipe->readers))
42703 wake_up_interruptible(&pipe->wait);
42704 ret = -ERESTARTSYS;
42705 goto err;
42706
42707 err_wr:
42708 - if (!--pipe->writers)
42709 + if (atomic_dec_and_test(&pipe->writers))
42710 wake_up_interruptible(&pipe->wait);
42711 ret = -ERESTARTSYS;
42712 goto err;
42713
42714 err:
42715 - if (!pipe->readers && !pipe->writers)
42716 + if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
42717 free_pipe_info(inode);
42718
42719 err_nocleanup:
42720 diff --git a/fs/file.c b/fs/file.c
42721 index 4c6992d..104cdea 100644
42722 --- a/fs/file.c
42723 +++ b/fs/file.c
42724 @@ -15,6 +15,7 @@
42725 #include <linux/slab.h>
42726 #include <linux/vmalloc.h>
42727 #include <linux/file.h>
42728 +#include <linux/security.h>
42729 #include <linux/fdtable.h>
42730 #include <linux/bitops.h>
42731 #include <linux/interrupt.h>
42732 @@ -254,6 +255,7 @@ int expand_files(struct files_struct *files, int nr)
42733 * N.B. For clone tasks sharing a files structure, this test
42734 * will limit the total number of files that can be opened.
42735 */
42736 + gr_learn_resource(current, RLIMIT_NOFILE, nr, 0);
42737 if (nr >= rlimit(RLIMIT_NOFILE))
42738 return -EMFILE;
42739
42740 diff --git a/fs/filesystems.c b/fs/filesystems.c
42741 index 0845f84..7b4ebef 100644
42742 --- a/fs/filesystems.c
42743 +++ b/fs/filesystems.c
42744 @@ -274,7 +274,12 @@ struct file_system_type *get_fs_type(const char *name)
42745 int len = dot ? dot - name : strlen(name);
42746
42747 fs = __get_fs_type(name, len);
42748 +
42749 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
42750 + if (!fs && (___request_module(true, "grsec_modharden_fs", "%.*s", len, name) == 0))
42751 +#else
42752 if (!fs && (request_module("%.*s", len, name) == 0))
42753 +#endif
42754 fs = __get_fs_type(name, len);
42755
42756 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
42757 diff --git a/fs/fs_struct.c b/fs/fs_struct.c
42758 index 78b519c..212c0d0 100644
42759 --- a/fs/fs_struct.c
42760 +++ b/fs/fs_struct.c
42761 @@ -4,6 +4,7 @@
42762 #include <linux/path.h>
42763 #include <linux/slab.h>
42764 #include <linux/fs_struct.h>
42765 +#include <linux/grsecurity.h>
42766 #include "internal.h"
42767
42768 static inline void path_get_longterm(struct path *path)
42769 @@ -31,6 +32,7 @@ void set_fs_root(struct fs_struct *fs, struct path *path)
42770 old_root = fs->root;
42771 fs->root = *path;
42772 path_get_longterm(path);
42773 + gr_set_chroot_entries(current, path);
42774 write_seqcount_end(&fs->seq);
42775 spin_unlock(&fs->lock);
42776 if (old_root.dentry)
42777 @@ -74,6 +76,7 @@ void chroot_fs_refs(struct path *old_root, struct path *new_root)
42778 && fs->root.mnt == old_root->mnt) {
42779 path_get_longterm(new_root);
42780 fs->root = *new_root;
42781 + gr_set_chroot_entries(p, new_root);
42782 count++;
42783 }
42784 if (fs->pwd.dentry == old_root->dentry
42785 @@ -109,7 +112,8 @@ void exit_fs(struct task_struct *tsk)
42786 spin_lock(&fs->lock);
42787 write_seqcount_begin(&fs->seq);
42788 tsk->fs = NULL;
42789 - kill = !--fs->users;
42790 + gr_clear_chroot_entries(tsk);
42791 + kill = !atomic_dec_return(&fs->users);
42792 write_seqcount_end(&fs->seq);
42793 spin_unlock(&fs->lock);
42794 task_unlock(tsk);
42795 @@ -123,7 +127,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
42796 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
42797 /* We don't need to lock fs - think why ;-) */
42798 if (fs) {
42799 - fs->users = 1;
42800 + atomic_set(&fs->users, 1);
42801 fs->in_exec = 0;
42802 spin_lock_init(&fs->lock);
42803 seqcount_init(&fs->seq);
42804 @@ -132,6 +136,9 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
42805 spin_lock(&old->lock);
42806 fs->root = old->root;
42807 path_get_longterm(&fs->root);
42808 + /* instead of calling gr_set_chroot_entries here,
42809 + we call it from every caller of this function
42810 + */
42811 fs->pwd = old->pwd;
42812 path_get_longterm(&fs->pwd);
42813 spin_unlock(&old->lock);
42814 @@ -150,8 +157,9 @@ int unshare_fs_struct(void)
42815
42816 task_lock(current);
42817 spin_lock(&fs->lock);
42818 - kill = !--fs->users;
42819 + kill = !atomic_dec_return(&fs->users);
42820 current->fs = new_fs;
42821 + gr_set_chroot_entries(current, &new_fs->root);
42822 spin_unlock(&fs->lock);
42823 task_unlock(current);
42824
42825 @@ -170,7 +178,7 @@ EXPORT_SYMBOL(current_umask);
42826
42827 /* to be mentioned only in INIT_TASK */
42828 struct fs_struct init_fs = {
42829 - .users = 1,
42830 + .users = ATOMIC_INIT(1),
42831 .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
42832 .seq = SEQCNT_ZERO,
42833 .umask = 0022,
42834 @@ -186,12 +194,13 @@ void daemonize_fs_struct(void)
42835 task_lock(current);
42836
42837 spin_lock(&init_fs.lock);
42838 - init_fs.users++;
42839 + atomic_inc(&init_fs.users);
42840 spin_unlock(&init_fs.lock);
42841
42842 spin_lock(&fs->lock);
42843 current->fs = &init_fs;
42844 - kill = !--fs->users;
42845 + gr_set_chroot_entries(current, &current->fs->root);
42846 + kill = !atomic_dec_return(&fs->users);
42847 spin_unlock(&fs->lock);
42848
42849 task_unlock(current);
42850 diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
42851 index 9905350..02eaec4 100644
42852 --- a/fs/fscache/cookie.c
42853 +++ b/fs/fscache/cookie.c
42854 @@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire_cookie(
42855 parent ? (char *) parent->def->name : "<no-parent>",
42856 def->name, netfs_data);
42857
42858 - fscache_stat(&fscache_n_acquires);
42859 + fscache_stat_unchecked(&fscache_n_acquires);
42860
42861 /* if there's no parent cookie, then we don't create one here either */
42862 if (!parent) {
42863 - fscache_stat(&fscache_n_acquires_null);
42864 + fscache_stat_unchecked(&fscache_n_acquires_null);
42865 _leave(" [no parent]");
42866 return NULL;
42867 }
42868 @@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
42869 /* allocate and initialise a cookie */
42870 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
42871 if (!cookie) {
42872 - fscache_stat(&fscache_n_acquires_oom);
42873 + fscache_stat_unchecked(&fscache_n_acquires_oom);
42874 _leave(" [ENOMEM]");
42875 return NULL;
42876 }
42877 @@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
42878
42879 switch (cookie->def->type) {
42880 case FSCACHE_COOKIE_TYPE_INDEX:
42881 - fscache_stat(&fscache_n_cookie_index);
42882 + fscache_stat_unchecked(&fscache_n_cookie_index);
42883 break;
42884 case FSCACHE_COOKIE_TYPE_DATAFILE:
42885 - fscache_stat(&fscache_n_cookie_data);
42886 + fscache_stat_unchecked(&fscache_n_cookie_data);
42887 break;
42888 default:
42889 - fscache_stat(&fscache_n_cookie_special);
42890 + fscache_stat_unchecked(&fscache_n_cookie_special);
42891 break;
42892 }
42893
42894 @@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
42895 if (fscache_acquire_non_index_cookie(cookie) < 0) {
42896 atomic_dec(&parent->n_children);
42897 __fscache_cookie_put(cookie);
42898 - fscache_stat(&fscache_n_acquires_nobufs);
42899 + fscache_stat_unchecked(&fscache_n_acquires_nobufs);
42900 _leave(" = NULL");
42901 return NULL;
42902 }
42903 }
42904
42905 - fscache_stat(&fscache_n_acquires_ok);
42906 + fscache_stat_unchecked(&fscache_n_acquires_ok);
42907 _leave(" = %p", cookie);
42908 return cookie;
42909 }
42910 @@ -168,7 +168,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
42911 cache = fscache_select_cache_for_object(cookie->parent);
42912 if (!cache) {
42913 up_read(&fscache_addremove_sem);
42914 - fscache_stat(&fscache_n_acquires_no_cache);
42915 + fscache_stat_unchecked(&fscache_n_acquires_no_cache);
42916 _leave(" = -ENOMEDIUM [no cache]");
42917 return -ENOMEDIUM;
42918 }
42919 @@ -256,12 +256,12 @@ static int fscache_alloc_object(struct fscache_cache *cache,
42920 object = cache->ops->alloc_object(cache, cookie);
42921 fscache_stat_d(&fscache_n_cop_alloc_object);
42922 if (IS_ERR(object)) {
42923 - fscache_stat(&fscache_n_object_no_alloc);
42924 + fscache_stat_unchecked(&fscache_n_object_no_alloc);
42925 ret = PTR_ERR(object);
42926 goto error;
42927 }
42928
42929 - fscache_stat(&fscache_n_object_alloc);
42930 + fscache_stat_unchecked(&fscache_n_object_alloc);
42931
42932 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
42933
42934 @@ -377,10 +377,10 @@ void __fscache_update_cookie(struct fscache_cookie *cookie)
42935 struct fscache_object *object;
42936 struct hlist_node *_p;
42937
42938 - fscache_stat(&fscache_n_updates);
42939 + fscache_stat_unchecked(&fscache_n_updates);
42940
42941 if (!cookie) {
42942 - fscache_stat(&fscache_n_updates_null);
42943 + fscache_stat_unchecked(&fscache_n_updates_null);
42944 _leave(" [no cookie]");
42945 return;
42946 }
42947 @@ -414,12 +414,12 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
42948 struct fscache_object *object;
42949 unsigned long event;
42950
42951 - fscache_stat(&fscache_n_relinquishes);
42952 + fscache_stat_unchecked(&fscache_n_relinquishes);
42953 if (retire)
42954 - fscache_stat(&fscache_n_relinquishes_retire);
42955 + fscache_stat_unchecked(&fscache_n_relinquishes_retire);
42956
42957 if (!cookie) {
42958 - fscache_stat(&fscache_n_relinquishes_null);
42959 + fscache_stat_unchecked(&fscache_n_relinquishes_null);
42960 _leave(" [no cookie]");
42961 return;
42962 }
42963 @@ -435,7 +435,7 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
42964
42965 /* wait for the cookie to finish being instantiated (or to fail) */
42966 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
42967 - fscache_stat(&fscache_n_relinquishes_waitcrt);
42968 + fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
42969 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
42970 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
42971 }
42972 diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
42973 index f6aad48..88dcf26 100644
42974 --- a/fs/fscache/internal.h
42975 +++ b/fs/fscache/internal.h
42976 @@ -144,94 +144,94 @@ extern void fscache_proc_cleanup(void);
42977 extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
42978 extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
42979
42980 -extern atomic_t fscache_n_op_pend;
42981 -extern atomic_t fscache_n_op_run;
42982 -extern atomic_t fscache_n_op_enqueue;
42983 -extern atomic_t fscache_n_op_deferred_release;
42984 -extern atomic_t fscache_n_op_release;
42985 -extern atomic_t fscache_n_op_gc;
42986 -extern atomic_t fscache_n_op_cancelled;
42987 -extern atomic_t fscache_n_op_rejected;
42988 +extern atomic_unchecked_t fscache_n_op_pend;
42989 +extern atomic_unchecked_t fscache_n_op_run;
42990 +extern atomic_unchecked_t fscache_n_op_enqueue;
42991 +extern atomic_unchecked_t fscache_n_op_deferred_release;
42992 +extern atomic_unchecked_t fscache_n_op_release;
42993 +extern atomic_unchecked_t fscache_n_op_gc;
42994 +extern atomic_unchecked_t fscache_n_op_cancelled;
42995 +extern atomic_unchecked_t fscache_n_op_rejected;
42996
42997 -extern atomic_t fscache_n_attr_changed;
42998 -extern atomic_t fscache_n_attr_changed_ok;
42999 -extern atomic_t fscache_n_attr_changed_nobufs;
43000 -extern atomic_t fscache_n_attr_changed_nomem;
43001 -extern atomic_t fscache_n_attr_changed_calls;
43002 +extern atomic_unchecked_t fscache_n_attr_changed;
43003 +extern atomic_unchecked_t fscache_n_attr_changed_ok;
43004 +extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
43005 +extern atomic_unchecked_t fscache_n_attr_changed_nomem;
43006 +extern atomic_unchecked_t fscache_n_attr_changed_calls;
43007
43008 -extern atomic_t fscache_n_allocs;
43009 -extern atomic_t fscache_n_allocs_ok;
43010 -extern atomic_t fscache_n_allocs_wait;
43011 -extern atomic_t fscache_n_allocs_nobufs;
43012 -extern atomic_t fscache_n_allocs_intr;
43013 -extern atomic_t fscache_n_allocs_object_dead;
43014 -extern atomic_t fscache_n_alloc_ops;
43015 -extern atomic_t fscache_n_alloc_op_waits;
43016 +extern atomic_unchecked_t fscache_n_allocs;
43017 +extern atomic_unchecked_t fscache_n_allocs_ok;
43018 +extern atomic_unchecked_t fscache_n_allocs_wait;
43019 +extern atomic_unchecked_t fscache_n_allocs_nobufs;
43020 +extern atomic_unchecked_t fscache_n_allocs_intr;
43021 +extern atomic_unchecked_t fscache_n_allocs_object_dead;
43022 +extern atomic_unchecked_t fscache_n_alloc_ops;
43023 +extern atomic_unchecked_t fscache_n_alloc_op_waits;
43024
43025 -extern atomic_t fscache_n_retrievals;
43026 -extern atomic_t fscache_n_retrievals_ok;
43027 -extern atomic_t fscache_n_retrievals_wait;
43028 -extern atomic_t fscache_n_retrievals_nodata;
43029 -extern atomic_t fscache_n_retrievals_nobufs;
43030 -extern atomic_t fscache_n_retrievals_intr;
43031 -extern atomic_t fscache_n_retrievals_nomem;
43032 -extern atomic_t fscache_n_retrievals_object_dead;
43033 -extern atomic_t fscache_n_retrieval_ops;
43034 -extern atomic_t fscache_n_retrieval_op_waits;
43035 +extern atomic_unchecked_t fscache_n_retrievals;
43036 +extern atomic_unchecked_t fscache_n_retrievals_ok;
43037 +extern atomic_unchecked_t fscache_n_retrievals_wait;
43038 +extern atomic_unchecked_t fscache_n_retrievals_nodata;
43039 +extern atomic_unchecked_t fscache_n_retrievals_nobufs;
43040 +extern atomic_unchecked_t fscache_n_retrievals_intr;
43041 +extern atomic_unchecked_t fscache_n_retrievals_nomem;
43042 +extern atomic_unchecked_t fscache_n_retrievals_object_dead;
43043 +extern atomic_unchecked_t fscache_n_retrieval_ops;
43044 +extern atomic_unchecked_t fscache_n_retrieval_op_waits;
43045
43046 -extern atomic_t fscache_n_stores;
43047 -extern atomic_t fscache_n_stores_ok;
43048 -extern atomic_t fscache_n_stores_again;
43049 -extern atomic_t fscache_n_stores_nobufs;
43050 -extern atomic_t fscache_n_stores_oom;
43051 -extern atomic_t fscache_n_store_ops;
43052 -extern atomic_t fscache_n_store_calls;
43053 -extern atomic_t fscache_n_store_pages;
43054 -extern atomic_t fscache_n_store_radix_deletes;
43055 -extern atomic_t fscache_n_store_pages_over_limit;
43056 +extern atomic_unchecked_t fscache_n_stores;
43057 +extern atomic_unchecked_t fscache_n_stores_ok;
43058 +extern atomic_unchecked_t fscache_n_stores_again;
43059 +extern atomic_unchecked_t fscache_n_stores_nobufs;
43060 +extern atomic_unchecked_t fscache_n_stores_oom;
43061 +extern atomic_unchecked_t fscache_n_store_ops;
43062 +extern atomic_unchecked_t fscache_n_store_calls;
43063 +extern atomic_unchecked_t fscache_n_store_pages;
43064 +extern atomic_unchecked_t fscache_n_store_radix_deletes;
43065 +extern atomic_unchecked_t fscache_n_store_pages_over_limit;
43066
43067 -extern atomic_t fscache_n_store_vmscan_not_storing;
43068 -extern atomic_t fscache_n_store_vmscan_gone;
43069 -extern atomic_t fscache_n_store_vmscan_busy;
43070 -extern atomic_t fscache_n_store_vmscan_cancelled;
43071 +extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
43072 +extern atomic_unchecked_t fscache_n_store_vmscan_gone;
43073 +extern atomic_unchecked_t fscache_n_store_vmscan_busy;
43074 +extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
43075
43076 -extern atomic_t fscache_n_marks;
43077 -extern atomic_t fscache_n_uncaches;
43078 +extern atomic_unchecked_t fscache_n_marks;
43079 +extern atomic_unchecked_t fscache_n_uncaches;
43080
43081 -extern atomic_t fscache_n_acquires;
43082 -extern atomic_t fscache_n_acquires_null;
43083 -extern atomic_t fscache_n_acquires_no_cache;
43084 -extern atomic_t fscache_n_acquires_ok;
43085 -extern atomic_t fscache_n_acquires_nobufs;
43086 -extern atomic_t fscache_n_acquires_oom;
43087 +extern atomic_unchecked_t fscache_n_acquires;
43088 +extern atomic_unchecked_t fscache_n_acquires_null;
43089 +extern atomic_unchecked_t fscache_n_acquires_no_cache;
43090 +extern atomic_unchecked_t fscache_n_acquires_ok;
43091 +extern atomic_unchecked_t fscache_n_acquires_nobufs;
43092 +extern atomic_unchecked_t fscache_n_acquires_oom;
43093
43094 -extern atomic_t fscache_n_updates;
43095 -extern atomic_t fscache_n_updates_null;
43096 -extern atomic_t fscache_n_updates_run;
43097 +extern atomic_unchecked_t fscache_n_updates;
43098 +extern atomic_unchecked_t fscache_n_updates_null;
43099 +extern atomic_unchecked_t fscache_n_updates_run;
43100
43101 -extern atomic_t fscache_n_relinquishes;
43102 -extern atomic_t fscache_n_relinquishes_null;
43103 -extern atomic_t fscache_n_relinquishes_waitcrt;
43104 -extern atomic_t fscache_n_relinquishes_retire;
43105 +extern atomic_unchecked_t fscache_n_relinquishes;
43106 +extern atomic_unchecked_t fscache_n_relinquishes_null;
43107 +extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
43108 +extern atomic_unchecked_t fscache_n_relinquishes_retire;
43109
43110 -extern atomic_t fscache_n_cookie_index;
43111 -extern atomic_t fscache_n_cookie_data;
43112 -extern atomic_t fscache_n_cookie_special;
43113 +extern atomic_unchecked_t fscache_n_cookie_index;
43114 +extern atomic_unchecked_t fscache_n_cookie_data;
43115 +extern atomic_unchecked_t fscache_n_cookie_special;
43116
43117 -extern atomic_t fscache_n_object_alloc;
43118 -extern atomic_t fscache_n_object_no_alloc;
43119 -extern atomic_t fscache_n_object_lookups;
43120 -extern atomic_t fscache_n_object_lookups_negative;
43121 -extern atomic_t fscache_n_object_lookups_positive;
43122 -extern atomic_t fscache_n_object_lookups_timed_out;
43123 -extern atomic_t fscache_n_object_created;
43124 -extern atomic_t fscache_n_object_avail;
43125 -extern atomic_t fscache_n_object_dead;
43126 +extern atomic_unchecked_t fscache_n_object_alloc;
43127 +extern atomic_unchecked_t fscache_n_object_no_alloc;
43128 +extern atomic_unchecked_t fscache_n_object_lookups;
43129 +extern atomic_unchecked_t fscache_n_object_lookups_negative;
43130 +extern atomic_unchecked_t fscache_n_object_lookups_positive;
43131 +extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
43132 +extern atomic_unchecked_t fscache_n_object_created;
43133 +extern atomic_unchecked_t fscache_n_object_avail;
43134 +extern atomic_unchecked_t fscache_n_object_dead;
43135
43136 -extern atomic_t fscache_n_checkaux_none;
43137 -extern atomic_t fscache_n_checkaux_okay;
43138 -extern atomic_t fscache_n_checkaux_update;
43139 -extern atomic_t fscache_n_checkaux_obsolete;
43140 +extern atomic_unchecked_t fscache_n_checkaux_none;
43141 +extern atomic_unchecked_t fscache_n_checkaux_okay;
43142 +extern atomic_unchecked_t fscache_n_checkaux_update;
43143 +extern atomic_unchecked_t fscache_n_checkaux_obsolete;
43144
43145 extern atomic_t fscache_n_cop_alloc_object;
43146 extern atomic_t fscache_n_cop_lookup_object;
43147 @@ -255,6 +255,11 @@ static inline void fscache_stat(atomic_t *stat)
43148 atomic_inc(stat);
43149 }
43150
43151 +static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
43152 +{
43153 + atomic_inc_unchecked(stat);
43154 +}
43155 +
43156 static inline void fscache_stat_d(atomic_t *stat)
43157 {
43158 atomic_dec(stat);
43159 @@ -267,6 +272,7 @@ extern const struct file_operations fscache_stats_fops;
43160
43161 #define __fscache_stat(stat) (NULL)
43162 #define fscache_stat(stat) do {} while (0)
43163 +#define fscache_stat_unchecked(stat) do {} while (0)
43164 #define fscache_stat_d(stat) do {} while (0)
43165 #endif
43166
43167 diff --git a/fs/fscache/object.c b/fs/fscache/object.c
43168 index b6b897c..0ffff9c 100644
43169 --- a/fs/fscache/object.c
43170 +++ b/fs/fscache/object.c
43171 @@ -128,7 +128,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
43172 /* update the object metadata on disk */
43173 case FSCACHE_OBJECT_UPDATING:
43174 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
43175 - fscache_stat(&fscache_n_updates_run);
43176 + fscache_stat_unchecked(&fscache_n_updates_run);
43177 fscache_stat(&fscache_n_cop_update_object);
43178 object->cache->ops->update_object(object);
43179 fscache_stat_d(&fscache_n_cop_update_object);
43180 @@ -217,7 +217,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
43181 spin_lock(&object->lock);
43182 object->state = FSCACHE_OBJECT_DEAD;
43183 spin_unlock(&object->lock);
43184 - fscache_stat(&fscache_n_object_dead);
43185 + fscache_stat_unchecked(&fscache_n_object_dead);
43186 goto terminal_transit;
43187
43188 /* handle the parent cache of this object being withdrawn from
43189 @@ -232,7 +232,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
43190 spin_lock(&object->lock);
43191 object->state = FSCACHE_OBJECT_DEAD;
43192 spin_unlock(&object->lock);
43193 - fscache_stat(&fscache_n_object_dead);
43194 + fscache_stat_unchecked(&fscache_n_object_dead);
43195 goto terminal_transit;
43196
43197 /* complain about the object being woken up once it is
43198 @@ -461,7 +461,7 @@ static void fscache_lookup_object(struct fscache_object *object)
43199 parent->cookie->def->name, cookie->def->name,
43200 object->cache->tag->name);
43201
43202 - fscache_stat(&fscache_n_object_lookups);
43203 + fscache_stat_unchecked(&fscache_n_object_lookups);
43204 fscache_stat(&fscache_n_cop_lookup_object);
43205 ret = object->cache->ops->lookup_object(object);
43206 fscache_stat_d(&fscache_n_cop_lookup_object);
43207 @@ -472,7 +472,7 @@ static void fscache_lookup_object(struct fscache_object *object)
43208 if (ret == -ETIMEDOUT) {
43209 /* probably stuck behind another object, so move this one to
43210 * the back of the queue */
43211 - fscache_stat(&fscache_n_object_lookups_timed_out);
43212 + fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
43213 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
43214 }
43215
43216 @@ -495,7 +495,7 @@ void fscache_object_lookup_negative(struct fscache_object *object)
43217
43218 spin_lock(&object->lock);
43219 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
43220 - fscache_stat(&fscache_n_object_lookups_negative);
43221 + fscache_stat_unchecked(&fscache_n_object_lookups_negative);
43222
43223 /* transit here to allow write requests to begin stacking up
43224 * and read requests to begin returning ENODATA */
43225 @@ -541,7 +541,7 @@ void fscache_obtained_object(struct fscache_object *object)
43226 * result, in which case there may be data available */
43227 spin_lock(&object->lock);
43228 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
43229 - fscache_stat(&fscache_n_object_lookups_positive);
43230 + fscache_stat_unchecked(&fscache_n_object_lookups_positive);
43231
43232 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
43233
43234 @@ -555,7 +555,7 @@ void fscache_obtained_object(struct fscache_object *object)
43235 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
43236 } else {
43237 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
43238 - fscache_stat(&fscache_n_object_created);
43239 + fscache_stat_unchecked(&fscache_n_object_created);
43240
43241 object->state = FSCACHE_OBJECT_AVAILABLE;
43242 spin_unlock(&object->lock);
43243 @@ -602,7 +602,7 @@ static void fscache_object_available(struct fscache_object *object)
43244 fscache_enqueue_dependents(object);
43245
43246 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
43247 - fscache_stat(&fscache_n_object_avail);
43248 + fscache_stat_unchecked(&fscache_n_object_avail);
43249
43250 _leave("");
43251 }
43252 @@ -861,7 +861,7 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
43253 enum fscache_checkaux result;
43254
43255 if (!object->cookie->def->check_aux) {
43256 - fscache_stat(&fscache_n_checkaux_none);
43257 + fscache_stat_unchecked(&fscache_n_checkaux_none);
43258 return FSCACHE_CHECKAUX_OKAY;
43259 }
43260
43261 @@ -870,17 +870,17 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
43262 switch (result) {
43263 /* entry okay as is */
43264 case FSCACHE_CHECKAUX_OKAY:
43265 - fscache_stat(&fscache_n_checkaux_okay);
43266 + fscache_stat_unchecked(&fscache_n_checkaux_okay);
43267 break;
43268
43269 /* entry requires update */
43270 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
43271 - fscache_stat(&fscache_n_checkaux_update);
43272 + fscache_stat_unchecked(&fscache_n_checkaux_update);
43273 break;
43274
43275 /* entry requires deletion */
43276 case FSCACHE_CHECKAUX_OBSOLETE:
43277 - fscache_stat(&fscache_n_checkaux_obsolete);
43278 + fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
43279 break;
43280
43281 default:
43282 diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
43283 index 30afdfa..2256596 100644
43284 --- a/fs/fscache/operation.c
43285 +++ b/fs/fscache/operation.c
43286 @@ -17,7 +17,7 @@
43287 #include <linux/slab.h>
43288 #include "internal.h"
43289
43290 -atomic_t fscache_op_debug_id;
43291 +atomic_unchecked_t fscache_op_debug_id;
43292 EXPORT_SYMBOL(fscache_op_debug_id);
43293
43294 /**
43295 @@ -38,7 +38,7 @@ void fscache_enqueue_operation(struct fscache_operation *op)
43296 ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
43297 ASSERTCMP(atomic_read(&op->usage), >, 0);
43298
43299 - fscache_stat(&fscache_n_op_enqueue);
43300 + fscache_stat_unchecked(&fscache_n_op_enqueue);
43301 switch (op->flags & FSCACHE_OP_TYPE) {
43302 case FSCACHE_OP_ASYNC:
43303 _debug("queue async");
43304 @@ -69,7 +69,7 @@ static void fscache_run_op(struct fscache_object *object,
43305 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
43306 if (op->processor)
43307 fscache_enqueue_operation(op);
43308 - fscache_stat(&fscache_n_op_run);
43309 + fscache_stat_unchecked(&fscache_n_op_run);
43310 }
43311
43312 /*
43313 @@ -98,11 +98,11 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
43314 if (object->n_ops > 1) {
43315 atomic_inc(&op->usage);
43316 list_add_tail(&op->pend_link, &object->pending_ops);
43317 - fscache_stat(&fscache_n_op_pend);
43318 + fscache_stat_unchecked(&fscache_n_op_pend);
43319 } else if (!list_empty(&object->pending_ops)) {
43320 atomic_inc(&op->usage);
43321 list_add_tail(&op->pend_link, &object->pending_ops);
43322 - fscache_stat(&fscache_n_op_pend);
43323 + fscache_stat_unchecked(&fscache_n_op_pend);
43324 fscache_start_operations(object);
43325 } else {
43326 ASSERTCMP(object->n_in_progress, ==, 0);
43327 @@ -118,7 +118,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
43328 object->n_exclusive++; /* reads and writes must wait */
43329 atomic_inc(&op->usage);
43330 list_add_tail(&op->pend_link, &object->pending_ops);
43331 - fscache_stat(&fscache_n_op_pend);
43332 + fscache_stat_unchecked(&fscache_n_op_pend);
43333 ret = 0;
43334 } else {
43335 /* not allowed to submit ops in any other state */
43336 @@ -203,11 +203,11 @@ int fscache_submit_op(struct fscache_object *object,
43337 if (object->n_exclusive > 0) {
43338 atomic_inc(&op->usage);
43339 list_add_tail(&op->pend_link, &object->pending_ops);
43340 - fscache_stat(&fscache_n_op_pend);
43341 + fscache_stat_unchecked(&fscache_n_op_pend);
43342 } else if (!list_empty(&object->pending_ops)) {
43343 atomic_inc(&op->usage);
43344 list_add_tail(&op->pend_link, &object->pending_ops);
43345 - fscache_stat(&fscache_n_op_pend);
43346 + fscache_stat_unchecked(&fscache_n_op_pend);
43347 fscache_start_operations(object);
43348 } else {
43349 ASSERTCMP(object->n_exclusive, ==, 0);
43350 @@ -219,12 +219,12 @@ int fscache_submit_op(struct fscache_object *object,
43351 object->n_ops++;
43352 atomic_inc(&op->usage);
43353 list_add_tail(&op->pend_link, &object->pending_ops);
43354 - fscache_stat(&fscache_n_op_pend);
43355 + fscache_stat_unchecked(&fscache_n_op_pend);
43356 ret = 0;
43357 } else if (object->state == FSCACHE_OBJECT_DYING ||
43358 object->state == FSCACHE_OBJECT_LC_DYING ||
43359 object->state == FSCACHE_OBJECT_WITHDRAWING) {
43360 - fscache_stat(&fscache_n_op_rejected);
43361 + fscache_stat_unchecked(&fscache_n_op_rejected);
43362 ret = -ENOBUFS;
43363 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
43364 fscache_report_unexpected_submission(object, op, ostate);
43365 @@ -294,7 +294,7 @@ int fscache_cancel_op(struct fscache_operation *op)
43366
43367 ret = -EBUSY;
43368 if (!list_empty(&op->pend_link)) {
43369 - fscache_stat(&fscache_n_op_cancelled);
43370 + fscache_stat_unchecked(&fscache_n_op_cancelled);
43371 list_del_init(&op->pend_link);
43372 object->n_ops--;
43373 if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
43374 @@ -331,7 +331,7 @@ void fscache_put_operation(struct fscache_operation *op)
43375 if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
43376 BUG();
43377
43378 - fscache_stat(&fscache_n_op_release);
43379 + fscache_stat_unchecked(&fscache_n_op_release);
43380
43381 if (op->release) {
43382 op->release(op);
43383 @@ -348,7 +348,7 @@ void fscache_put_operation(struct fscache_operation *op)
43384 * lock, and defer it otherwise */
43385 if (!spin_trylock(&object->lock)) {
43386 _debug("defer put");
43387 - fscache_stat(&fscache_n_op_deferred_release);
43388 + fscache_stat_unchecked(&fscache_n_op_deferred_release);
43389
43390 cache = object->cache;
43391 spin_lock(&cache->op_gc_list_lock);
43392 @@ -410,7 +410,7 @@ void fscache_operation_gc(struct work_struct *work)
43393
43394 _debug("GC DEFERRED REL OBJ%x OP%x",
43395 object->debug_id, op->debug_id);
43396 - fscache_stat(&fscache_n_op_gc);
43397 + fscache_stat_unchecked(&fscache_n_op_gc);
43398
43399 ASSERTCMP(atomic_read(&op->usage), ==, 0);
43400
43401 diff --git a/fs/fscache/page.c b/fs/fscache/page.c
43402 index 3f7a59b..cf196cc 100644
43403 --- a/fs/fscache/page.c
43404 +++ b/fs/fscache/page.c
43405 @@ -60,7 +60,7 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
43406 val = radix_tree_lookup(&cookie->stores, page->index);
43407 if (!val) {
43408 rcu_read_unlock();
43409 - fscache_stat(&fscache_n_store_vmscan_not_storing);
43410 + fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
43411 __fscache_uncache_page(cookie, page);
43412 return true;
43413 }
43414 @@ -90,11 +90,11 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
43415 spin_unlock(&cookie->stores_lock);
43416
43417 if (xpage) {
43418 - fscache_stat(&fscache_n_store_vmscan_cancelled);
43419 - fscache_stat(&fscache_n_store_radix_deletes);
43420 + fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
43421 + fscache_stat_unchecked(&fscache_n_store_radix_deletes);
43422 ASSERTCMP(xpage, ==, page);
43423 } else {
43424 - fscache_stat(&fscache_n_store_vmscan_gone);
43425 + fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
43426 }
43427
43428 wake_up_bit(&cookie->flags, 0);
43429 @@ -107,7 +107,7 @@ page_busy:
43430 /* we might want to wait here, but that could deadlock the allocator as
43431 * the work threads writing to the cache may all end up sleeping
43432 * on memory allocation */
43433 - fscache_stat(&fscache_n_store_vmscan_busy);
43434 + fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
43435 return false;
43436 }
43437 EXPORT_SYMBOL(__fscache_maybe_release_page);
43438 @@ -131,7 +131,7 @@ static void fscache_end_page_write(struct fscache_object *object,
43439 FSCACHE_COOKIE_STORING_TAG);
43440 if (!radix_tree_tag_get(&cookie->stores, page->index,
43441 FSCACHE_COOKIE_PENDING_TAG)) {
43442 - fscache_stat(&fscache_n_store_radix_deletes);
43443 + fscache_stat_unchecked(&fscache_n_store_radix_deletes);
43444 xpage = radix_tree_delete(&cookie->stores, page->index);
43445 }
43446 spin_unlock(&cookie->stores_lock);
43447 @@ -152,7 +152,7 @@ static void fscache_attr_changed_op(struct fscache_operation *op)
43448
43449 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
43450
43451 - fscache_stat(&fscache_n_attr_changed_calls);
43452 + fscache_stat_unchecked(&fscache_n_attr_changed_calls);
43453
43454 if (fscache_object_is_active(object)) {
43455 fscache_stat(&fscache_n_cop_attr_changed);
43456 @@ -177,11 +177,11 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
43457
43458 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
43459
43460 - fscache_stat(&fscache_n_attr_changed);
43461 + fscache_stat_unchecked(&fscache_n_attr_changed);
43462
43463 op = kzalloc(sizeof(*op), GFP_KERNEL);
43464 if (!op) {
43465 - fscache_stat(&fscache_n_attr_changed_nomem);
43466 + fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
43467 _leave(" = -ENOMEM");
43468 return -ENOMEM;
43469 }
43470 @@ -199,7 +199,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
43471 if (fscache_submit_exclusive_op(object, op) < 0)
43472 goto nobufs;
43473 spin_unlock(&cookie->lock);
43474 - fscache_stat(&fscache_n_attr_changed_ok);
43475 + fscache_stat_unchecked(&fscache_n_attr_changed_ok);
43476 fscache_put_operation(op);
43477 _leave(" = 0");
43478 return 0;
43479 @@ -207,7 +207,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
43480 nobufs:
43481 spin_unlock(&cookie->lock);
43482 kfree(op);
43483 - fscache_stat(&fscache_n_attr_changed_nobufs);
43484 + fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
43485 _leave(" = %d", -ENOBUFS);
43486 return -ENOBUFS;
43487 }
43488 @@ -243,7 +243,7 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
43489 /* allocate a retrieval operation and attempt to submit it */
43490 op = kzalloc(sizeof(*op), GFP_NOIO);
43491 if (!op) {
43492 - fscache_stat(&fscache_n_retrievals_nomem);
43493 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
43494 return NULL;
43495 }
43496
43497 @@ -271,13 +271,13 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
43498 return 0;
43499 }
43500
43501 - fscache_stat(&fscache_n_retrievals_wait);
43502 + fscache_stat_unchecked(&fscache_n_retrievals_wait);
43503
43504 jif = jiffies;
43505 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
43506 fscache_wait_bit_interruptible,
43507 TASK_INTERRUPTIBLE) != 0) {
43508 - fscache_stat(&fscache_n_retrievals_intr);
43509 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
43510 _leave(" = -ERESTARTSYS");
43511 return -ERESTARTSYS;
43512 }
43513 @@ -295,8 +295,8 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
43514 */
43515 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
43516 struct fscache_retrieval *op,
43517 - atomic_t *stat_op_waits,
43518 - atomic_t *stat_object_dead)
43519 + atomic_unchecked_t *stat_op_waits,
43520 + atomic_unchecked_t *stat_object_dead)
43521 {
43522 int ret;
43523
43524 @@ -304,7 +304,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
43525 goto check_if_dead;
43526
43527 _debug(">>> WT");
43528 - fscache_stat(stat_op_waits);
43529 + fscache_stat_unchecked(stat_op_waits);
43530 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
43531 fscache_wait_bit_interruptible,
43532 TASK_INTERRUPTIBLE) < 0) {
43533 @@ -321,7 +321,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
43534
43535 check_if_dead:
43536 if (unlikely(fscache_object_is_dead(object))) {
43537 - fscache_stat(stat_object_dead);
43538 + fscache_stat_unchecked(stat_object_dead);
43539 return -ENOBUFS;
43540 }
43541 return 0;
43542 @@ -348,7 +348,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
43543
43544 _enter("%p,%p,,,", cookie, page);
43545
43546 - fscache_stat(&fscache_n_retrievals);
43547 + fscache_stat_unchecked(&fscache_n_retrievals);
43548
43549 if (hlist_empty(&cookie->backing_objects))
43550 goto nobufs;
43551 @@ -381,7 +381,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
43552 goto nobufs_unlock;
43553 spin_unlock(&cookie->lock);
43554
43555 - fscache_stat(&fscache_n_retrieval_ops);
43556 + fscache_stat_unchecked(&fscache_n_retrieval_ops);
43557
43558 /* pin the netfs read context in case we need to do the actual netfs
43559 * read because we've encountered a cache read failure */
43560 @@ -411,15 +411,15 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
43561
43562 error:
43563 if (ret == -ENOMEM)
43564 - fscache_stat(&fscache_n_retrievals_nomem);
43565 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
43566 else if (ret == -ERESTARTSYS)
43567 - fscache_stat(&fscache_n_retrievals_intr);
43568 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
43569 else if (ret == -ENODATA)
43570 - fscache_stat(&fscache_n_retrievals_nodata);
43571 + fscache_stat_unchecked(&fscache_n_retrievals_nodata);
43572 else if (ret < 0)
43573 - fscache_stat(&fscache_n_retrievals_nobufs);
43574 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
43575 else
43576 - fscache_stat(&fscache_n_retrievals_ok);
43577 + fscache_stat_unchecked(&fscache_n_retrievals_ok);
43578
43579 fscache_put_retrieval(op);
43580 _leave(" = %d", ret);
43581 @@ -429,7 +429,7 @@ nobufs_unlock:
43582 spin_unlock(&cookie->lock);
43583 kfree(op);
43584 nobufs:
43585 - fscache_stat(&fscache_n_retrievals_nobufs);
43586 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
43587 _leave(" = -ENOBUFS");
43588 return -ENOBUFS;
43589 }
43590 @@ -467,7 +467,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
43591
43592 _enter("%p,,%d,,,", cookie, *nr_pages);
43593
43594 - fscache_stat(&fscache_n_retrievals);
43595 + fscache_stat_unchecked(&fscache_n_retrievals);
43596
43597 if (hlist_empty(&cookie->backing_objects))
43598 goto nobufs;
43599 @@ -497,7 +497,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
43600 goto nobufs_unlock;
43601 spin_unlock(&cookie->lock);
43602
43603 - fscache_stat(&fscache_n_retrieval_ops);
43604 + fscache_stat_unchecked(&fscache_n_retrieval_ops);
43605
43606 /* pin the netfs read context in case we need to do the actual netfs
43607 * read because we've encountered a cache read failure */
43608 @@ -527,15 +527,15 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
43609
43610 error:
43611 if (ret == -ENOMEM)
43612 - fscache_stat(&fscache_n_retrievals_nomem);
43613 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
43614 else if (ret == -ERESTARTSYS)
43615 - fscache_stat(&fscache_n_retrievals_intr);
43616 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
43617 else if (ret == -ENODATA)
43618 - fscache_stat(&fscache_n_retrievals_nodata);
43619 + fscache_stat_unchecked(&fscache_n_retrievals_nodata);
43620 else if (ret < 0)
43621 - fscache_stat(&fscache_n_retrievals_nobufs);
43622 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
43623 else
43624 - fscache_stat(&fscache_n_retrievals_ok);
43625 + fscache_stat_unchecked(&fscache_n_retrievals_ok);
43626
43627 fscache_put_retrieval(op);
43628 _leave(" = %d", ret);
43629 @@ -545,7 +545,7 @@ nobufs_unlock:
43630 spin_unlock(&cookie->lock);
43631 kfree(op);
43632 nobufs:
43633 - fscache_stat(&fscache_n_retrievals_nobufs);
43634 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
43635 _leave(" = -ENOBUFS");
43636 return -ENOBUFS;
43637 }
43638 @@ -569,7 +569,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
43639
43640 _enter("%p,%p,,,", cookie, page);
43641
43642 - fscache_stat(&fscache_n_allocs);
43643 + fscache_stat_unchecked(&fscache_n_allocs);
43644
43645 if (hlist_empty(&cookie->backing_objects))
43646 goto nobufs;
43647 @@ -595,7 +595,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
43648 goto nobufs_unlock;
43649 spin_unlock(&cookie->lock);
43650
43651 - fscache_stat(&fscache_n_alloc_ops);
43652 + fscache_stat_unchecked(&fscache_n_alloc_ops);
43653
43654 ret = fscache_wait_for_retrieval_activation(
43655 object, op,
43656 @@ -611,11 +611,11 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
43657
43658 error:
43659 if (ret == -ERESTARTSYS)
43660 - fscache_stat(&fscache_n_allocs_intr);
43661 + fscache_stat_unchecked(&fscache_n_allocs_intr);
43662 else if (ret < 0)
43663 - fscache_stat(&fscache_n_allocs_nobufs);
43664 + fscache_stat_unchecked(&fscache_n_allocs_nobufs);
43665 else
43666 - fscache_stat(&fscache_n_allocs_ok);
43667 + fscache_stat_unchecked(&fscache_n_allocs_ok);
43668
43669 fscache_put_retrieval(op);
43670 _leave(" = %d", ret);
43671 @@ -625,7 +625,7 @@ nobufs_unlock:
43672 spin_unlock(&cookie->lock);
43673 kfree(op);
43674 nobufs:
43675 - fscache_stat(&fscache_n_allocs_nobufs);
43676 + fscache_stat_unchecked(&fscache_n_allocs_nobufs);
43677 _leave(" = -ENOBUFS");
43678 return -ENOBUFS;
43679 }
43680 @@ -666,7 +666,7 @@ static void fscache_write_op(struct fscache_operation *_op)
43681
43682 spin_lock(&cookie->stores_lock);
43683
43684 - fscache_stat(&fscache_n_store_calls);
43685 + fscache_stat_unchecked(&fscache_n_store_calls);
43686
43687 /* find a page to store */
43688 page = NULL;
43689 @@ -677,7 +677,7 @@ static void fscache_write_op(struct fscache_operation *_op)
43690 page = results[0];
43691 _debug("gang %d [%lx]", n, page->index);
43692 if (page->index > op->store_limit) {
43693 - fscache_stat(&fscache_n_store_pages_over_limit);
43694 + fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
43695 goto superseded;
43696 }
43697
43698 @@ -689,7 +689,7 @@ static void fscache_write_op(struct fscache_operation *_op)
43699 spin_unlock(&cookie->stores_lock);
43700 spin_unlock(&object->lock);
43701
43702 - fscache_stat(&fscache_n_store_pages);
43703 + fscache_stat_unchecked(&fscache_n_store_pages);
43704 fscache_stat(&fscache_n_cop_write_page);
43705 ret = object->cache->ops->write_page(op, page);
43706 fscache_stat_d(&fscache_n_cop_write_page);
43707 @@ -757,7 +757,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
43708 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
43709 ASSERT(PageFsCache(page));
43710
43711 - fscache_stat(&fscache_n_stores);
43712 + fscache_stat_unchecked(&fscache_n_stores);
43713
43714 op = kzalloc(sizeof(*op), GFP_NOIO);
43715 if (!op)
43716 @@ -808,7 +808,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
43717 spin_unlock(&cookie->stores_lock);
43718 spin_unlock(&object->lock);
43719
43720 - op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
43721 + op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
43722 op->store_limit = object->store_limit;
43723
43724 if (fscache_submit_op(object, &op->op) < 0)
43725 @@ -816,8 +816,8 @@ int __fscache_write_page(struct fscache_cookie *cookie,
43726
43727 spin_unlock(&cookie->lock);
43728 radix_tree_preload_end();
43729 - fscache_stat(&fscache_n_store_ops);
43730 - fscache_stat(&fscache_n_stores_ok);
43731 + fscache_stat_unchecked(&fscache_n_store_ops);
43732 + fscache_stat_unchecked(&fscache_n_stores_ok);
43733
43734 /* the work queue now carries its own ref on the object */
43735 fscache_put_operation(&op->op);
43736 @@ -825,14 +825,14 @@ int __fscache_write_page(struct fscache_cookie *cookie,
43737 return 0;
43738
43739 already_queued:
43740 - fscache_stat(&fscache_n_stores_again);
43741 + fscache_stat_unchecked(&fscache_n_stores_again);
43742 already_pending:
43743 spin_unlock(&cookie->stores_lock);
43744 spin_unlock(&object->lock);
43745 spin_unlock(&cookie->lock);
43746 radix_tree_preload_end();
43747 kfree(op);
43748 - fscache_stat(&fscache_n_stores_ok);
43749 + fscache_stat_unchecked(&fscache_n_stores_ok);
43750 _leave(" = 0");
43751 return 0;
43752
43753 @@ -851,14 +851,14 @@ nobufs:
43754 spin_unlock(&cookie->lock);
43755 radix_tree_preload_end();
43756 kfree(op);
43757 - fscache_stat(&fscache_n_stores_nobufs);
43758 + fscache_stat_unchecked(&fscache_n_stores_nobufs);
43759 _leave(" = -ENOBUFS");
43760 return -ENOBUFS;
43761
43762 nomem_free:
43763 kfree(op);
43764 nomem:
43765 - fscache_stat(&fscache_n_stores_oom);
43766 + fscache_stat_unchecked(&fscache_n_stores_oom);
43767 _leave(" = -ENOMEM");
43768 return -ENOMEM;
43769 }
43770 @@ -876,7 +876,7 @@ void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
43771 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
43772 ASSERTCMP(page, !=, NULL);
43773
43774 - fscache_stat(&fscache_n_uncaches);
43775 + fscache_stat_unchecked(&fscache_n_uncaches);
43776
43777 /* cache withdrawal may beat us to it */
43778 if (!PageFsCache(page))
43779 @@ -929,7 +929,7 @@ void fscache_mark_pages_cached(struct fscache_retrieval *op,
43780 unsigned long loop;
43781
43782 #ifdef CONFIG_FSCACHE_STATS
43783 - atomic_add(pagevec->nr, &fscache_n_marks);
43784 + atomic_add_unchecked(pagevec->nr, &fscache_n_marks);
43785 #endif
43786
43787 for (loop = 0; loop < pagevec->nr; loop++) {
43788 diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
43789 index 4765190..2a067f2 100644
43790 --- a/fs/fscache/stats.c
43791 +++ b/fs/fscache/stats.c
43792 @@ -18,95 +18,95 @@
43793 /*
43794 * operation counters
43795 */
43796 -atomic_t fscache_n_op_pend;
43797 -atomic_t fscache_n_op_run;
43798 -atomic_t fscache_n_op_enqueue;
43799 -atomic_t fscache_n_op_requeue;
43800 -atomic_t fscache_n_op_deferred_release;
43801 -atomic_t fscache_n_op_release;
43802 -atomic_t fscache_n_op_gc;
43803 -atomic_t fscache_n_op_cancelled;
43804 -atomic_t fscache_n_op_rejected;
43805 +atomic_unchecked_t fscache_n_op_pend;
43806 +atomic_unchecked_t fscache_n_op_run;
43807 +atomic_unchecked_t fscache_n_op_enqueue;
43808 +atomic_unchecked_t fscache_n_op_requeue;
43809 +atomic_unchecked_t fscache_n_op_deferred_release;
43810 +atomic_unchecked_t fscache_n_op_release;
43811 +atomic_unchecked_t fscache_n_op_gc;
43812 +atomic_unchecked_t fscache_n_op_cancelled;
43813 +atomic_unchecked_t fscache_n_op_rejected;
43814
43815 -atomic_t fscache_n_attr_changed;
43816 -atomic_t fscache_n_attr_changed_ok;
43817 -atomic_t fscache_n_attr_changed_nobufs;
43818 -atomic_t fscache_n_attr_changed_nomem;
43819 -atomic_t fscache_n_attr_changed_calls;
43820 +atomic_unchecked_t fscache_n_attr_changed;
43821 +atomic_unchecked_t fscache_n_attr_changed_ok;
43822 +atomic_unchecked_t fscache_n_attr_changed_nobufs;
43823 +atomic_unchecked_t fscache_n_attr_changed_nomem;
43824 +atomic_unchecked_t fscache_n_attr_changed_calls;
43825
43826 -atomic_t fscache_n_allocs;
43827 -atomic_t fscache_n_allocs_ok;
43828 -atomic_t fscache_n_allocs_wait;
43829 -atomic_t fscache_n_allocs_nobufs;
43830 -atomic_t fscache_n_allocs_intr;
43831 -atomic_t fscache_n_allocs_object_dead;
43832 -atomic_t fscache_n_alloc_ops;
43833 -atomic_t fscache_n_alloc_op_waits;
43834 +atomic_unchecked_t fscache_n_allocs;
43835 +atomic_unchecked_t fscache_n_allocs_ok;
43836 +atomic_unchecked_t fscache_n_allocs_wait;
43837 +atomic_unchecked_t fscache_n_allocs_nobufs;
43838 +atomic_unchecked_t fscache_n_allocs_intr;
43839 +atomic_unchecked_t fscache_n_allocs_object_dead;
43840 +atomic_unchecked_t fscache_n_alloc_ops;
43841 +atomic_unchecked_t fscache_n_alloc_op_waits;
43842
43843 -atomic_t fscache_n_retrievals;
43844 -atomic_t fscache_n_retrievals_ok;
43845 -atomic_t fscache_n_retrievals_wait;
43846 -atomic_t fscache_n_retrievals_nodata;
43847 -atomic_t fscache_n_retrievals_nobufs;
43848 -atomic_t fscache_n_retrievals_intr;
43849 -atomic_t fscache_n_retrievals_nomem;
43850 -atomic_t fscache_n_retrievals_object_dead;
43851 -atomic_t fscache_n_retrieval_ops;
43852 -atomic_t fscache_n_retrieval_op_waits;
43853 +atomic_unchecked_t fscache_n_retrievals;
43854 +atomic_unchecked_t fscache_n_retrievals_ok;
43855 +atomic_unchecked_t fscache_n_retrievals_wait;
43856 +atomic_unchecked_t fscache_n_retrievals_nodata;
43857 +atomic_unchecked_t fscache_n_retrievals_nobufs;
43858 +atomic_unchecked_t fscache_n_retrievals_intr;
43859 +atomic_unchecked_t fscache_n_retrievals_nomem;
43860 +atomic_unchecked_t fscache_n_retrievals_object_dead;
43861 +atomic_unchecked_t fscache_n_retrieval_ops;
43862 +atomic_unchecked_t fscache_n_retrieval_op_waits;
43863
43864 -atomic_t fscache_n_stores;
43865 -atomic_t fscache_n_stores_ok;
43866 -atomic_t fscache_n_stores_again;
43867 -atomic_t fscache_n_stores_nobufs;
43868 -atomic_t fscache_n_stores_oom;
43869 -atomic_t fscache_n_store_ops;
43870 -atomic_t fscache_n_store_calls;
43871 -atomic_t fscache_n_store_pages;
43872 -atomic_t fscache_n_store_radix_deletes;
43873 -atomic_t fscache_n_store_pages_over_limit;
43874 +atomic_unchecked_t fscache_n_stores;
43875 +atomic_unchecked_t fscache_n_stores_ok;
43876 +atomic_unchecked_t fscache_n_stores_again;
43877 +atomic_unchecked_t fscache_n_stores_nobufs;
43878 +atomic_unchecked_t fscache_n_stores_oom;
43879 +atomic_unchecked_t fscache_n_store_ops;
43880 +atomic_unchecked_t fscache_n_store_calls;
43881 +atomic_unchecked_t fscache_n_store_pages;
43882 +atomic_unchecked_t fscache_n_store_radix_deletes;
43883 +atomic_unchecked_t fscache_n_store_pages_over_limit;
43884
43885 -atomic_t fscache_n_store_vmscan_not_storing;
43886 -atomic_t fscache_n_store_vmscan_gone;
43887 -atomic_t fscache_n_store_vmscan_busy;
43888 -atomic_t fscache_n_store_vmscan_cancelled;
43889 +atomic_unchecked_t fscache_n_store_vmscan_not_storing;
43890 +atomic_unchecked_t fscache_n_store_vmscan_gone;
43891 +atomic_unchecked_t fscache_n_store_vmscan_busy;
43892 +atomic_unchecked_t fscache_n_store_vmscan_cancelled;
43893
43894 -atomic_t fscache_n_marks;
43895 -atomic_t fscache_n_uncaches;
43896 +atomic_unchecked_t fscache_n_marks;
43897 +atomic_unchecked_t fscache_n_uncaches;
43898
43899 -atomic_t fscache_n_acquires;
43900 -atomic_t fscache_n_acquires_null;
43901 -atomic_t fscache_n_acquires_no_cache;
43902 -atomic_t fscache_n_acquires_ok;
43903 -atomic_t fscache_n_acquires_nobufs;
43904 -atomic_t fscache_n_acquires_oom;
43905 +atomic_unchecked_t fscache_n_acquires;
43906 +atomic_unchecked_t fscache_n_acquires_null;
43907 +atomic_unchecked_t fscache_n_acquires_no_cache;
43908 +atomic_unchecked_t fscache_n_acquires_ok;
43909 +atomic_unchecked_t fscache_n_acquires_nobufs;
43910 +atomic_unchecked_t fscache_n_acquires_oom;
43911
43912 -atomic_t fscache_n_updates;
43913 -atomic_t fscache_n_updates_null;
43914 -atomic_t fscache_n_updates_run;
43915 +atomic_unchecked_t fscache_n_updates;
43916 +atomic_unchecked_t fscache_n_updates_null;
43917 +atomic_unchecked_t fscache_n_updates_run;
43918
43919 -atomic_t fscache_n_relinquishes;
43920 -atomic_t fscache_n_relinquishes_null;
43921 -atomic_t fscache_n_relinquishes_waitcrt;
43922 -atomic_t fscache_n_relinquishes_retire;
43923 +atomic_unchecked_t fscache_n_relinquishes;
43924 +atomic_unchecked_t fscache_n_relinquishes_null;
43925 +atomic_unchecked_t fscache_n_relinquishes_waitcrt;
43926 +atomic_unchecked_t fscache_n_relinquishes_retire;
43927
43928 -atomic_t fscache_n_cookie_index;
43929 -atomic_t fscache_n_cookie_data;
43930 -atomic_t fscache_n_cookie_special;
43931 +atomic_unchecked_t fscache_n_cookie_index;
43932 +atomic_unchecked_t fscache_n_cookie_data;
43933 +atomic_unchecked_t fscache_n_cookie_special;
43934
43935 -atomic_t fscache_n_object_alloc;
43936 -atomic_t fscache_n_object_no_alloc;
43937 -atomic_t fscache_n_object_lookups;
43938 -atomic_t fscache_n_object_lookups_negative;
43939 -atomic_t fscache_n_object_lookups_positive;
43940 -atomic_t fscache_n_object_lookups_timed_out;
43941 -atomic_t fscache_n_object_created;
43942 -atomic_t fscache_n_object_avail;
43943 -atomic_t fscache_n_object_dead;
43944 +atomic_unchecked_t fscache_n_object_alloc;
43945 +atomic_unchecked_t fscache_n_object_no_alloc;
43946 +atomic_unchecked_t fscache_n_object_lookups;
43947 +atomic_unchecked_t fscache_n_object_lookups_negative;
43948 +atomic_unchecked_t fscache_n_object_lookups_positive;
43949 +atomic_unchecked_t fscache_n_object_lookups_timed_out;
43950 +atomic_unchecked_t fscache_n_object_created;
43951 +atomic_unchecked_t fscache_n_object_avail;
43952 +atomic_unchecked_t fscache_n_object_dead;
43953
43954 -atomic_t fscache_n_checkaux_none;
43955 -atomic_t fscache_n_checkaux_okay;
43956 -atomic_t fscache_n_checkaux_update;
43957 -atomic_t fscache_n_checkaux_obsolete;
43958 +atomic_unchecked_t fscache_n_checkaux_none;
43959 +atomic_unchecked_t fscache_n_checkaux_okay;
43960 +atomic_unchecked_t fscache_n_checkaux_update;
43961 +atomic_unchecked_t fscache_n_checkaux_obsolete;
43962
43963 atomic_t fscache_n_cop_alloc_object;
43964 atomic_t fscache_n_cop_lookup_object;
43965 @@ -133,113 +133,113 @@ static int fscache_stats_show(struct seq_file *m, void *v)
43966 seq_puts(m, "FS-Cache statistics\n");
43967
43968 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
43969 - atomic_read(&fscache_n_cookie_index),
43970 - atomic_read(&fscache_n_cookie_data),
43971 - atomic_read(&fscache_n_cookie_special));
43972 + atomic_read_unchecked(&fscache_n_cookie_index),
43973 + atomic_read_unchecked(&fscache_n_cookie_data),
43974 + atomic_read_unchecked(&fscache_n_cookie_special));
43975
43976 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
43977 - atomic_read(&fscache_n_object_alloc),
43978 - atomic_read(&fscache_n_object_no_alloc),
43979 - atomic_read(&fscache_n_object_avail),
43980 - atomic_read(&fscache_n_object_dead));
43981 + atomic_read_unchecked(&fscache_n_object_alloc),
43982 + atomic_read_unchecked(&fscache_n_object_no_alloc),
43983 + atomic_read_unchecked(&fscache_n_object_avail),
43984 + atomic_read_unchecked(&fscache_n_object_dead));
43985 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
43986 - atomic_read(&fscache_n_checkaux_none),
43987 - atomic_read(&fscache_n_checkaux_okay),
43988 - atomic_read(&fscache_n_checkaux_update),
43989 - atomic_read(&fscache_n_checkaux_obsolete));
43990 + atomic_read_unchecked(&fscache_n_checkaux_none),
43991 + atomic_read_unchecked(&fscache_n_checkaux_okay),
43992 + atomic_read_unchecked(&fscache_n_checkaux_update),
43993 + atomic_read_unchecked(&fscache_n_checkaux_obsolete));
43994
43995 seq_printf(m, "Pages : mrk=%u unc=%u\n",
43996 - atomic_read(&fscache_n_marks),
43997 - atomic_read(&fscache_n_uncaches));
43998 + atomic_read_unchecked(&fscache_n_marks),
43999 + atomic_read_unchecked(&fscache_n_uncaches));
44000
44001 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
44002 " oom=%u\n",
44003 - atomic_read(&fscache_n_acquires),
44004 - atomic_read(&fscache_n_acquires_null),
44005 - atomic_read(&fscache_n_acquires_no_cache),
44006 - atomic_read(&fscache_n_acquires_ok),
44007 - atomic_read(&fscache_n_acquires_nobufs),
44008 - atomic_read(&fscache_n_acquires_oom));
44009 + atomic_read_unchecked(&fscache_n_acquires),
44010 + atomic_read_unchecked(&fscache_n_acquires_null),
44011 + atomic_read_unchecked(&fscache_n_acquires_no_cache),
44012 + atomic_read_unchecked(&fscache_n_acquires_ok),
44013 + atomic_read_unchecked(&fscache_n_acquires_nobufs),
44014 + atomic_read_unchecked(&fscache_n_acquires_oom));
44015
44016 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
44017 - atomic_read(&fscache_n_object_lookups),
44018 - atomic_read(&fscache_n_object_lookups_negative),
44019 - atomic_read(&fscache_n_object_lookups_positive),
44020 - atomic_read(&fscache_n_object_created),
44021 - atomic_read(&fscache_n_object_lookups_timed_out));
44022 + atomic_read_unchecked(&fscache_n_object_lookups),
44023 + atomic_read_unchecked(&fscache_n_object_lookups_negative),
44024 + atomic_read_unchecked(&fscache_n_object_lookups_positive),
44025 + atomic_read_unchecked(&fscache_n_object_created),
44026 + atomic_read_unchecked(&fscache_n_object_lookups_timed_out));
44027
44028 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
44029 - atomic_read(&fscache_n_updates),
44030 - atomic_read(&fscache_n_updates_null),
44031 - atomic_read(&fscache_n_updates_run));
44032 + atomic_read_unchecked(&fscache_n_updates),
44033 + atomic_read_unchecked(&fscache_n_updates_null),
44034 + atomic_read_unchecked(&fscache_n_updates_run));
44035
44036 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
44037 - atomic_read(&fscache_n_relinquishes),
44038 - atomic_read(&fscache_n_relinquishes_null),
44039 - atomic_read(&fscache_n_relinquishes_waitcrt),
44040 - atomic_read(&fscache_n_relinquishes_retire));
44041 + atomic_read_unchecked(&fscache_n_relinquishes),
44042 + atomic_read_unchecked(&fscache_n_relinquishes_null),
44043 + atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
44044 + atomic_read_unchecked(&fscache_n_relinquishes_retire));
44045
44046 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
44047 - atomic_read(&fscache_n_attr_changed),
44048 - atomic_read(&fscache_n_attr_changed_ok),
44049 - atomic_read(&fscache_n_attr_changed_nobufs),
44050 - atomic_read(&fscache_n_attr_changed_nomem),
44051 - atomic_read(&fscache_n_attr_changed_calls));
44052 + atomic_read_unchecked(&fscache_n_attr_changed),
44053 + atomic_read_unchecked(&fscache_n_attr_changed_ok),
44054 + atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
44055 + atomic_read_unchecked(&fscache_n_attr_changed_nomem),
44056 + atomic_read_unchecked(&fscache_n_attr_changed_calls));
44057
44058 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
44059 - atomic_read(&fscache_n_allocs),
44060 - atomic_read(&fscache_n_allocs_ok),
44061 - atomic_read(&fscache_n_allocs_wait),
44062 - atomic_read(&fscache_n_allocs_nobufs),
44063 - atomic_read(&fscache_n_allocs_intr));
44064 + atomic_read_unchecked(&fscache_n_allocs),
44065 + atomic_read_unchecked(&fscache_n_allocs_ok),
44066 + atomic_read_unchecked(&fscache_n_allocs_wait),
44067 + atomic_read_unchecked(&fscache_n_allocs_nobufs),
44068 + atomic_read_unchecked(&fscache_n_allocs_intr));
44069 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
44070 - atomic_read(&fscache_n_alloc_ops),
44071 - atomic_read(&fscache_n_alloc_op_waits),
44072 - atomic_read(&fscache_n_allocs_object_dead));
44073 + atomic_read_unchecked(&fscache_n_alloc_ops),
44074 + atomic_read_unchecked(&fscache_n_alloc_op_waits),
44075 + atomic_read_unchecked(&fscache_n_allocs_object_dead));
44076
44077 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
44078 " int=%u oom=%u\n",
44079 - atomic_read(&fscache_n_retrievals),
44080 - atomic_read(&fscache_n_retrievals_ok),
44081 - atomic_read(&fscache_n_retrievals_wait),
44082 - atomic_read(&fscache_n_retrievals_nodata),
44083 - atomic_read(&fscache_n_retrievals_nobufs),
44084 - atomic_read(&fscache_n_retrievals_intr),
44085 - atomic_read(&fscache_n_retrievals_nomem));
44086 + atomic_read_unchecked(&fscache_n_retrievals),
44087 + atomic_read_unchecked(&fscache_n_retrievals_ok),
44088 + atomic_read_unchecked(&fscache_n_retrievals_wait),
44089 + atomic_read_unchecked(&fscache_n_retrievals_nodata),
44090 + atomic_read_unchecked(&fscache_n_retrievals_nobufs),
44091 + atomic_read_unchecked(&fscache_n_retrievals_intr),
44092 + atomic_read_unchecked(&fscache_n_retrievals_nomem));
44093 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
44094 - atomic_read(&fscache_n_retrieval_ops),
44095 - atomic_read(&fscache_n_retrieval_op_waits),
44096 - atomic_read(&fscache_n_retrievals_object_dead));
44097 + atomic_read_unchecked(&fscache_n_retrieval_ops),
44098 + atomic_read_unchecked(&fscache_n_retrieval_op_waits),
44099 + atomic_read_unchecked(&fscache_n_retrievals_object_dead));
44100
44101 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
44102 - atomic_read(&fscache_n_stores),
44103 - atomic_read(&fscache_n_stores_ok),
44104 - atomic_read(&fscache_n_stores_again),
44105 - atomic_read(&fscache_n_stores_nobufs),
44106 - atomic_read(&fscache_n_stores_oom));
44107 + atomic_read_unchecked(&fscache_n_stores),
44108 + atomic_read_unchecked(&fscache_n_stores_ok),
44109 + atomic_read_unchecked(&fscache_n_stores_again),
44110 + atomic_read_unchecked(&fscache_n_stores_nobufs),
44111 + atomic_read_unchecked(&fscache_n_stores_oom));
44112 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
44113 - atomic_read(&fscache_n_store_ops),
44114 - atomic_read(&fscache_n_store_calls),
44115 - atomic_read(&fscache_n_store_pages),
44116 - atomic_read(&fscache_n_store_radix_deletes),
44117 - atomic_read(&fscache_n_store_pages_over_limit));
44118 + atomic_read_unchecked(&fscache_n_store_ops),
44119 + atomic_read_unchecked(&fscache_n_store_calls),
44120 + atomic_read_unchecked(&fscache_n_store_pages),
44121 + atomic_read_unchecked(&fscache_n_store_radix_deletes),
44122 + atomic_read_unchecked(&fscache_n_store_pages_over_limit));
44123
44124 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
44125 - atomic_read(&fscache_n_store_vmscan_not_storing),
44126 - atomic_read(&fscache_n_store_vmscan_gone),
44127 - atomic_read(&fscache_n_store_vmscan_busy),
44128 - atomic_read(&fscache_n_store_vmscan_cancelled));
44129 + atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
44130 + atomic_read_unchecked(&fscache_n_store_vmscan_gone),
44131 + atomic_read_unchecked(&fscache_n_store_vmscan_busy),
44132 + atomic_read_unchecked(&fscache_n_store_vmscan_cancelled));
44133
44134 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
44135 - atomic_read(&fscache_n_op_pend),
44136 - atomic_read(&fscache_n_op_run),
44137 - atomic_read(&fscache_n_op_enqueue),
44138 - atomic_read(&fscache_n_op_cancelled),
44139 - atomic_read(&fscache_n_op_rejected));
44140 + atomic_read_unchecked(&fscache_n_op_pend),
44141 + atomic_read_unchecked(&fscache_n_op_run),
44142 + atomic_read_unchecked(&fscache_n_op_enqueue),
44143 + atomic_read_unchecked(&fscache_n_op_cancelled),
44144 + atomic_read_unchecked(&fscache_n_op_rejected));
44145 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
44146 - atomic_read(&fscache_n_op_deferred_release),
44147 - atomic_read(&fscache_n_op_release),
44148 - atomic_read(&fscache_n_op_gc));
44149 + atomic_read_unchecked(&fscache_n_op_deferred_release),
44150 + atomic_read_unchecked(&fscache_n_op_release),
44151 + atomic_read_unchecked(&fscache_n_op_gc));
44152
44153 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
44154 atomic_read(&fscache_n_cop_alloc_object),
44155 diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
44156 index 3426521..3b75162 100644
44157 --- a/fs/fuse/cuse.c
44158 +++ b/fs/fuse/cuse.c
44159 @@ -587,10 +587,12 @@ static int __init cuse_init(void)
44160 INIT_LIST_HEAD(&cuse_conntbl[i]);
44161
44162 /* inherit and extend fuse_dev_operations */
44163 - cuse_channel_fops = fuse_dev_operations;
44164 - cuse_channel_fops.owner = THIS_MODULE;
44165 - cuse_channel_fops.open = cuse_channel_open;
44166 - cuse_channel_fops.release = cuse_channel_release;
44167 + pax_open_kernel();
44168 + memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
44169 + *(void **)&cuse_channel_fops.owner = THIS_MODULE;
44170 + *(void **)&cuse_channel_fops.open = cuse_channel_open;
44171 + *(void **)&cuse_channel_fops.release = cuse_channel_release;
44172 + pax_close_kernel();
44173
44174 cuse_class = class_create(THIS_MODULE, "cuse");
44175 if (IS_ERR(cuse_class))
44176 diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
44177 index 2aaf3ea..8e50863 100644
44178 --- a/fs/fuse/dev.c
44179 +++ b/fs/fuse/dev.c
44180 @@ -1242,7 +1242,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
44181 ret = 0;
44182 pipe_lock(pipe);
44183
44184 - if (!pipe->readers) {
44185 + if (!atomic_read(&pipe->readers)) {
44186 send_sig(SIGPIPE, current, 0);
44187 if (!ret)
44188 ret = -EPIPE;
44189 diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
44190 index 9f63e49..d8a64c0 100644
44191 --- a/fs/fuse/dir.c
44192 +++ b/fs/fuse/dir.c
44193 @@ -1147,7 +1147,7 @@ static char *read_link(struct dentry *dentry)
44194 return link;
44195 }
44196
44197 -static void free_link(char *link)
44198 +static void free_link(const char *link)
44199 {
44200 if (!IS_ERR(link))
44201 free_page((unsigned long) link);
44202 diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
44203 index cfd4959..a780959 100644
44204 --- a/fs/gfs2/inode.c
44205 +++ b/fs/gfs2/inode.c
44206 @@ -1490,7 +1490,7 @@ out:
44207
44208 static void gfs2_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
44209 {
44210 - char *s = nd_get_link(nd);
44211 + const char *s = nd_get_link(nd);
44212 if (!IS_ERR(s))
44213 kfree(s);
44214 }
44215 diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
44216 index 0be5a78..9cfb853 100644
44217 --- a/fs/hugetlbfs/inode.c
44218 +++ b/fs/hugetlbfs/inode.c
44219 @@ -915,7 +915,7 @@ static struct file_system_type hugetlbfs_fs_type = {
44220 .kill_sb = kill_litter_super,
44221 };
44222
44223 -static struct vfsmount *hugetlbfs_vfsmount;
44224 +struct vfsmount *hugetlbfs_vfsmount;
44225
44226 static int can_do_hugetlb_shm(void)
44227 {
44228 diff --git a/fs/inode.c b/fs/inode.c
44229 index ee4e66b..0451521 100644
44230 --- a/fs/inode.c
44231 +++ b/fs/inode.c
44232 @@ -787,8 +787,8 @@ unsigned int get_next_ino(void)
44233
44234 #ifdef CONFIG_SMP
44235 if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
44236 - static atomic_t shared_last_ino;
44237 - int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
44238 + static atomic_unchecked_t shared_last_ino;
44239 + int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino);
44240
44241 res = next - LAST_INO_BATCH;
44242 }
44243 diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
44244 index e513f19..2ab1351 100644
44245 --- a/fs/jffs2/erase.c
44246 +++ b/fs/jffs2/erase.c
44247 @@ -439,7 +439,8 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
44248 struct jffs2_unknown_node marker = {
44249 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
44250 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
44251 - .totlen = cpu_to_je32(c->cleanmarker_size)
44252 + .totlen = cpu_to_je32(c->cleanmarker_size),
44253 + .hdr_crc = cpu_to_je32(0)
44254 };
44255
44256 jffs2_prealloc_raw_node_refs(c, jeb, 1);
44257 diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
44258 index b09e51d..e482afa 100644
44259 --- a/fs/jffs2/wbuf.c
44260 +++ b/fs/jffs2/wbuf.c
44261 @@ -1011,7 +1011,8 @@ static const struct jffs2_unknown_node oob_cleanmarker =
44262 {
44263 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
44264 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
44265 - .totlen = constant_cpu_to_je32(8)
44266 + .totlen = constant_cpu_to_je32(8),
44267 + .hdr_crc = constant_cpu_to_je32(0)
44268 };
44269
44270 /*
44271 diff --git a/fs/jfs/super.c b/fs/jfs/super.c
44272 index a44eff0..462e07d 100644
44273 --- a/fs/jfs/super.c
44274 +++ b/fs/jfs/super.c
44275 @@ -802,7 +802,7 @@ static int __init init_jfs_fs(void)
44276
44277 jfs_inode_cachep =
44278 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
44279 - SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
44280 + SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
44281 init_once);
44282 if (jfs_inode_cachep == NULL)
44283 return -ENOMEM;
44284 diff --git a/fs/libfs.c b/fs/libfs.c
44285 index f6d411e..e82a08d 100644
44286 --- a/fs/libfs.c
44287 +++ b/fs/libfs.c
44288 @@ -165,6 +165,9 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
44289
44290 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
44291 struct dentry *next;
44292 + char d_name[sizeof(next->d_iname)];
44293 + const unsigned char *name;
44294 +
44295 next = list_entry(p, struct dentry, d_u.d_child);
44296 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
44297 if (!simple_positive(next)) {
44298 @@ -174,7 +177,12 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
44299
44300 spin_unlock(&next->d_lock);
44301 spin_unlock(&dentry->d_lock);
44302 - if (filldir(dirent, next->d_name.name,
44303 + name = next->d_name.name;
44304 + if (name == next->d_iname) {
44305 + memcpy(d_name, name, next->d_name.len);
44306 + name = d_name;
44307 + }
44308 + if (filldir(dirent, name,
44309 next->d_name.len, filp->f_pos,
44310 next->d_inode->i_ino,
44311 dt_type(next->d_inode)) < 0)
44312 diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
44313 index 8392cb8..80d6193 100644
44314 --- a/fs/lockd/clntproc.c
44315 +++ b/fs/lockd/clntproc.c
44316 @@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt_cancel_ops;
44317 /*
44318 * Cookie counter for NLM requests
44319 */
44320 -static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
44321 +static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
44322
44323 void nlmclnt_next_cookie(struct nlm_cookie *c)
44324 {
44325 - u32 cookie = atomic_inc_return(&nlm_cookie);
44326 + u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
44327
44328 memcpy(c->data, &cookie, 4);
44329 c->len=4;
44330 diff --git a/fs/locks.c b/fs/locks.c
44331 index 637694b..f84a121 100644
44332 --- a/fs/locks.c
44333 +++ b/fs/locks.c
44334 @@ -2074,16 +2074,16 @@ void locks_remove_flock(struct file *filp)
44335 return;
44336
44337 if (filp->f_op && filp->f_op->flock) {
44338 - struct file_lock fl = {
44339 + struct file_lock flock = {
44340 .fl_pid = current->tgid,
44341 .fl_file = filp,
44342 .fl_flags = FL_FLOCK,
44343 .fl_type = F_UNLCK,
44344 .fl_end = OFFSET_MAX,
44345 };
44346 - filp->f_op->flock(filp, F_SETLKW, &fl);
44347 - if (fl.fl_ops && fl.fl_ops->fl_release_private)
44348 - fl.fl_ops->fl_release_private(&fl);
44349 + filp->f_op->flock(filp, F_SETLKW, &flock);
44350 + if (flock.fl_ops && flock.fl_ops->fl_release_private)
44351 + flock.fl_ops->fl_release_private(&flock);
44352 }
44353
44354 lock_flocks();
44355 diff --git a/fs/namei.c b/fs/namei.c
44356 index 5008f01..90328a7 100644
44357 --- a/fs/namei.c
44358 +++ b/fs/namei.c
44359 @@ -279,16 +279,32 @@ int generic_permission(struct inode *inode, int mask)
44360 if (ret != -EACCES)
44361 return ret;
44362
44363 +#ifdef CONFIG_GRKERNSEC
44364 + /* we'll block if we have to log due to a denied capability use */
44365 + if (mask & MAY_NOT_BLOCK)
44366 + return -ECHILD;
44367 +#endif
44368 +
44369 if (S_ISDIR(inode->i_mode)) {
44370 /* DACs are overridable for directories */
44371 - if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
44372 - return 0;
44373 if (!(mask & MAY_WRITE))
44374 - if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
44375 + if (ns_capable_nolog(inode_userns(inode), CAP_DAC_OVERRIDE) ||
44376 + ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
44377 return 0;
44378 + if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
44379 + return 0;
44380 return -EACCES;
44381 }
44382 /*
44383 + * Searching includes executable on directories, else just read.
44384 + */
44385 + mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
44386 + if (mask == MAY_READ)
44387 + if (ns_capable_nolog(inode_userns(inode), CAP_DAC_OVERRIDE) ||
44388 + ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
44389 + return 0;
44390 +
44391 + /*
44392 * Read/write DACs are always overridable.
44393 * Executable DACs are overridable when there is
44394 * at least one exec bit set.
44395 @@ -297,14 +313,6 @@ int generic_permission(struct inode *inode, int mask)
44396 if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
44397 return 0;
44398
44399 - /*
44400 - * Searching includes executable on directories, else just read.
44401 - */
44402 - mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
44403 - if (mask == MAY_READ)
44404 - if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
44405 - return 0;
44406 -
44407 return -EACCES;
44408 }
44409
44410 @@ -653,11 +661,19 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
44411 return error;
44412 }
44413
44414 + if (gr_handle_follow_link(dentry->d_parent->d_inode,
44415 + dentry->d_inode, dentry, nd->path.mnt)) {
44416 + error = -EACCES;
44417 + *p = ERR_PTR(error); /* no ->put_link(), please */
44418 + path_put(&nd->path);
44419 + return error;
44420 + }
44421 +
44422 nd->last_type = LAST_BIND;
44423 *p = dentry->d_inode->i_op->follow_link(dentry, nd);
44424 error = PTR_ERR(*p);
44425 if (!IS_ERR(*p)) {
44426 - char *s = nd_get_link(nd);
44427 + const char *s = nd_get_link(nd);
44428 error = 0;
44429 if (s)
44430 error = __vfs_follow_link(nd, s);
44431 @@ -1622,6 +1638,21 @@ static int path_lookupat(int dfd, const char *name,
44432 if (!err)
44433 err = complete_walk(nd);
44434
44435 + if (!(nd->flags & LOOKUP_PARENT)) {
44436 +#ifdef CONFIG_GRKERNSEC
44437 + if (flags & LOOKUP_RCU) {
44438 + if (!err)
44439 + path_put(&nd->path);
44440 + err = -ECHILD;
44441 + } else
44442 +#endif
44443 + if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
44444 + if (!err)
44445 + path_put(&nd->path);
44446 + err = -ENOENT;
44447 + }
44448 + }
44449 +
44450 if (!err && nd->flags & LOOKUP_DIRECTORY) {
44451 if (!nd->inode->i_op->lookup) {
44452 path_put(&nd->path);
44453 @@ -1649,6 +1680,15 @@ static int do_path_lookup(int dfd, const char *name,
44454 retval = path_lookupat(dfd, name, flags | LOOKUP_REVAL, nd);
44455
44456 if (likely(!retval)) {
44457 + if (*name != '/' && nd->path.dentry && nd->inode) {
44458 +#ifdef CONFIG_GRKERNSEC
44459 + if (flags & LOOKUP_RCU)
44460 + return -ECHILD;
44461 +#endif
44462 + if (!gr_chroot_fchdir(nd->path.dentry, nd->path.mnt))
44463 + return -ENOENT;
44464 + }
44465 +
44466 if (unlikely(!audit_dummy_context())) {
44467 if (nd->path.dentry && nd->inode)
44468 audit_inode(name, nd->path.dentry);
44469 @@ -2046,6 +2086,13 @@ static int may_open(struct path *path, int acc_mode, int flag)
44470 if (flag & O_NOATIME && !inode_owner_or_capable(inode))
44471 return -EPERM;
44472
44473 + if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode))
44474 + return -EPERM;
44475 + if (gr_handle_rawio(inode))
44476 + return -EPERM;
44477 + if (!gr_acl_handle_open(dentry, path->mnt, acc_mode))
44478 + return -EACCES;
44479 +
44480 return 0;
44481 }
44482
44483 @@ -2107,6 +2154,16 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
44484 error = complete_walk(nd);
44485 if (error)
44486 return ERR_PTR(error);
44487 +#ifdef CONFIG_GRKERNSEC
44488 + if (nd->flags & LOOKUP_RCU) {
44489 + error = -ECHILD;
44490 + goto exit;
44491 + }
44492 +#endif
44493 + if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
44494 + error = -ENOENT;
44495 + goto exit;
44496 + }
44497 audit_inode(pathname, nd->path.dentry);
44498 if (open_flag & O_CREAT) {
44499 error = -EISDIR;
44500 @@ -2117,6 +2174,16 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
44501 error = complete_walk(nd);
44502 if (error)
44503 return ERR_PTR(error);
44504 +#ifdef CONFIG_GRKERNSEC
44505 + if (nd->flags & LOOKUP_RCU) {
44506 + error = -ECHILD;
44507 + goto exit;
44508 + }
44509 +#endif
44510 + if (!gr_acl_handle_hidden_file(dir, nd->path.mnt)) {
44511 + error = -ENOENT;
44512 + goto exit;
44513 + }
44514 audit_inode(pathname, dir);
44515 goto ok;
44516 }
44517 @@ -2138,6 +2205,16 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
44518 error = complete_walk(nd);
44519 if (error)
44520 return ERR_PTR(-ECHILD);
44521 +#ifdef CONFIG_GRKERNSEC
44522 + if (nd->flags & LOOKUP_RCU) {
44523 + error = -ECHILD;
44524 + goto exit;
44525 + }
44526 +#endif
44527 + if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
44528 + error = -ENOENT;
44529 + goto exit;
44530 + }
44531
44532 error = -ENOTDIR;
44533 if (nd->flags & LOOKUP_DIRECTORY) {
44534 @@ -2178,6 +2255,12 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
44535 /* Negative dentry, just create the file */
44536 if (!dentry->d_inode) {
44537 int mode = op->mode;
44538 +
44539 + if (!gr_acl_handle_creat(path->dentry, nd->path.dentry, path->mnt, open_flag, acc_mode, mode)) {
44540 + error = -EACCES;
44541 + goto exit_mutex_unlock;
44542 + }
44543 +
44544 if (!IS_POSIXACL(dir->d_inode))
44545 mode &= ~current_umask();
44546 /*
44547 @@ -2201,6 +2284,8 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
44548 error = vfs_create(dir->d_inode, dentry, mode, nd);
44549 if (error)
44550 goto exit_mutex_unlock;
44551 + else
44552 + gr_handle_create(path->dentry, path->mnt);
44553 mutex_unlock(&dir->d_inode->i_mutex);
44554 dput(nd->path.dentry);
44555 nd->path.dentry = dentry;
44556 @@ -2210,6 +2295,19 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
44557 /*
44558 * It already exists.
44559 */
44560 +
44561 + if (!gr_acl_handle_hidden_file(dentry, nd->path.mnt)) {
44562 + error = -ENOENT;
44563 + goto exit_mutex_unlock;
44564 + }
44565 +
44566 + /* only check if O_CREAT is specified, all other checks need to go
44567 + into may_open */
44568 + if (gr_handle_fifo(path->dentry, path->mnt, dir, open_flag, acc_mode)) {
44569 + error = -EACCES;
44570 + goto exit_mutex_unlock;
44571 + }
44572 +
44573 mutex_unlock(&dir->d_inode->i_mutex);
44574 audit_inode(pathname, path->dentry);
44575
44576 @@ -2422,6 +2520,11 @@ struct dentry *kern_path_create(int dfd, const char *pathname, struct path *path
44577 *path = nd.path;
44578 return dentry;
44579 eexist:
44580 + if (!gr_acl_handle_hidden_file(dentry, nd.path.mnt)) {
44581 + dput(dentry);
44582 + dentry = ERR_PTR(-ENOENT);
44583 + goto fail;
44584 + }
44585 dput(dentry);
44586 dentry = ERR_PTR(-EEXIST);
44587 fail:
44588 @@ -2444,6 +2547,20 @@ struct dentry *user_path_create(int dfd, const char __user *pathname, struct pat
44589 }
44590 EXPORT_SYMBOL(user_path_create);
44591
44592 +static struct dentry *user_path_create_with_name(int dfd, const char __user *pathname, struct path *path, char **to, int is_dir)
44593 +{
44594 + char *tmp = getname(pathname);
44595 + struct dentry *res;
44596 + if (IS_ERR(tmp))
44597 + return ERR_CAST(tmp);
44598 + res = kern_path_create(dfd, tmp, path, is_dir);
44599 + if (IS_ERR(res))
44600 + putname(tmp);
44601 + else
44602 + *to = tmp;
44603 + return res;
44604 +}
44605 +
44606 int vfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
44607 {
44608 int error = may_create(dir, dentry);
44609 @@ -2511,6 +2628,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, int, mode,
44610 error = mnt_want_write(path.mnt);
44611 if (error)
44612 goto out_dput;
44613 +
44614 + if (gr_handle_chroot_mknod(dentry, path.mnt, mode)) {
44615 + error = -EPERM;
44616 + goto out_drop_write;
44617 + }
44618 +
44619 + if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
44620 + error = -EACCES;
44621 + goto out_drop_write;
44622 + }
44623 +
44624 error = security_path_mknod(&path, dentry, mode, dev);
44625 if (error)
44626 goto out_drop_write;
44627 @@ -2528,6 +2656,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, int, mode,
44628 }
44629 out_drop_write:
44630 mnt_drop_write(path.mnt);
44631 +
44632 + if (!error)
44633 + gr_handle_create(dentry, path.mnt);
44634 out_dput:
44635 dput(dentry);
44636 mutex_unlock(&path.dentry->d_inode->i_mutex);
44637 @@ -2577,12 +2708,21 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, int, mode)
44638 error = mnt_want_write(path.mnt);
44639 if (error)
44640 goto out_dput;
44641 +
44642 + if (!gr_acl_handle_mkdir(dentry, path.dentry, path.mnt)) {
44643 + error = -EACCES;
44644 + goto out_drop_write;
44645 + }
44646 +
44647 error = security_path_mkdir(&path, dentry, mode);
44648 if (error)
44649 goto out_drop_write;
44650 error = vfs_mkdir(path.dentry->d_inode, dentry, mode);
44651 out_drop_write:
44652 mnt_drop_write(path.mnt);
44653 +
44654 + if (!error)
44655 + gr_handle_create(dentry, path.mnt);
44656 out_dput:
44657 dput(dentry);
44658 mutex_unlock(&path.dentry->d_inode->i_mutex);
44659 @@ -2662,6 +2802,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
44660 char * name;
44661 struct dentry *dentry;
44662 struct nameidata nd;
44663 + ino_t saved_ino = 0;
44664 + dev_t saved_dev = 0;
44665
44666 error = user_path_parent(dfd, pathname, &nd, &name);
44667 if (error)
44668 @@ -2690,6 +2832,15 @@ static long do_rmdir(int dfd, const char __user *pathname)
44669 error = -ENOENT;
44670 goto exit3;
44671 }
44672 +
44673 + saved_ino = dentry->d_inode->i_ino;
44674 + saved_dev = gr_get_dev_from_dentry(dentry);
44675 +
44676 + if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
44677 + error = -EACCES;
44678 + goto exit3;
44679 + }
44680 +
44681 error = mnt_want_write(nd.path.mnt);
44682 if (error)
44683 goto exit3;
44684 @@ -2697,6 +2848,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
44685 if (error)
44686 goto exit4;
44687 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
44688 + if (!error && (saved_dev || saved_ino))
44689 + gr_handle_delete(saved_ino, saved_dev);
44690 exit4:
44691 mnt_drop_write(nd.path.mnt);
44692 exit3:
44693 @@ -2759,6 +2912,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
44694 struct dentry *dentry;
44695 struct nameidata nd;
44696 struct inode *inode = NULL;
44697 + ino_t saved_ino = 0;
44698 + dev_t saved_dev = 0;
44699
44700 error = user_path_parent(dfd, pathname, &nd, &name);
44701 if (error)
44702 @@ -2781,6 +2936,16 @@ static long do_unlinkat(int dfd, const char __user *pathname)
44703 if (!inode)
44704 goto slashes;
44705 ihold(inode);
44706 +
44707 + if (inode->i_nlink <= 1) {
44708 + saved_ino = inode->i_ino;
44709 + saved_dev = gr_get_dev_from_dentry(dentry);
44710 + }
44711 + if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
44712 + error = -EACCES;
44713 + goto exit2;
44714 + }
44715 +
44716 error = mnt_want_write(nd.path.mnt);
44717 if (error)
44718 goto exit2;
44719 @@ -2788,6 +2953,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
44720 if (error)
44721 goto exit3;
44722 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
44723 + if (!error && (saved_ino || saved_dev))
44724 + gr_handle_delete(saved_ino, saved_dev);
44725 exit3:
44726 mnt_drop_write(nd.path.mnt);
44727 exit2:
44728 @@ -2863,10 +3030,18 @@ SYSCALL_DEFINE3(symlinkat, const char __user *, oldname,
44729 error = mnt_want_write(path.mnt);
44730 if (error)
44731 goto out_dput;
44732 +
44733 + if (!gr_acl_handle_symlink(dentry, path.dentry, path.mnt, from)) {
44734 + error = -EACCES;
44735 + goto out_drop_write;
44736 + }
44737 +
44738 error = security_path_symlink(&path, dentry, from);
44739 if (error)
44740 goto out_drop_write;
44741 error = vfs_symlink(path.dentry->d_inode, dentry, from);
44742 + if (!error)
44743 + gr_handle_create(dentry, path.mnt);
44744 out_drop_write:
44745 mnt_drop_write(path.mnt);
44746 out_dput:
44747 @@ -2938,6 +3113,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
44748 {
44749 struct dentry *new_dentry;
44750 struct path old_path, new_path;
44751 + char *to = NULL;
44752 int how = 0;
44753 int error;
44754
44755 @@ -2961,7 +3137,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
44756 if (error)
44757 return error;
44758
44759 - new_dentry = user_path_create(newdfd, newname, &new_path, 0);
44760 + new_dentry = user_path_create_with_name(newdfd, newname, &new_path, &to, 0);
44761 error = PTR_ERR(new_dentry);
44762 if (IS_ERR(new_dentry))
44763 goto out;
44764 @@ -2972,13 +3148,30 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
44765 error = mnt_want_write(new_path.mnt);
44766 if (error)
44767 goto out_dput;
44768 +
44769 + if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
44770 + old_path.dentry->d_inode,
44771 + old_path.dentry->d_inode->i_mode, to)) {
44772 + error = -EACCES;
44773 + goto out_drop_write;
44774 + }
44775 +
44776 + if (!gr_acl_handle_link(new_dentry, new_path.dentry, new_path.mnt,
44777 + old_path.dentry, old_path.mnt, to)) {
44778 + error = -EACCES;
44779 + goto out_drop_write;
44780 + }
44781 +
44782 error = security_path_link(old_path.dentry, &new_path, new_dentry);
44783 if (error)
44784 goto out_drop_write;
44785 error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry);
44786 + if (!error)
44787 + gr_handle_create(new_dentry, new_path.mnt);
44788 out_drop_write:
44789 mnt_drop_write(new_path.mnt);
44790 out_dput:
44791 + putname(to);
44792 dput(new_dentry);
44793 mutex_unlock(&new_path.dentry->d_inode->i_mutex);
44794 path_put(&new_path);
44795 @@ -3206,6 +3399,12 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
44796 if (new_dentry == trap)
44797 goto exit5;
44798
44799 + error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
44800 + old_dentry, old_dir->d_inode, oldnd.path.mnt,
44801 + to);
44802 + if (error)
44803 + goto exit5;
44804 +
44805 error = mnt_want_write(oldnd.path.mnt);
44806 if (error)
44807 goto exit5;
44808 @@ -3215,6 +3414,9 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
44809 goto exit6;
44810 error = vfs_rename(old_dir->d_inode, old_dentry,
44811 new_dir->d_inode, new_dentry);
44812 + if (!error)
44813 + gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
44814 + new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
44815 exit6:
44816 mnt_drop_write(oldnd.path.mnt);
44817 exit5:
44818 @@ -3240,6 +3442,8 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna
44819
44820 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
44821 {
44822 + char tmpbuf[64];
44823 + const char *newlink;
44824 int len;
44825
44826 len = PTR_ERR(link);
44827 @@ -3249,7 +3453,14 @@ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const c
44828 len = strlen(link);
44829 if (len > (unsigned) buflen)
44830 len = buflen;
44831 - if (copy_to_user(buffer, link, len))
44832 +
44833 + if (len < sizeof(tmpbuf)) {
44834 + memcpy(tmpbuf, link, len);
44835 + newlink = tmpbuf;
44836 + } else
44837 + newlink = link;
44838 +
44839 + if (copy_to_user(buffer, newlink, len))
44840 len = -EFAULT;
44841 out:
44842 return len;
44843 diff --git a/fs/namespace.c b/fs/namespace.c
44844 index cfc6d44..b4632a5 100644
44845 --- a/fs/namespace.c
44846 +++ b/fs/namespace.c
44847 @@ -1326,6 +1326,9 @@ static int do_umount(struct vfsmount *mnt, int flags)
44848 if (!(sb->s_flags & MS_RDONLY))
44849 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
44850 up_write(&sb->s_umount);
44851 +
44852 + gr_log_remount(mnt->mnt_devname, retval);
44853 +
44854 return retval;
44855 }
44856
44857 @@ -1345,6 +1348,9 @@ static int do_umount(struct vfsmount *mnt, int flags)
44858 br_write_unlock(vfsmount_lock);
44859 up_write(&namespace_sem);
44860 release_mounts(&umount_list);
44861 +
44862 + gr_log_unmount(mnt->mnt_devname, retval);
44863 +
44864 return retval;
44865 }
44866
44867 @@ -2336,6 +2342,16 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
44868 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
44869 MS_STRICTATIME);
44870
44871 + if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
44872 + retval = -EPERM;
44873 + goto dput_out;
44874 + }
44875 +
44876 + if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
44877 + retval = -EPERM;
44878 + goto dput_out;
44879 + }
44880 +
44881 if (flags & MS_REMOUNT)
44882 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
44883 data_page);
44884 @@ -2350,6 +2366,9 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
44885 dev_name, data_page);
44886 dput_out:
44887 path_put(&path);
44888 +
44889 + gr_log_mount(dev_name, dir_name, retval);
44890 +
44891 return retval;
44892 }
44893
44894 @@ -2605,6 +2624,11 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
44895 if (error)
44896 goto out2;
44897
44898 + if (gr_handle_chroot_pivot()) {
44899 + error = -EPERM;
44900 + goto out2;
44901 + }
44902 +
44903 get_fs_root(current->fs, &root);
44904 error = lock_mount(&old);
44905 if (error)
44906 diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c
44907 index 3db6b82..a57597e 100644
44908 --- a/fs/nfs/blocklayout/blocklayout.c
44909 +++ b/fs/nfs/blocklayout/blocklayout.c
44910 @@ -90,7 +90,7 @@ static int is_writable(struct pnfs_block_extent *be, sector_t isect)
44911 */
44912 struct parallel_io {
44913 struct kref refcnt;
44914 - struct rpc_call_ops call_ops;
44915 + rpc_call_ops_no_const call_ops;
44916 void (*pnfs_callback) (void *data);
44917 void *data;
44918 };
44919 diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
44920 index 50a15fa..ca113f9 100644
44921 --- a/fs/nfs/inode.c
44922 +++ b/fs/nfs/inode.c
44923 @@ -150,7 +150,7 @@ static void nfs_zap_caches_locked(struct inode *inode)
44924 nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
44925 nfsi->attrtimeo_timestamp = jiffies;
44926
44927 - memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_COOKIEVERF(inode)));
44928 + memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_I(inode)->cookieverf));
44929 if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))
44930 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE;
44931 else
44932 @@ -1002,16 +1002,16 @@ static int nfs_size_need_update(const struct inode *inode, const struct nfs_fatt
44933 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
44934 }
44935
44936 -static atomic_long_t nfs_attr_generation_counter;
44937 +static atomic_long_unchecked_t nfs_attr_generation_counter;
44938
44939 static unsigned long nfs_read_attr_generation_counter(void)
44940 {
44941 - return atomic_long_read(&nfs_attr_generation_counter);
44942 + return atomic_long_read_unchecked(&nfs_attr_generation_counter);
44943 }
44944
44945 unsigned long nfs_inc_attr_generation_counter(void)
44946 {
44947 - return atomic_long_inc_return(&nfs_attr_generation_counter);
44948 + return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
44949 }
44950
44951 void nfs_fattr_init(struct nfs_fattr *fattr)
44952 diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
44953 index 7a2e442..8e544cc 100644
44954 --- a/fs/nfsd/vfs.c
44955 +++ b/fs/nfsd/vfs.c
44956 @@ -914,7 +914,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
44957 } else {
44958 oldfs = get_fs();
44959 set_fs(KERNEL_DS);
44960 - host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
44961 + host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset);
44962 set_fs(oldfs);
44963 }
44964
44965 @@ -1018,7 +1018,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
44966
44967 /* Write the data. */
44968 oldfs = get_fs(); set_fs(KERNEL_DS);
44969 - host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
44970 + host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &offset);
44971 set_fs(oldfs);
44972 if (host_err < 0)
44973 goto out_nfserr;
44974 @@ -1553,7 +1553,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
44975 */
44976
44977 oldfs = get_fs(); set_fs(KERNEL_DS);
44978 - host_err = inode->i_op->readlink(dentry, buf, *lenp);
44979 + host_err = inode->i_op->readlink(dentry, (char __force_user *)buf, *lenp);
44980 set_fs(oldfs);
44981
44982 if (host_err < 0)
44983 diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
44984 index 9fde1c0..14e8827 100644
44985 --- a/fs/notify/fanotify/fanotify_user.c
44986 +++ b/fs/notify/fanotify/fanotify_user.c
44987 @@ -276,7 +276,8 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
44988 goto out_close_fd;
44989
44990 ret = -EFAULT;
44991 - if (copy_to_user(buf, &fanotify_event_metadata,
44992 + if (fanotify_event_metadata.event_len > sizeof fanotify_event_metadata ||
44993 + copy_to_user(buf, &fanotify_event_metadata,
44994 fanotify_event_metadata.event_len))
44995 goto out_kill_access_response;
44996
44997 diff --git a/fs/notify/notification.c b/fs/notify/notification.c
44998 index ee18815..7aa5d01 100644
44999 --- a/fs/notify/notification.c
45000 +++ b/fs/notify/notification.c
45001 @@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event_holder_cachep;
45002 * get set to 0 so it will never get 'freed'
45003 */
45004 static struct fsnotify_event *q_overflow_event;
45005 -static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
45006 +static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
45007
45008 /**
45009 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
45010 @@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
45011 */
45012 u32 fsnotify_get_cookie(void)
45013 {
45014 - return atomic_inc_return(&fsnotify_sync_cookie);
45015 + return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
45016 }
45017 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
45018
45019 diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
45020 index 99e3610..02c1068 100644
45021 --- a/fs/ntfs/dir.c
45022 +++ b/fs/ntfs/dir.c
45023 @@ -1329,7 +1329,7 @@ find_next_index_buffer:
45024 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
45025 ~(s64)(ndir->itype.index.block_size - 1)));
45026 /* Bounds checks. */
45027 - if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
45028 + if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
45029 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
45030 "inode 0x%lx or driver bug.", vdir->i_ino);
45031 goto err_out;
45032 diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
45033 index c587e2d..3641eaa 100644
45034 --- a/fs/ntfs/file.c
45035 +++ b/fs/ntfs/file.c
45036 @@ -2229,6 +2229,6 @@ const struct inode_operations ntfs_file_inode_ops = {
45037 #endif /* NTFS_RW */
45038 };
45039
45040 -const struct file_operations ntfs_empty_file_ops = {};
45041 +const struct file_operations ntfs_empty_file_ops __read_only;
45042
45043 -const struct inode_operations ntfs_empty_inode_ops = {};
45044 +const struct inode_operations ntfs_empty_inode_ops __read_only;
45045 diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
45046 index 210c352..a174f83 100644
45047 --- a/fs/ocfs2/localalloc.c
45048 +++ b/fs/ocfs2/localalloc.c
45049 @@ -1283,7 +1283,7 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
45050 goto bail;
45051 }
45052
45053 - atomic_inc(&osb->alloc_stats.moves);
45054 + atomic_inc_unchecked(&osb->alloc_stats.moves);
45055
45056 bail:
45057 if (handle)
45058 diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
45059 index d355e6e..578d905 100644
45060 --- a/fs/ocfs2/ocfs2.h
45061 +++ b/fs/ocfs2/ocfs2.h
45062 @@ -235,11 +235,11 @@ enum ocfs2_vol_state
45063
45064 struct ocfs2_alloc_stats
45065 {
45066 - atomic_t moves;
45067 - atomic_t local_data;
45068 - atomic_t bitmap_data;
45069 - atomic_t bg_allocs;
45070 - atomic_t bg_extends;
45071 + atomic_unchecked_t moves;
45072 + atomic_unchecked_t local_data;
45073 + atomic_unchecked_t bitmap_data;
45074 + atomic_unchecked_t bg_allocs;
45075 + atomic_unchecked_t bg_extends;
45076 };
45077
45078 enum ocfs2_local_alloc_state
45079 diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
45080 index ba5d97e..c77db25 100644
45081 --- a/fs/ocfs2/suballoc.c
45082 +++ b/fs/ocfs2/suballoc.c
45083 @@ -872,7 +872,7 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
45084 mlog_errno(status);
45085 goto bail;
45086 }
45087 - atomic_inc(&osb->alloc_stats.bg_extends);
45088 + atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
45089
45090 /* You should never ask for this much metadata */
45091 BUG_ON(bits_wanted >
45092 @@ -2008,7 +2008,7 @@ int ocfs2_claim_metadata(handle_t *handle,
45093 mlog_errno(status);
45094 goto bail;
45095 }
45096 - atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
45097 + atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
45098
45099 *suballoc_loc = res.sr_bg_blkno;
45100 *suballoc_bit_start = res.sr_bit_offset;
45101 @@ -2172,7 +2172,7 @@ int ocfs2_claim_new_inode_at_loc(handle_t *handle,
45102 trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
45103 res->sr_bits);
45104
45105 - atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
45106 + atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
45107
45108 BUG_ON(res->sr_bits != 1);
45109
45110 @@ -2214,7 +2214,7 @@ int ocfs2_claim_new_inode(handle_t *handle,
45111 mlog_errno(status);
45112 goto bail;
45113 }
45114 - atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
45115 + atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
45116
45117 BUG_ON(res.sr_bits != 1);
45118
45119 @@ -2318,7 +2318,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
45120 cluster_start,
45121 num_clusters);
45122 if (!status)
45123 - atomic_inc(&osb->alloc_stats.local_data);
45124 + atomic_inc_unchecked(&osb->alloc_stats.local_data);
45125 } else {
45126 if (min_clusters > (osb->bitmap_cpg - 1)) {
45127 /* The only paths asking for contiguousness
45128 @@ -2344,7 +2344,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
45129 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
45130 res.sr_bg_blkno,
45131 res.sr_bit_offset);
45132 - atomic_inc(&osb->alloc_stats.bitmap_data);
45133 + atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
45134 *num_clusters = res.sr_bits;
45135 }
45136 }
45137 diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
45138 index 4994f8b..eaab8eb 100644
45139 --- a/fs/ocfs2/super.c
45140 +++ b/fs/ocfs2/super.c
45141 @@ -301,11 +301,11 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
45142 "%10s => GlobalAllocs: %d LocalAllocs: %d "
45143 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
45144 "Stats",
45145 - atomic_read(&osb->alloc_stats.bitmap_data),
45146 - atomic_read(&osb->alloc_stats.local_data),
45147 - atomic_read(&osb->alloc_stats.bg_allocs),
45148 - atomic_read(&osb->alloc_stats.moves),
45149 - atomic_read(&osb->alloc_stats.bg_extends));
45150 + atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
45151 + atomic_read_unchecked(&osb->alloc_stats.local_data),
45152 + atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
45153 + atomic_read_unchecked(&osb->alloc_stats.moves),
45154 + atomic_read_unchecked(&osb->alloc_stats.bg_extends));
45155
45156 out += snprintf(buf + out, len - out,
45157 "%10s => State: %u Descriptor: %llu Size: %u bits "
45158 @@ -2119,11 +2119,11 @@ static int ocfs2_initialize_super(struct super_block *sb,
45159 spin_lock_init(&osb->osb_xattr_lock);
45160 ocfs2_init_steal_slots(osb);
45161
45162 - atomic_set(&osb->alloc_stats.moves, 0);
45163 - atomic_set(&osb->alloc_stats.local_data, 0);
45164 - atomic_set(&osb->alloc_stats.bitmap_data, 0);
45165 - atomic_set(&osb->alloc_stats.bg_allocs, 0);
45166 - atomic_set(&osb->alloc_stats.bg_extends, 0);
45167 + atomic_set_unchecked(&osb->alloc_stats.moves, 0);
45168 + atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
45169 + atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
45170 + atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
45171 + atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
45172
45173 /* Copy the blockcheck stats from the superblock probe */
45174 osb->osb_ecc_stats = *stats;
45175 diff --git a/fs/ocfs2/symlink.c b/fs/ocfs2/symlink.c
45176 index 5d22872..523db20 100644
45177 --- a/fs/ocfs2/symlink.c
45178 +++ b/fs/ocfs2/symlink.c
45179 @@ -142,7 +142,7 @@ bail:
45180
45181 static void ocfs2_fast_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
45182 {
45183 - char *link = nd_get_link(nd);
45184 + const char *link = nd_get_link(nd);
45185 if (!IS_ERR(link))
45186 kfree(link);
45187 }
45188 diff --git a/fs/open.c b/fs/open.c
45189 index 22c41b5..695cb17 100644
45190 --- a/fs/open.c
45191 +++ b/fs/open.c
45192 @@ -112,6 +112,10 @@ static long do_sys_truncate(const char __user *pathname, loff_t length)
45193 error = locks_verify_truncate(inode, NULL, length);
45194 if (!error)
45195 error = security_path_truncate(&path);
45196 +
45197 + if (!error && !gr_acl_handle_truncate(path.dentry, path.mnt))
45198 + error = -EACCES;
45199 +
45200 if (!error)
45201 error = do_truncate(path.dentry, length, 0, NULL);
45202
45203 @@ -358,6 +362,9 @@ SYSCALL_DEFINE3(faccessat, int, dfd, const char __user *, filename, int, mode)
45204 if (__mnt_is_readonly(path.mnt))
45205 res = -EROFS;
45206
45207 + if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
45208 + res = -EACCES;
45209 +
45210 out_path_release:
45211 path_put(&path);
45212 out:
45213 @@ -384,6 +391,8 @@ SYSCALL_DEFINE1(chdir, const char __user *, filename)
45214 if (error)
45215 goto dput_and_out;
45216
45217 + gr_log_chdir(path.dentry, path.mnt);
45218 +
45219 set_fs_pwd(current->fs, &path);
45220
45221 dput_and_out:
45222 @@ -410,6 +419,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
45223 goto out_putf;
45224
45225 error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
45226 +
45227 + if (!error && !gr_chroot_fchdir(file->f_path.dentry, file->f_path.mnt))
45228 + error = -EPERM;
45229 +
45230 + if (!error)
45231 + gr_log_chdir(file->f_path.dentry, file->f_path.mnt);
45232 +
45233 if (!error)
45234 set_fs_pwd(current->fs, &file->f_path);
45235 out_putf:
45236 @@ -438,7 +454,13 @@ SYSCALL_DEFINE1(chroot, const char __user *, filename)
45237 if (error)
45238 goto dput_and_out;
45239
45240 + if (gr_handle_chroot_chroot(path.dentry, path.mnt))
45241 + goto dput_and_out;
45242 +
45243 set_fs_root(current->fs, &path);
45244 +
45245 + gr_handle_chroot_chdir(&path);
45246 +
45247 error = 0;
45248 dput_and_out:
45249 path_put(&path);
45250 @@ -456,6 +478,16 @@ static int chmod_common(struct path *path, umode_t mode)
45251 if (error)
45252 return error;
45253 mutex_lock(&inode->i_mutex);
45254 +
45255 + if (!gr_acl_handle_fchmod(path->dentry, path->mnt, mode)) {
45256 + error = -EACCES;
45257 + goto out_unlock;
45258 + }
45259 + if (gr_handle_chroot_chmod(path->dentry, path->mnt, mode)) {
45260 + error = -EACCES;
45261 + goto out_unlock;
45262 + }
45263 +
45264 error = security_path_chmod(path->dentry, path->mnt, mode);
45265 if (error)
45266 goto out_unlock;
45267 @@ -506,6 +538,9 @@ static int chown_common(struct path *path, uid_t user, gid_t group)
45268 int error;
45269 struct iattr newattrs;
45270
45271 + if (!gr_acl_handle_chown(path->dentry, path->mnt))
45272 + return -EACCES;
45273 +
45274 newattrs.ia_valid = ATTR_CTIME;
45275 if (user != (uid_t) -1) {
45276 newattrs.ia_valid |= ATTR_UID;
45277 diff --git a/fs/partitions/efi.c b/fs/partitions/efi.c
45278 index 6296b40..417c00f 100644
45279 --- a/fs/partitions/efi.c
45280 +++ b/fs/partitions/efi.c
45281 @@ -234,14 +234,14 @@ static gpt_entry *alloc_read_gpt_entries(struct parsed_partitions *state,
45282 if (!gpt)
45283 return NULL;
45284
45285 + if (!le32_to_cpu(gpt->num_partition_entries))
45286 + return NULL;
45287 + pte = kcalloc(le32_to_cpu(gpt->num_partition_entries), le32_to_cpu(gpt->sizeof_partition_entry), GFP_KERNEL);
45288 + if (!pte)
45289 + return NULL;
45290 +
45291 count = le32_to_cpu(gpt->num_partition_entries) *
45292 le32_to_cpu(gpt->sizeof_partition_entry);
45293 - if (!count)
45294 - return NULL;
45295 - pte = kzalloc(count, GFP_KERNEL);
45296 - if (!pte)
45297 - return NULL;
45298 -
45299 if (read_lba(state, le64_to_cpu(gpt->partition_entry_lba),
45300 (u8 *) pte,
45301 count) < count) {
45302 diff --git a/fs/partitions/ldm.c b/fs/partitions/ldm.c
45303 index bd8ae78..539d250 100644
45304 --- a/fs/partitions/ldm.c
45305 +++ b/fs/partitions/ldm.c
45306 @@ -1324,7 +1324,7 @@ static bool ldm_frag_add (const u8 *data, int size, struct list_head *frags)
45307 goto found;
45308 }
45309
45310 - f = kmalloc (sizeof (*f) + size*num, GFP_KERNEL);
45311 + f = kmalloc (size*num + sizeof (*f), GFP_KERNEL);
45312 if (!f) {
45313 ldm_crit ("Out of memory.");
45314 return false;
45315 diff --git a/fs/pipe.c b/fs/pipe.c
45316 index 4065f07..68c0706 100644
45317 --- a/fs/pipe.c
45318 +++ b/fs/pipe.c
45319 @@ -420,9 +420,9 @@ redo:
45320 }
45321 if (bufs) /* More to do? */
45322 continue;
45323 - if (!pipe->writers)
45324 + if (!atomic_read(&pipe->writers))
45325 break;
45326 - if (!pipe->waiting_writers) {
45327 + if (!atomic_read(&pipe->waiting_writers)) {
45328 /* syscall merging: Usually we must not sleep
45329 * if O_NONBLOCK is set, or if we got some data.
45330 * But if a writer sleeps in kernel space, then
45331 @@ -481,7 +481,7 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov,
45332 mutex_lock(&inode->i_mutex);
45333 pipe = inode->i_pipe;
45334
45335 - if (!pipe->readers) {
45336 + if (!atomic_read(&pipe->readers)) {
45337 send_sig(SIGPIPE, current, 0);
45338 ret = -EPIPE;
45339 goto out;
45340 @@ -530,7 +530,7 @@ redo1:
45341 for (;;) {
45342 int bufs;
45343
45344 - if (!pipe->readers) {
45345 + if (!atomic_read(&pipe->readers)) {
45346 send_sig(SIGPIPE, current, 0);
45347 if (!ret)
45348 ret = -EPIPE;
45349 @@ -616,9 +616,9 @@ redo2:
45350 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
45351 do_wakeup = 0;
45352 }
45353 - pipe->waiting_writers++;
45354 + atomic_inc(&pipe->waiting_writers);
45355 pipe_wait(pipe);
45356 - pipe->waiting_writers--;
45357 + atomic_dec(&pipe->waiting_writers);
45358 }
45359 out:
45360 mutex_unlock(&inode->i_mutex);
45361 @@ -685,7 +685,7 @@ pipe_poll(struct file *filp, poll_table *wait)
45362 mask = 0;
45363 if (filp->f_mode & FMODE_READ) {
45364 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
45365 - if (!pipe->writers && filp->f_version != pipe->w_counter)
45366 + if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
45367 mask |= POLLHUP;
45368 }
45369
45370 @@ -695,7 +695,7 @@ pipe_poll(struct file *filp, poll_table *wait)
45371 * Most Unices do not set POLLERR for FIFOs but on Linux they
45372 * behave exactly like pipes for poll().
45373 */
45374 - if (!pipe->readers)
45375 + if (!atomic_read(&pipe->readers))
45376 mask |= POLLERR;
45377 }
45378
45379 @@ -709,10 +709,10 @@ pipe_release(struct inode *inode, int decr, int decw)
45380
45381 mutex_lock(&inode->i_mutex);
45382 pipe = inode->i_pipe;
45383 - pipe->readers -= decr;
45384 - pipe->writers -= decw;
45385 + atomic_sub(decr, &pipe->readers);
45386 + atomic_sub(decw, &pipe->writers);
45387
45388 - if (!pipe->readers && !pipe->writers) {
45389 + if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
45390 free_pipe_info(inode);
45391 } else {
45392 wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
45393 @@ -802,7 +802,7 @@ pipe_read_open(struct inode *inode, struct file *filp)
45394
45395 if (inode->i_pipe) {
45396 ret = 0;
45397 - inode->i_pipe->readers++;
45398 + atomic_inc(&inode->i_pipe->readers);
45399 }
45400
45401 mutex_unlock(&inode->i_mutex);
45402 @@ -819,7 +819,7 @@ pipe_write_open(struct inode *inode, struct file *filp)
45403
45404 if (inode->i_pipe) {
45405 ret = 0;
45406 - inode->i_pipe->writers++;
45407 + atomic_inc(&inode->i_pipe->writers);
45408 }
45409
45410 mutex_unlock(&inode->i_mutex);
45411 @@ -837,9 +837,9 @@ pipe_rdwr_open(struct inode *inode, struct file *filp)
45412 if (inode->i_pipe) {
45413 ret = 0;
45414 if (filp->f_mode & FMODE_READ)
45415 - inode->i_pipe->readers++;
45416 + atomic_inc(&inode->i_pipe->readers);
45417 if (filp->f_mode & FMODE_WRITE)
45418 - inode->i_pipe->writers++;
45419 + atomic_inc(&inode->i_pipe->writers);
45420 }
45421
45422 mutex_unlock(&inode->i_mutex);
45423 @@ -931,7 +931,7 @@ void free_pipe_info(struct inode *inode)
45424 inode->i_pipe = NULL;
45425 }
45426
45427 -static struct vfsmount *pipe_mnt __read_mostly;
45428 +struct vfsmount *pipe_mnt __read_mostly;
45429
45430 /*
45431 * pipefs_dname() is called from d_path().
45432 @@ -961,7 +961,8 @@ static struct inode * get_pipe_inode(void)
45433 goto fail_iput;
45434 inode->i_pipe = pipe;
45435
45436 - pipe->readers = pipe->writers = 1;
45437 + atomic_set(&pipe->readers, 1);
45438 + atomic_set(&pipe->writers, 1);
45439 inode->i_fop = &rdwr_pipefifo_fops;
45440
45441 /*
45442 diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
45443 index 15af622..0e9f4467 100644
45444 --- a/fs/proc/Kconfig
45445 +++ b/fs/proc/Kconfig
45446 @@ -30,12 +30,12 @@ config PROC_FS
45447
45448 config PROC_KCORE
45449 bool "/proc/kcore support" if !ARM
45450 - depends on PROC_FS && MMU
45451 + depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
45452
45453 config PROC_VMCORE
45454 bool "/proc/vmcore support"
45455 - depends on PROC_FS && CRASH_DUMP
45456 - default y
45457 + depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
45458 + default n
45459 help
45460 Exports the dump image of crashed kernel in ELF format.
45461
45462 @@ -59,8 +59,8 @@ config PROC_SYSCTL
45463 limited in memory.
45464
45465 config PROC_PAGE_MONITOR
45466 - default y
45467 - depends on PROC_FS && MMU
45468 + default n
45469 + depends on PROC_FS && MMU && !GRKERNSEC
45470 bool "Enable /proc page monitoring" if EXPERT
45471 help
45472 Various /proc files exist to monitor process memory utilization:
45473 diff --git a/fs/proc/array.c b/fs/proc/array.c
45474 index 3a1dafd..d41fc37 100644
45475 --- a/fs/proc/array.c
45476 +++ b/fs/proc/array.c
45477 @@ -60,6 +60,7 @@
45478 #include <linux/tty.h>
45479 #include <linux/string.h>
45480 #include <linux/mman.h>
45481 +#include <linux/grsecurity.h>
45482 #include <linux/proc_fs.h>
45483 #include <linux/ioport.h>
45484 #include <linux/uaccess.h>
45485 @@ -337,6 +338,21 @@ static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
45486 seq_putc(m, '\n');
45487 }
45488
45489 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
45490 +static inline void task_pax(struct seq_file *m, struct task_struct *p)
45491 +{
45492 + if (p->mm)
45493 + seq_printf(m, "PaX:\t%c%c%c%c%c\n",
45494 + p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
45495 + p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
45496 + p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
45497 + p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
45498 + p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
45499 + else
45500 + seq_printf(m, "PaX:\t-----\n");
45501 +}
45502 +#endif
45503 +
45504 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
45505 struct pid *pid, struct task_struct *task)
45506 {
45507 @@ -354,9 +370,24 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
45508 task_cpus_allowed(m, task);
45509 cpuset_task_status_allowed(m, task);
45510 task_context_switch_counts(m, task);
45511 +
45512 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
45513 + task_pax(m, task);
45514 +#endif
45515 +
45516 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
45517 + task_grsec_rbac(m, task);
45518 +#endif
45519 +
45520 return 0;
45521 }
45522
45523 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45524 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
45525 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
45526 + _mm->pax_flags & MF_PAX_SEGMEXEC))
45527 +#endif
45528 +
45529 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
45530 struct pid *pid, struct task_struct *task, int whole)
45531 {
45532 @@ -449,6 +480,19 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
45533 gtime = task->gtime;
45534 }
45535
45536 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45537 + if (PAX_RAND_FLAGS(mm)) {
45538 + eip = 0;
45539 + esp = 0;
45540 + wchan = 0;
45541 + }
45542 +#endif
45543 +#ifdef CONFIG_GRKERNSEC_HIDESYM
45544 + wchan = 0;
45545 + eip =0;
45546 + esp =0;
45547 +#endif
45548 +
45549 /* scale priority and nice values from timeslices to -20..20 */
45550 /* to make it look like a "normal" Unix priority/nice value */
45551 priority = task_prio(task);
45552 @@ -489,9 +533,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
45553 vsize,
45554 mm ? get_mm_rss(mm) : 0,
45555 rsslim,
45556 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45557 + PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0),
45558 + PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0),
45559 + PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0),
45560 +#else
45561 mm ? (permitted ? mm->start_code : 1) : 0,
45562 mm ? (permitted ? mm->end_code : 1) : 0,
45563 (permitted && mm) ? mm->start_stack : 0,
45564 +#endif
45565 esp,
45566 eip,
45567 /* The signal information here is obsolete.
45568 @@ -544,3 +594,18 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
45569
45570 return 0;
45571 }
45572 +
45573 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
45574 +int proc_pid_ipaddr(struct task_struct *task, char *buffer)
45575 +{
45576 + u32 curr_ip = 0;
45577 + unsigned long flags;
45578 +
45579 + if (lock_task_sighand(task, &flags)) {
45580 + curr_ip = task->signal->curr_ip;
45581 + unlock_task_sighand(task, &flags);
45582 + }
45583 +
45584 + return sprintf(buffer, "%pI4\n", &curr_ip);
45585 +}
45586 +#endif
45587 diff --git a/fs/proc/base.c b/fs/proc/base.c
45588 index 1fc1dca..813fd0b 100644
45589 --- a/fs/proc/base.c
45590 +++ b/fs/proc/base.c
45591 @@ -107,6 +107,22 @@ struct pid_entry {
45592 union proc_op op;
45593 };
45594
45595 +struct getdents_callback {
45596 + struct linux_dirent __user * current_dir;
45597 + struct linux_dirent __user * previous;
45598 + struct file * file;
45599 + int count;
45600 + int error;
45601 +};
45602 +
45603 +static int gr_fake_filldir(void * __buf, const char *name, int namlen,
45604 + loff_t offset, u64 ino, unsigned int d_type)
45605 +{
45606 + struct getdents_callback * buf = (struct getdents_callback *) __buf;
45607 + buf->error = -EINVAL;
45608 + return 0;
45609 +}
45610 +
45611 #define NOD(NAME, MODE, IOP, FOP, OP) { \
45612 .name = (NAME), \
45613 .len = sizeof(NAME) - 1, \
45614 @@ -204,10 +220,12 @@ static struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
45615 return ERR_PTR(err);
45616
45617 mm = get_task_mm(task);
45618 - if (mm && mm != current->mm &&
45619 - !ptrace_may_access(task, mode)) {
45620 - mmput(mm);
45621 - mm = ERR_PTR(-EACCES);
45622 + if (mm) {
45623 + if ((mm != current->mm && !ptrace_may_access(task, mode)) ||
45624 + (mode == PTRACE_MODE_ATTACH && (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task)))) {
45625 + mmput(mm);
45626 + mm = ERR_PTR(-EACCES);
45627 + }
45628 }
45629 mutex_unlock(&task->signal->cred_guard_mutex);
45630
45631 @@ -229,6 +247,9 @@ static int proc_pid_cmdline(struct task_struct *task, char * buffer)
45632 if (!mm->arg_end)
45633 goto out_mm; /* Shh! No looking before we're done */
45634
45635 + if (gr_acl_handle_procpidmem(task))
45636 + goto out_mm;
45637 +
45638 len = mm->arg_end - mm->arg_start;
45639
45640 if (len > PAGE_SIZE)
45641 @@ -256,12 +277,28 @@ out:
45642 return res;
45643 }
45644
45645 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45646 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
45647 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
45648 + _mm->pax_flags & MF_PAX_SEGMEXEC))
45649 +#endif
45650 +
45651 static int proc_pid_auxv(struct task_struct *task, char *buffer)
45652 {
45653 struct mm_struct *mm = mm_for_maps(task);
45654 int res = PTR_ERR(mm);
45655 if (mm && !IS_ERR(mm)) {
45656 unsigned int nwords = 0;
45657 +
45658 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45659 + /* allow if we're currently ptracing this task */
45660 + if (PAX_RAND_FLAGS(mm) &&
45661 + (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
45662 + mmput(mm);
45663 + return 0;
45664 + }
45665 +#endif
45666 +
45667 do {
45668 nwords += 2;
45669 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
45670 @@ -275,7 +312,7 @@ static int proc_pid_auxv(struct task_struct *task, char *buffer)
45671 }
45672
45673
45674 -#ifdef CONFIG_KALLSYMS
45675 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45676 /*
45677 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
45678 * Returns the resolved symbol. If that fails, simply return the address.
45679 @@ -314,7 +351,7 @@ static void unlock_trace(struct task_struct *task)
45680 mutex_unlock(&task->signal->cred_guard_mutex);
45681 }
45682
45683 -#ifdef CONFIG_STACKTRACE
45684 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45685
45686 #define MAX_STACK_TRACE_DEPTH 64
45687
45688 @@ -505,7 +542,7 @@ static int proc_pid_limits(struct task_struct *task, char *buffer)
45689 return count;
45690 }
45691
45692 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
45693 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
45694 static int proc_pid_syscall(struct task_struct *task, char *buffer)
45695 {
45696 long nr;
45697 @@ -534,7 +571,7 @@ static int proc_pid_syscall(struct task_struct *task, char *buffer)
45698 /************************************************************************/
45699
45700 /* permission checks */
45701 -static int proc_fd_access_allowed(struct inode *inode)
45702 +static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
45703 {
45704 struct task_struct *task;
45705 int allowed = 0;
45706 @@ -544,7 +581,10 @@ static int proc_fd_access_allowed(struct inode *inode)
45707 */
45708 task = get_proc_task(inode);
45709 if (task) {
45710 - allowed = ptrace_may_access(task, PTRACE_MODE_READ);
45711 + if (log)
45712 + allowed = ptrace_may_access_log(task, PTRACE_MODE_READ);
45713 + else
45714 + allowed = ptrace_may_access(task, PTRACE_MODE_READ);
45715 put_task_struct(task);
45716 }
45717 return allowed;
45718 @@ -826,6 +866,10 @@ static ssize_t mem_read(struct file * file, char __user * buf,
45719 return ret;
45720 }
45721
45722 +#define mem_write NULL
45723 +
45724 +#ifndef mem_write
45725 +/* They were right the first time */
45726 static ssize_t mem_write(struct file * file, const char __user *buf,
45727 size_t count, loff_t *ppos)
45728 {
45729 @@ -866,6 +910,7 @@ static ssize_t mem_write(struct file * file, const char __user *buf,
45730 free_page((unsigned long) page);
45731 return copied;
45732 }
45733 +#endif
45734
45735 loff_t mem_lseek(struct file *file, loff_t offset, int orig)
45736 {
45737 @@ -911,6 +956,9 @@ static ssize_t environ_read(struct file *file, char __user *buf,
45738 if (!task)
45739 goto out_no_task;
45740
45741 + if (gr_acl_handle_procpidmem(task))
45742 + goto out;
45743 +
45744 ret = -ENOMEM;
45745 page = (char *)__get_free_page(GFP_TEMPORARY);
45746 if (!page)
45747 @@ -1533,7 +1581,7 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
45748 path_put(&nd->path);
45749
45750 /* Are we allowed to snoop on the tasks file descriptors? */
45751 - if (!proc_fd_access_allowed(inode))
45752 + if (!proc_fd_access_allowed(inode,0))
45753 goto out;
45754
45755 error = PROC_I(inode)->op.proc_get_link(inode, &nd->path);
45756 @@ -1572,8 +1620,18 @@ static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int b
45757 struct path path;
45758
45759 /* Are we allowed to snoop on the tasks file descriptors? */
45760 - if (!proc_fd_access_allowed(inode))
45761 - goto out;
45762 + /* logging this is needed for learning on chromium to work properly,
45763 + but we don't want to flood the logs from 'ps' which does a readlink
45764 + on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
45765 + CAP_SYS_PTRACE as it's not necessary for its basic functionality
45766 + */
45767 + if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
45768 + if (!proc_fd_access_allowed(inode,0))
45769 + goto out;
45770 + } else {
45771 + if (!proc_fd_access_allowed(inode,1))
45772 + goto out;
45773 + }
45774
45775 error = PROC_I(inode)->op.proc_get_link(inode, &path);
45776 if (error)
45777 @@ -1638,7 +1696,11 @@ struct inode *proc_pid_make_inode(struct super_block * sb, struct task_struct *t
45778 rcu_read_lock();
45779 cred = __task_cred(task);
45780 inode->i_uid = cred->euid;
45781 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45782 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
45783 +#else
45784 inode->i_gid = cred->egid;
45785 +#endif
45786 rcu_read_unlock();
45787 }
45788 security_task_to_inode(task, inode);
45789 @@ -1656,6 +1718,9 @@ int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
45790 struct inode *inode = dentry->d_inode;
45791 struct task_struct *task;
45792 const struct cred *cred;
45793 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45794 + const struct cred *tmpcred = current_cred();
45795 +#endif
45796
45797 generic_fillattr(inode, stat);
45798
45799 @@ -1663,13 +1728,41 @@ int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
45800 stat->uid = 0;
45801 stat->gid = 0;
45802 task = pid_task(proc_pid(inode), PIDTYPE_PID);
45803 +
45804 + if (task && (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))) {
45805 + rcu_read_unlock();
45806 + return -ENOENT;
45807 + }
45808 +
45809 if (task) {
45810 + cred = __task_cred(task);
45811 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45812 + if (!tmpcred->uid || (tmpcred->uid == cred->uid)
45813 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45814 + || in_group_p(CONFIG_GRKERNSEC_PROC_GID)
45815 +#endif
45816 + ) {
45817 +#endif
45818 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
45819 +#ifdef CONFIG_GRKERNSEC_PROC_USER
45820 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
45821 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45822 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
45823 +#endif
45824 task_dumpable(task)) {
45825 - cred = __task_cred(task);
45826 stat->uid = cred->euid;
45827 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45828 + stat->gid = CONFIG_GRKERNSEC_PROC_GID;
45829 +#else
45830 stat->gid = cred->egid;
45831 +#endif
45832 }
45833 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45834 + } else {
45835 + rcu_read_unlock();
45836 + return -ENOENT;
45837 + }
45838 +#endif
45839 }
45840 rcu_read_unlock();
45841 return 0;
45842 @@ -1706,11 +1799,20 @@ int pid_revalidate(struct dentry *dentry, struct nameidata *nd)
45843
45844 if (task) {
45845 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
45846 +#ifdef CONFIG_GRKERNSEC_PROC_USER
45847 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
45848 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45849 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
45850 +#endif
45851 task_dumpable(task)) {
45852 rcu_read_lock();
45853 cred = __task_cred(task);
45854 inode->i_uid = cred->euid;
45855 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45856 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
45857 +#else
45858 inode->i_gid = cred->egid;
45859 +#endif
45860 rcu_read_unlock();
45861 } else {
45862 inode->i_uid = 0;
45863 @@ -1828,7 +1930,8 @@ static int proc_fd_info(struct inode *inode, struct path *path, char *info)
45864 int fd = proc_fd(inode);
45865
45866 if (task) {
45867 - files = get_files_struct(task);
45868 + if (!gr_acl_handle_procpidmem(task))
45869 + files = get_files_struct(task);
45870 put_task_struct(task);
45871 }
45872 if (files) {
45873 @@ -2096,11 +2199,21 @@ static const struct file_operations proc_fd_operations = {
45874 */
45875 static int proc_fd_permission(struct inode *inode, int mask)
45876 {
45877 + struct task_struct *task;
45878 int rv = generic_permission(inode, mask);
45879 - if (rv == 0)
45880 - return 0;
45881 +
45882 if (task_pid(current) == proc_pid(inode))
45883 rv = 0;
45884 +
45885 + task = get_proc_task(inode);
45886 + if (task == NULL)
45887 + return rv;
45888 +
45889 + if (gr_acl_handle_procpidmem(task))
45890 + rv = -EACCES;
45891 +
45892 + put_task_struct(task);
45893 +
45894 return rv;
45895 }
45896
45897 @@ -2210,6 +2323,9 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
45898 if (!task)
45899 goto out_no_task;
45900
45901 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
45902 + goto out;
45903 +
45904 /*
45905 * Yes, it does not scale. And it should not. Don't add
45906 * new entries into /proc/<tgid>/ without very good reasons.
45907 @@ -2254,6 +2370,9 @@ static int proc_pident_readdir(struct file *filp,
45908 if (!task)
45909 goto out_no_task;
45910
45911 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
45912 + goto out;
45913 +
45914 ret = 0;
45915 i = filp->f_pos;
45916 switch (i) {
45917 @@ -2524,7 +2643,7 @@ static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd)
45918 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
45919 void *cookie)
45920 {
45921 - char *s = nd_get_link(nd);
45922 + const char *s = nd_get_link(nd);
45923 if (!IS_ERR(s))
45924 __putname(s);
45925 }
45926 @@ -2722,7 +2841,7 @@ static const struct pid_entry tgid_base_stuff[] = {
45927 REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
45928 #endif
45929 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
45930 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
45931 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
45932 INF("syscall", S_IRUGO, proc_pid_syscall),
45933 #endif
45934 INF("cmdline", S_IRUGO, proc_pid_cmdline),
45935 @@ -2747,10 +2866,10 @@ static const struct pid_entry tgid_base_stuff[] = {
45936 #ifdef CONFIG_SECURITY
45937 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
45938 #endif
45939 -#ifdef CONFIG_KALLSYMS
45940 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45941 INF("wchan", S_IRUGO, proc_pid_wchan),
45942 #endif
45943 -#ifdef CONFIG_STACKTRACE
45944 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45945 ONE("stack", S_IRUGO, proc_pid_stack),
45946 #endif
45947 #ifdef CONFIG_SCHEDSTATS
45948 @@ -2784,6 +2903,9 @@ static const struct pid_entry tgid_base_stuff[] = {
45949 #ifdef CONFIG_HARDWALL
45950 INF("hardwall", S_IRUGO, proc_pid_hardwall),
45951 #endif
45952 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
45953 + INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
45954 +#endif
45955 };
45956
45957 static int proc_tgid_base_readdir(struct file * filp,
45958 @@ -2909,7 +3031,14 @@ static struct dentry *proc_pid_instantiate(struct inode *dir,
45959 if (!inode)
45960 goto out;
45961
45962 +#ifdef CONFIG_GRKERNSEC_PROC_USER
45963 + inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
45964 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45965 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
45966 + inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
45967 +#else
45968 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
45969 +#endif
45970 inode->i_op = &proc_tgid_base_inode_operations;
45971 inode->i_fop = &proc_tgid_base_operations;
45972 inode->i_flags|=S_IMMUTABLE;
45973 @@ -2951,7 +3080,11 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, struct
45974 if (!task)
45975 goto out;
45976
45977 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
45978 + goto out_put_task;
45979 +
45980 result = proc_pid_instantiate(dir, dentry, task, NULL);
45981 +out_put_task:
45982 put_task_struct(task);
45983 out:
45984 return result;
45985 @@ -3016,6 +3149,11 @@ int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir)
45986 {
45987 unsigned int nr;
45988 struct task_struct *reaper;
45989 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45990 + const struct cred *tmpcred = current_cred();
45991 + const struct cred *itercred;
45992 +#endif
45993 + filldir_t __filldir = filldir;
45994 struct tgid_iter iter;
45995 struct pid_namespace *ns;
45996
45997 @@ -3039,8 +3177,27 @@ int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir)
45998 for (iter = next_tgid(ns, iter);
45999 iter.task;
46000 iter.tgid += 1, iter = next_tgid(ns, iter)) {
46001 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
46002 + rcu_read_lock();
46003 + itercred = __task_cred(iter.task);
46004 +#endif
46005 + if (gr_pid_is_chrooted(iter.task) || gr_check_hidden_task(iter.task)
46006 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
46007 + || (tmpcred->uid && (itercred->uid != tmpcred->uid)
46008 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
46009 + && !in_group_p(CONFIG_GRKERNSEC_PROC_GID)
46010 +#endif
46011 + )
46012 +#endif
46013 + )
46014 + __filldir = &gr_fake_filldir;
46015 + else
46016 + __filldir = filldir;
46017 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
46018 + rcu_read_unlock();
46019 +#endif
46020 filp->f_pos = iter.tgid + TGID_OFFSET;
46021 - if (proc_pid_fill_cache(filp, dirent, filldir, iter) < 0) {
46022 + if (proc_pid_fill_cache(filp, dirent, __filldir, iter) < 0) {
46023 put_task_struct(iter.task);
46024 goto out;
46025 }
46026 @@ -3068,7 +3225,7 @@ static const struct pid_entry tid_base_stuff[] = {
46027 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
46028 #endif
46029 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
46030 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
46031 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
46032 INF("syscall", S_IRUGO, proc_pid_syscall),
46033 #endif
46034 INF("cmdline", S_IRUGO, proc_pid_cmdline),
46035 @@ -3092,10 +3249,10 @@ static const struct pid_entry tid_base_stuff[] = {
46036 #ifdef CONFIG_SECURITY
46037 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
46038 #endif
46039 -#ifdef CONFIG_KALLSYMS
46040 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
46041 INF("wchan", S_IRUGO, proc_pid_wchan),
46042 #endif
46043 -#ifdef CONFIG_STACKTRACE
46044 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
46045 ONE("stack", S_IRUGO, proc_pid_stack),
46046 #endif
46047 #ifdef CONFIG_SCHEDSTATS
46048 diff --git a/fs/proc/cmdline.c b/fs/proc/cmdline.c
46049 index 82676e3..5f8518a 100644
46050 --- a/fs/proc/cmdline.c
46051 +++ b/fs/proc/cmdline.c
46052 @@ -23,7 +23,11 @@ static const struct file_operations cmdline_proc_fops = {
46053
46054 static int __init proc_cmdline_init(void)
46055 {
46056 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
46057 + proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
46058 +#else
46059 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
46060 +#endif
46061 return 0;
46062 }
46063 module_init(proc_cmdline_init);
46064 diff --git a/fs/proc/devices.c b/fs/proc/devices.c
46065 index b143471..bb105e5 100644
46066 --- a/fs/proc/devices.c
46067 +++ b/fs/proc/devices.c
46068 @@ -64,7 +64,11 @@ static const struct file_operations proc_devinfo_operations = {
46069
46070 static int __init proc_devices_init(void)
46071 {
46072 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
46073 + proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
46074 +#else
46075 proc_create("devices", 0, NULL, &proc_devinfo_operations);
46076 +#endif
46077 return 0;
46078 }
46079 module_init(proc_devices_init);
46080 diff --git a/fs/proc/inode.c b/fs/proc/inode.c
46081 index 7737c54..7172574 100644
46082 --- a/fs/proc/inode.c
46083 +++ b/fs/proc/inode.c
46084 @@ -18,12 +18,18 @@
46085 #include <linux/module.h>
46086 #include <linux/sysctl.h>
46087 #include <linux/slab.h>
46088 +#include <linux/grsecurity.h>
46089
46090 #include <asm/system.h>
46091 #include <asm/uaccess.h>
46092
46093 #include "internal.h"
46094
46095 +#ifdef CONFIG_PROC_SYSCTL
46096 +extern const struct inode_operations proc_sys_inode_operations;
46097 +extern const struct inode_operations proc_sys_dir_operations;
46098 +#endif
46099 +
46100 static void proc_evict_inode(struct inode *inode)
46101 {
46102 struct proc_dir_entry *de;
46103 @@ -49,6 +55,13 @@ static void proc_evict_inode(struct inode *inode)
46104 ns_ops = PROC_I(inode)->ns_ops;
46105 if (ns_ops && ns_ops->put)
46106 ns_ops->put(PROC_I(inode)->ns);
46107 +
46108 +#ifdef CONFIG_PROC_SYSCTL
46109 + if (inode->i_op == &proc_sys_inode_operations ||
46110 + inode->i_op == &proc_sys_dir_operations)
46111 + gr_handle_delete(inode->i_ino, inode->i_sb->s_dev);
46112 +#endif
46113 +
46114 }
46115
46116 static struct kmem_cache * proc_inode_cachep;
46117 @@ -440,7 +453,11 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
46118 if (de->mode) {
46119 inode->i_mode = de->mode;
46120 inode->i_uid = de->uid;
46121 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
46122 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
46123 +#else
46124 inode->i_gid = de->gid;
46125 +#endif
46126 }
46127 if (de->size)
46128 inode->i_size = de->size;
46129 diff --git a/fs/proc/internal.h b/fs/proc/internal.h
46130 index 7838e5c..ff92cbc 100644
46131 --- a/fs/proc/internal.h
46132 +++ b/fs/proc/internal.h
46133 @@ -51,6 +51,9 @@ extern int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
46134 struct pid *pid, struct task_struct *task);
46135 extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
46136 struct pid *pid, struct task_struct *task);
46137 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
46138 +extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
46139 +#endif
46140 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
46141
46142 extern const struct file_operations proc_maps_operations;
46143 diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
46144 index d245cb2..f4e8498 100644
46145 --- a/fs/proc/kcore.c
46146 +++ b/fs/proc/kcore.c
46147 @@ -478,9 +478,10 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
46148 * the addresses in the elf_phdr on our list.
46149 */
46150 start = kc_offset_to_vaddr(*fpos - elf_buflen);
46151 - if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
46152 + tsz = PAGE_SIZE - (start & ~PAGE_MASK);
46153 + if (tsz > buflen)
46154 tsz = buflen;
46155 -
46156 +
46157 while (buflen) {
46158 struct kcore_list *m;
46159
46160 @@ -509,20 +510,23 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
46161 kfree(elf_buf);
46162 } else {
46163 if (kern_addr_valid(start)) {
46164 - unsigned long n;
46165 + char *elf_buf;
46166 + mm_segment_t oldfs;
46167
46168 - n = copy_to_user(buffer, (char *)start, tsz);
46169 - /*
46170 - * We cannot distingush between fault on source
46171 - * and fault on destination. When this happens
46172 - * we clear too and hope it will trigger the
46173 - * EFAULT again.
46174 - */
46175 - if (n) {
46176 - if (clear_user(buffer + tsz - n,
46177 - n))
46178 + elf_buf = kmalloc(tsz, GFP_KERNEL);
46179 + if (!elf_buf)
46180 + return -ENOMEM;
46181 + oldfs = get_fs();
46182 + set_fs(KERNEL_DS);
46183 + if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
46184 + set_fs(oldfs);
46185 + if (copy_to_user(buffer, elf_buf, tsz)) {
46186 + kfree(elf_buf);
46187 return -EFAULT;
46188 + }
46189 }
46190 + set_fs(oldfs);
46191 + kfree(elf_buf);
46192 } else {
46193 if (clear_user(buffer, tsz))
46194 return -EFAULT;
46195 @@ -542,6 +546,9 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
46196
46197 static int open_kcore(struct inode *inode, struct file *filp)
46198 {
46199 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
46200 + return -EPERM;
46201 +#endif
46202 if (!capable(CAP_SYS_RAWIO))
46203 return -EPERM;
46204 if (kcore_need_update)
46205 diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
46206 index 80e4645..53e5fcf 100644
46207 --- a/fs/proc/meminfo.c
46208 +++ b/fs/proc/meminfo.c
46209 @@ -158,7 +158,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
46210 vmi.used >> 10,
46211 vmi.largest_chunk >> 10
46212 #ifdef CONFIG_MEMORY_FAILURE
46213 - ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
46214 + ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
46215 #endif
46216 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
46217 ,K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
46218 diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
46219 index b1822dd..df622cb 100644
46220 --- a/fs/proc/nommu.c
46221 +++ b/fs/proc/nommu.c
46222 @@ -66,7 +66,7 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region)
46223 if (len < 1)
46224 len = 1;
46225 seq_printf(m, "%*c", len, ' ');
46226 - seq_path(m, &file->f_path, "");
46227 + seq_path(m, &file->f_path, "\n\\");
46228 }
46229
46230 seq_putc(m, '\n');
46231 diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
46232 index f738024..876984a 100644
46233 --- a/fs/proc/proc_net.c
46234 +++ b/fs/proc/proc_net.c
46235 @@ -105,6 +105,17 @@ static struct net *get_proc_task_net(struct inode *dir)
46236 struct task_struct *task;
46237 struct nsproxy *ns;
46238 struct net *net = NULL;
46239 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
46240 + const struct cred *cred = current_cred();
46241 +#endif
46242 +
46243 +#ifdef CONFIG_GRKERNSEC_PROC_USER
46244 + if (cred->fsuid)
46245 + return net;
46246 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
46247 + if (cred->fsuid && !in_group_p(CONFIG_GRKERNSEC_PROC_GID))
46248 + return net;
46249 +#endif
46250
46251 rcu_read_lock();
46252 task = pid_task(proc_pid(dir), PIDTYPE_PID);
46253 diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
46254 index a6b6217..1e0579d 100644
46255 --- a/fs/proc/proc_sysctl.c
46256 +++ b/fs/proc/proc_sysctl.c
46257 @@ -9,11 +9,13 @@
46258 #include <linux/namei.h>
46259 #include "internal.h"
46260
46261 +extern __u32 gr_handle_sysctl(const struct ctl_table *table, const int op);
46262 +
46263 static const struct dentry_operations proc_sys_dentry_operations;
46264 static const struct file_operations proc_sys_file_operations;
46265 -static const struct inode_operations proc_sys_inode_operations;
46266 +const struct inode_operations proc_sys_inode_operations;
46267 static const struct file_operations proc_sys_dir_file_operations;
46268 -static const struct inode_operations proc_sys_dir_operations;
46269 +const struct inode_operations proc_sys_dir_operations;
46270
46271 void proc_sys_poll_notify(struct ctl_table_poll *poll)
46272 {
46273 @@ -131,8 +133,14 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
46274
46275 err = NULL;
46276 d_set_d_op(dentry, &proc_sys_dentry_operations);
46277 +
46278 + gr_handle_proc_create(dentry, inode);
46279 +
46280 d_add(dentry, inode);
46281
46282 + if (gr_handle_sysctl(p, MAY_EXEC))
46283 + err = ERR_PTR(-ENOENT);
46284 +
46285 out:
46286 sysctl_head_finish(head);
46287 return err;
46288 @@ -163,6 +171,12 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
46289 if (!table->proc_handler)
46290 goto out;
46291
46292 +#ifdef CONFIG_GRKERNSEC
46293 + error = -EPERM;
46294 + if (write && !capable(CAP_SYS_ADMIN))
46295 + goto out;
46296 +#endif
46297 +
46298 /* careful: calling conventions are nasty here */
46299 res = count;
46300 error = table->proc_handler(table, write, buf, &res, ppos);
46301 @@ -245,6 +259,9 @@ static int proc_sys_fill_cache(struct file *filp, void *dirent,
46302 return -ENOMEM;
46303 } else {
46304 d_set_d_op(child, &proc_sys_dentry_operations);
46305 +
46306 + gr_handle_proc_create(child, inode);
46307 +
46308 d_add(child, inode);
46309 }
46310 } else {
46311 @@ -273,6 +290,9 @@ static int scan(struct ctl_table_header *head, ctl_table *table,
46312 if (*pos < file->f_pos)
46313 continue;
46314
46315 + if (gr_handle_sysctl(table, 0))
46316 + continue;
46317 +
46318 res = proc_sys_fill_cache(file, dirent, filldir, head, table);
46319 if (res)
46320 return res;
46321 @@ -398,6 +418,9 @@ static int proc_sys_getattr(struct vfsmount *mnt, struct dentry *dentry, struct
46322 if (IS_ERR(head))
46323 return PTR_ERR(head);
46324
46325 + if (table && gr_handle_sysctl(table, MAY_EXEC))
46326 + return -ENOENT;
46327 +
46328 generic_fillattr(inode, stat);
46329 if (table)
46330 stat->mode = (stat->mode & S_IFMT) | table->mode;
46331 @@ -420,13 +443,13 @@ static const struct file_operations proc_sys_dir_file_operations = {
46332 .llseek = generic_file_llseek,
46333 };
46334
46335 -static const struct inode_operations proc_sys_inode_operations = {
46336 +const struct inode_operations proc_sys_inode_operations = {
46337 .permission = proc_sys_permission,
46338 .setattr = proc_sys_setattr,
46339 .getattr = proc_sys_getattr,
46340 };
46341
46342 -static const struct inode_operations proc_sys_dir_operations = {
46343 +const struct inode_operations proc_sys_dir_operations = {
46344 .lookup = proc_sys_lookup,
46345 .permission = proc_sys_permission,
46346 .setattr = proc_sys_setattr,
46347 diff --git a/fs/proc/root.c b/fs/proc/root.c
46348 index 03102d9..4ae347e 100644
46349 --- a/fs/proc/root.c
46350 +++ b/fs/proc/root.c
46351 @@ -121,7 +121,15 @@ void __init proc_root_init(void)
46352 #ifdef CONFIG_PROC_DEVICETREE
46353 proc_device_tree_init();
46354 #endif
46355 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
46356 +#ifdef CONFIG_GRKERNSEC_PROC_USER
46357 + proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
46358 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
46359 + proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
46360 +#endif
46361 +#else
46362 proc_mkdir("bus", NULL);
46363 +#endif
46364 proc_sys_init();
46365 }
46366
46367 diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
46368 index 7dcd2a2..d1d9cb6 100644
46369 --- a/fs/proc/task_mmu.c
46370 +++ b/fs/proc/task_mmu.c
46371 @@ -52,8 +52,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
46372 "VmExe:\t%8lu kB\n"
46373 "VmLib:\t%8lu kB\n"
46374 "VmPTE:\t%8lu kB\n"
46375 - "VmSwap:\t%8lu kB\n",
46376 - hiwater_vm << (PAGE_SHIFT-10),
46377 + "VmSwap:\t%8lu kB\n"
46378 +
46379 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
46380 + "CsBase:\t%8lx\nCsLim:\t%8lx\n"
46381 +#endif
46382 +
46383 + ,hiwater_vm << (PAGE_SHIFT-10),
46384 (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
46385 mm->locked_vm << (PAGE_SHIFT-10),
46386 mm->pinned_vm << (PAGE_SHIFT-10),
46387 @@ -62,7 +67,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
46388 data << (PAGE_SHIFT-10),
46389 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
46390 (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
46391 - swap << (PAGE_SHIFT-10));
46392 + swap << (PAGE_SHIFT-10)
46393 +
46394 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
46395 + , mm->context.user_cs_base, mm->context.user_cs_limit
46396 +#endif
46397 +
46398 + );
46399 }
46400
46401 unsigned long task_vsize(struct mm_struct *mm)
46402 @@ -209,6 +220,12 @@ static int do_maps_open(struct inode *inode, struct file *file,
46403 return ret;
46404 }
46405
46406 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46407 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
46408 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
46409 + _mm->pax_flags & MF_PAX_SEGMEXEC))
46410 +#endif
46411 +
46412 static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
46413 {
46414 struct mm_struct *mm = vma->vm_mm;
46415 @@ -227,13 +244,13 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
46416 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
46417 }
46418
46419 - /* We don't show the stack guard page in /proc/maps */
46420 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46421 + start = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start;
46422 + end = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end;
46423 +#else
46424 start = vma->vm_start;
46425 - if (stack_guard_page_start(vma, start))
46426 - start += PAGE_SIZE;
46427 end = vma->vm_end;
46428 - if (stack_guard_page_end(vma, end))
46429 - end -= PAGE_SIZE;
46430 +#endif
46431
46432 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
46433 start,
46434 @@ -242,7 +259,11 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
46435 flags & VM_WRITE ? 'w' : '-',
46436 flags & VM_EXEC ? 'x' : '-',
46437 flags & VM_MAYSHARE ? 's' : 'p',
46438 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46439 + PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
46440 +#else
46441 pgoff,
46442 +#endif
46443 MAJOR(dev), MINOR(dev), ino, &len);
46444
46445 /*
46446 @@ -251,7 +272,7 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
46447 */
46448 if (file) {
46449 pad_len_spaces(m, len);
46450 - seq_path(m, &file->f_path, "\n");
46451 + seq_path(m, &file->f_path, "\n\\");
46452 } else {
46453 const char *name = arch_vma_name(vma);
46454 if (!name) {
46455 @@ -259,8 +280,9 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
46456 if (vma->vm_start <= mm->brk &&
46457 vma->vm_end >= mm->start_brk) {
46458 name = "[heap]";
46459 - } else if (vma->vm_start <= mm->start_stack &&
46460 - vma->vm_end >= mm->start_stack) {
46461 + } else if ((vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
46462 + (vma->vm_start <= mm->start_stack &&
46463 + vma->vm_end >= mm->start_stack)) {
46464 name = "[stack]";
46465 }
46466 } else {
46467 @@ -435,11 +457,16 @@ static int show_smap(struct seq_file *m, void *v)
46468 };
46469
46470 memset(&mss, 0, sizeof mss);
46471 - mss.vma = vma;
46472 - /* mmap_sem is held in m_start */
46473 - if (vma->vm_mm && !is_vm_hugetlb_page(vma))
46474 - walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
46475 -
46476 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46477 + if (!PAX_RAND_FLAGS(vma->vm_mm)) {
46478 +#endif
46479 + mss.vma = vma;
46480 + /* mmap_sem is held in m_start */
46481 + if (vma->vm_mm && !is_vm_hugetlb_page(vma))
46482 + walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
46483 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46484 + }
46485 +#endif
46486 show_map_vma(m, vma);
46487
46488 seq_printf(m,
46489 @@ -457,7 +484,11 @@ static int show_smap(struct seq_file *m, void *v)
46490 "KernelPageSize: %8lu kB\n"
46491 "MMUPageSize: %8lu kB\n"
46492 "Locked: %8lu kB\n",
46493 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46494 + PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
46495 +#else
46496 (vma->vm_end - vma->vm_start) >> 10,
46497 +#endif
46498 mss.resident >> 10,
46499 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
46500 mss.shared_clean >> 10,
46501 @@ -1036,7 +1067,7 @@ static int show_numa_map(struct seq_file *m, void *v)
46502
46503 if (file) {
46504 seq_printf(m, " file=");
46505 - seq_path(m, &file->f_path, "\n\t= ");
46506 + seq_path(m, &file->f_path, "\n\t\\= ");
46507 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
46508 seq_printf(m, " heap");
46509 } else if (vma->vm_start <= mm->start_stack &&
46510 diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
46511 index 980de54..2a4db5f 100644
46512 --- a/fs/proc/task_nommu.c
46513 +++ b/fs/proc/task_nommu.c
46514 @@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
46515 else
46516 bytes += kobjsize(mm);
46517
46518 - if (current->fs && current->fs->users > 1)
46519 + if (current->fs && atomic_read(&current->fs->users) > 1)
46520 sbytes += kobjsize(current->fs);
46521 else
46522 bytes += kobjsize(current->fs);
46523 @@ -166,7 +166,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma)
46524
46525 if (file) {
46526 pad_len_spaces(m, len);
46527 - seq_path(m, &file->f_path, "");
46528 + seq_path(m, &file->f_path, "\n\\");
46529 } else if (mm) {
46530 if (vma->vm_start <= mm->start_stack &&
46531 vma->vm_end >= mm->start_stack) {
46532 diff --git a/fs/quota/netlink.c b/fs/quota/netlink.c
46533 index d67908b..d13f6a6 100644
46534 --- a/fs/quota/netlink.c
46535 +++ b/fs/quota/netlink.c
46536 @@ -33,7 +33,7 @@ static struct genl_family quota_genl_family = {
46537 void quota_send_warning(short type, unsigned int id, dev_t dev,
46538 const char warntype)
46539 {
46540 - static atomic_t seq;
46541 + static atomic_unchecked_t seq;
46542 struct sk_buff *skb;
46543 void *msg_head;
46544 int ret;
46545 @@ -49,7 +49,7 @@ void quota_send_warning(short type, unsigned int id, dev_t dev,
46546 "VFS: Not enough memory to send quota warning.\n");
46547 return;
46548 }
46549 - msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
46550 + msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq),
46551 &quota_genl_family, 0, QUOTA_NL_C_WARNING);
46552 if (!msg_head) {
46553 printk(KERN_ERR
46554 diff --git a/fs/readdir.c b/fs/readdir.c
46555 index 356f715..c918d38 100644
46556 --- a/fs/readdir.c
46557 +++ b/fs/readdir.c
46558 @@ -17,6 +17,7 @@
46559 #include <linux/security.h>
46560 #include <linux/syscalls.h>
46561 #include <linux/unistd.h>
46562 +#include <linux/namei.h>
46563
46564 #include <asm/uaccess.h>
46565
46566 @@ -67,6 +68,7 @@ struct old_linux_dirent {
46567
46568 struct readdir_callback {
46569 struct old_linux_dirent __user * dirent;
46570 + struct file * file;
46571 int result;
46572 };
46573
46574 @@ -84,6 +86,10 @@ static int fillonedir(void * __buf, const char * name, int namlen, loff_t offset
46575 buf->result = -EOVERFLOW;
46576 return -EOVERFLOW;
46577 }
46578 +
46579 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
46580 + return 0;
46581 +
46582 buf->result++;
46583 dirent = buf->dirent;
46584 if (!access_ok(VERIFY_WRITE, dirent,
46585 @@ -116,6 +122,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
46586
46587 buf.result = 0;
46588 buf.dirent = dirent;
46589 + buf.file = file;
46590
46591 error = vfs_readdir(file, fillonedir, &buf);
46592 if (buf.result)
46593 @@ -142,6 +149,7 @@ struct linux_dirent {
46594 struct getdents_callback {
46595 struct linux_dirent __user * current_dir;
46596 struct linux_dirent __user * previous;
46597 + struct file * file;
46598 int count;
46599 int error;
46600 };
46601 @@ -163,6 +171,10 @@ static int filldir(void * __buf, const char * name, int namlen, loff_t offset,
46602 buf->error = -EOVERFLOW;
46603 return -EOVERFLOW;
46604 }
46605 +
46606 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
46607 + return 0;
46608 +
46609 dirent = buf->previous;
46610 if (dirent) {
46611 if (__put_user(offset, &dirent->d_off))
46612 @@ -210,6 +222,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
46613 buf.previous = NULL;
46614 buf.count = count;
46615 buf.error = 0;
46616 + buf.file = file;
46617
46618 error = vfs_readdir(file, filldir, &buf);
46619 if (error >= 0)
46620 @@ -229,6 +242,7 @@ out:
46621 struct getdents_callback64 {
46622 struct linux_dirent64 __user * current_dir;
46623 struct linux_dirent64 __user * previous;
46624 + struct file *file;
46625 int count;
46626 int error;
46627 };
46628 @@ -244,6 +258,10 @@ static int filldir64(void * __buf, const char * name, int namlen, loff_t offset,
46629 buf->error = -EINVAL; /* only used if we fail.. */
46630 if (reclen > buf->count)
46631 return -EINVAL;
46632 +
46633 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
46634 + return 0;
46635 +
46636 dirent = buf->previous;
46637 if (dirent) {
46638 if (__put_user(offset, &dirent->d_off))
46639 @@ -291,6 +309,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
46640
46641 buf.current_dir = dirent;
46642 buf.previous = NULL;
46643 + buf.file = file;
46644 buf.count = count;
46645 buf.error = 0;
46646
46647 @@ -299,7 +318,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
46648 error = buf.error;
46649 lastdirent = buf.previous;
46650 if (lastdirent) {
46651 - typeof(lastdirent->d_off) d_off = file->f_pos;
46652 + typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
46653 if (__put_user(d_off, &lastdirent->d_off))
46654 error = -EFAULT;
46655 else
46656 diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
46657 index 60c0804..d814f98 100644
46658 --- a/fs/reiserfs/do_balan.c
46659 +++ b/fs/reiserfs/do_balan.c
46660 @@ -2051,7 +2051,7 @@ void do_balance(struct tree_balance *tb, /* tree_balance structure */
46661 return;
46662 }
46663
46664 - atomic_inc(&(fs_generation(tb->tb_sb)));
46665 + atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
46666 do_balance_starts(tb);
46667
46668 /* balance leaf returns 0 except if combining L R and S into
46669 diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
46670 index 7a99811..a7c96c4 100644
46671 --- a/fs/reiserfs/procfs.c
46672 +++ b/fs/reiserfs/procfs.c
46673 @@ -113,7 +113,7 @@ static int show_super(struct seq_file *m, struct super_block *sb)
46674 "SMALL_TAILS " : "NO_TAILS ",
46675 replay_only(sb) ? "REPLAY_ONLY " : "",
46676 convert_reiserfs(sb) ? "CONV " : "",
46677 - atomic_read(&r->s_generation_counter),
46678 + atomic_read_unchecked(&r->s_generation_counter),
46679 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
46680 SF(s_do_balance), SF(s_unneeded_left_neighbor),
46681 SF(s_good_search_by_key_reada), SF(s_bmaps),
46682 diff --git a/fs/select.c b/fs/select.c
46683 index d33418f..2a5345e 100644
46684 --- a/fs/select.c
46685 +++ b/fs/select.c
46686 @@ -20,6 +20,7 @@
46687 #include <linux/module.h>
46688 #include <linux/slab.h>
46689 #include <linux/poll.h>
46690 +#include <linux/security.h>
46691 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
46692 #include <linux/file.h>
46693 #include <linux/fdtable.h>
46694 @@ -837,6 +838,7 @@ int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
46695 struct poll_list *walk = head;
46696 unsigned long todo = nfds;
46697
46698 + gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
46699 if (nfds > rlimit(RLIMIT_NOFILE))
46700 return -EINVAL;
46701
46702 diff --git a/fs/seq_file.c b/fs/seq_file.c
46703 index dba43c3..a99fb63 100644
46704 --- a/fs/seq_file.c
46705 +++ b/fs/seq_file.c
46706 @@ -76,7 +76,8 @@ static int traverse(struct seq_file *m, loff_t offset)
46707 return 0;
46708 }
46709 if (!m->buf) {
46710 - m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
46711 + m->size = PAGE_SIZE;
46712 + m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
46713 if (!m->buf)
46714 return -ENOMEM;
46715 }
46716 @@ -116,7 +117,8 @@ static int traverse(struct seq_file *m, loff_t offset)
46717 Eoverflow:
46718 m->op->stop(m, p);
46719 kfree(m->buf);
46720 - m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
46721 + m->size <<= 1;
46722 + m->buf = kmalloc(m->size, GFP_KERNEL);
46723 return !m->buf ? -ENOMEM : -EAGAIN;
46724 }
46725
46726 @@ -169,7 +171,8 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
46727 m->version = file->f_version;
46728 /* grab buffer if we didn't have one */
46729 if (!m->buf) {
46730 - m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
46731 + m->size = PAGE_SIZE;
46732 + m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
46733 if (!m->buf)
46734 goto Enomem;
46735 }
46736 @@ -210,7 +213,8 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
46737 goto Fill;
46738 m->op->stop(m, p);
46739 kfree(m->buf);
46740 - m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
46741 + m->size <<= 1;
46742 + m->buf = kmalloc(m->size, GFP_KERNEL);
46743 if (!m->buf)
46744 goto Enomem;
46745 m->count = 0;
46746 @@ -549,7 +553,7 @@ static void single_stop(struct seq_file *p, void *v)
46747 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
46748 void *data)
46749 {
46750 - struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
46751 + seq_operations_no_const *op = kmalloc(sizeof(*op), GFP_KERNEL);
46752 int res = -ENOMEM;
46753
46754 if (op) {
46755 diff --git a/fs/splice.c b/fs/splice.c
46756 index fa2defa..8601650 100644
46757 --- a/fs/splice.c
46758 +++ b/fs/splice.c
46759 @@ -194,7 +194,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
46760 pipe_lock(pipe);
46761
46762 for (;;) {
46763 - if (!pipe->readers) {
46764 + if (!atomic_read(&pipe->readers)) {
46765 send_sig(SIGPIPE, current, 0);
46766 if (!ret)
46767 ret = -EPIPE;
46768 @@ -248,9 +248,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
46769 do_wakeup = 0;
46770 }
46771
46772 - pipe->waiting_writers++;
46773 + atomic_inc(&pipe->waiting_writers);
46774 pipe_wait(pipe);
46775 - pipe->waiting_writers--;
46776 + atomic_dec(&pipe->waiting_writers);
46777 }
46778
46779 pipe_unlock(pipe);
46780 @@ -560,7 +560,7 @@ static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
46781 old_fs = get_fs();
46782 set_fs(get_ds());
46783 /* The cast to a user pointer is valid due to the set_fs() */
46784 - res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
46785 + res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos);
46786 set_fs(old_fs);
46787
46788 return res;
46789 @@ -575,7 +575,7 @@ static ssize_t kernel_write(struct file *file, const char *buf, size_t count,
46790 old_fs = get_fs();
46791 set_fs(get_ds());
46792 /* The cast to a user pointer is valid due to the set_fs() */
46793 - res = vfs_write(file, (const char __user *)buf, count, &pos);
46794 + res = vfs_write(file, (const char __force_user *)buf, count, &pos);
46795 set_fs(old_fs);
46796
46797 return res;
46798 @@ -626,7 +626,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
46799 goto err;
46800
46801 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
46802 - vec[i].iov_base = (void __user *) page_address(page);
46803 + vec[i].iov_base = (void __force_user *) page_address(page);
46804 vec[i].iov_len = this_len;
46805 spd.pages[i] = page;
46806 spd.nr_pages++;
46807 @@ -846,10 +846,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
46808 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
46809 {
46810 while (!pipe->nrbufs) {
46811 - if (!pipe->writers)
46812 + if (!atomic_read(&pipe->writers))
46813 return 0;
46814
46815 - if (!pipe->waiting_writers && sd->num_spliced)
46816 + if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
46817 return 0;
46818
46819 if (sd->flags & SPLICE_F_NONBLOCK)
46820 @@ -1182,7 +1182,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
46821 * out of the pipe right after the splice_to_pipe(). So set
46822 * PIPE_READERS appropriately.
46823 */
46824 - pipe->readers = 1;
46825 + atomic_set(&pipe->readers, 1);
46826
46827 current->splice_pipe = pipe;
46828 }
46829 @@ -1734,9 +1734,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
46830 ret = -ERESTARTSYS;
46831 break;
46832 }
46833 - if (!pipe->writers)
46834 + if (!atomic_read(&pipe->writers))
46835 break;
46836 - if (!pipe->waiting_writers) {
46837 + if (!atomic_read(&pipe->waiting_writers)) {
46838 if (flags & SPLICE_F_NONBLOCK) {
46839 ret = -EAGAIN;
46840 break;
46841 @@ -1768,7 +1768,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
46842 pipe_lock(pipe);
46843
46844 while (pipe->nrbufs >= pipe->buffers) {
46845 - if (!pipe->readers) {
46846 + if (!atomic_read(&pipe->readers)) {
46847 send_sig(SIGPIPE, current, 0);
46848 ret = -EPIPE;
46849 break;
46850 @@ -1781,9 +1781,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
46851 ret = -ERESTARTSYS;
46852 break;
46853 }
46854 - pipe->waiting_writers++;
46855 + atomic_inc(&pipe->waiting_writers);
46856 pipe_wait(pipe);
46857 - pipe->waiting_writers--;
46858 + atomic_dec(&pipe->waiting_writers);
46859 }
46860
46861 pipe_unlock(pipe);
46862 @@ -1819,14 +1819,14 @@ retry:
46863 pipe_double_lock(ipipe, opipe);
46864
46865 do {
46866 - if (!opipe->readers) {
46867 + if (!atomic_read(&opipe->readers)) {
46868 send_sig(SIGPIPE, current, 0);
46869 if (!ret)
46870 ret = -EPIPE;
46871 break;
46872 }
46873
46874 - if (!ipipe->nrbufs && !ipipe->writers)
46875 + if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
46876 break;
46877
46878 /*
46879 @@ -1923,7 +1923,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
46880 pipe_double_lock(ipipe, opipe);
46881
46882 do {
46883 - if (!opipe->readers) {
46884 + if (!atomic_read(&opipe->readers)) {
46885 send_sig(SIGPIPE, current, 0);
46886 if (!ret)
46887 ret = -EPIPE;
46888 @@ -1968,7 +1968,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
46889 * return EAGAIN if we have the potential of some data in the
46890 * future, otherwise just return 0
46891 */
46892 - if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
46893 + if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
46894 ret = -EAGAIN;
46895
46896 pipe_unlock(ipipe);
46897 diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
46898 index d4e6080b..0e58b99 100644
46899 --- a/fs/sysfs/file.c
46900 +++ b/fs/sysfs/file.c
46901 @@ -37,7 +37,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent_lock);
46902
46903 struct sysfs_open_dirent {
46904 atomic_t refcnt;
46905 - atomic_t event;
46906 + atomic_unchecked_t event;
46907 wait_queue_head_t poll;
46908 struct list_head buffers; /* goes through sysfs_buffer.list */
46909 };
46910 @@ -81,7 +81,7 @@ static int fill_read_buffer(struct dentry * dentry, struct sysfs_buffer * buffer
46911 if (!sysfs_get_active(attr_sd))
46912 return -ENODEV;
46913
46914 - buffer->event = atomic_read(&attr_sd->s_attr.open->event);
46915 + buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
46916 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
46917
46918 sysfs_put_active(attr_sd);
46919 @@ -287,7 +287,7 @@ static int sysfs_get_open_dirent(struct sysfs_dirent *sd,
46920 return -ENOMEM;
46921
46922 atomic_set(&new_od->refcnt, 0);
46923 - atomic_set(&new_od->event, 1);
46924 + atomic_set_unchecked(&new_od->event, 1);
46925 init_waitqueue_head(&new_od->poll);
46926 INIT_LIST_HEAD(&new_od->buffers);
46927 goto retry;
46928 @@ -432,7 +432,7 @@ static unsigned int sysfs_poll(struct file *filp, poll_table *wait)
46929
46930 sysfs_put_active(attr_sd);
46931
46932 - if (buffer->event != atomic_read(&od->event))
46933 + if (buffer->event != atomic_read_unchecked(&od->event))
46934 goto trigger;
46935
46936 return DEFAULT_POLLMASK;
46937 @@ -451,7 +451,7 @@ void sysfs_notify_dirent(struct sysfs_dirent *sd)
46938
46939 od = sd->s_attr.open;
46940 if (od) {
46941 - atomic_inc(&od->event);
46942 + atomic_inc_unchecked(&od->event);
46943 wake_up_interruptible(&od->poll);
46944 }
46945
46946 diff --git a/fs/sysfs/mount.c b/fs/sysfs/mount.c
46947 index e34f0d9..740ea7b 100644
46948 --- a/fs/sysfs/mount.c
46949 +++ b/fs/sysfs/mount.c
46950 @@ -36,7 +36,11 @@ struct sysfs_dirent sysfs_root = {
46951 .s_name = "",
46952 .s_count = ATOMIC_INIT(1),
46953 .s_flags = SYSFS_DIR | (KOBJ_NS_TYPE_NONE << SYSFS_NS_TYPE_SHIFT),
46954 +#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
46955 + .s_mode = S_IFDIR | S_IRWXU,
46956 +#else
46957 .s_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
46958 +#endif
46959 .s_ino = 1,
46960 };
46961
46962 diff --git a/fs/sysfs/symlink.c b/fs/sysfs/symlink.c
46963 index a7ac78f..02158e1 100644
46964 --- a/fs/sysfs/symlink.c
46965 +++ b/fs/sysfs/symlink.c
46966 @@ -286,7 +286,7 @@ static void *sysfs_follow_link(struct dentry *dentry, struct nameidata *nd)
46967
46968 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
46969 {
46970 - char *page = nd_get_link(nd);
46971 + const char *page = nd_get_link(nd);
46972 if (!IS_ERR(page))
46973 free_page((unsigned long)page);
46974 }
46975 diff --git a/fs/udf/misc.c b/fs/udf/misc.c
46976 index c175b4d..8f36a16 100644
46977 --- a/fs/udf/misc.c
46978 +++ b/fs/udf/misc.c
46979 @@ -289,7 +289,7 @@ void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum,
46980
46981 u8 udf_tag_checksum(const struct tag *t)
46982 {
46983 - u8 *data = (u8 *)t;
46984 + const u8 *data = (const u8 *)t;
46985 u8 checksum = 0;
46986 int i;
46987 for (i = 0; i < sizeof(struct tag); ++i)
46988 diff --git a/fs/utimes.c b/fs/utimes.c
46989 index ba653f3..06ea4b1 100644
46990 --- a/fs/utimes.c
46991 +++ b/fs/utimes.c
46992 @@ -1,6 +1,7 @@
46993 #include <linux/compiler.h>
46994 #include <linux/file.h>
46995 #include <linux/fs.h>
46996 +#include <linux/security.h>
46997 #include <linux/linkage.h>
46998 #include <linux/mount.h>
46999 #include <linux/namei.h>
47000 @@ -101,6 +102,12 @@ static int utimes_common(struct path *path, struct timespec *times)
47001 goto mnt_drop_write_and_out;
47002 }
47003 }
47004 +
47005 + if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
47006 + error = -EACCES;
47007 + goto mnt_drop_write_and_out;
47008 + }
47009 +
47010 mutex_lock(&inode->i_mutex);
47011 error = notify_change(path->dentry, &newattrs);
47012 mutex_unlock(&inode->i_mutex);
47013 diff --git a/fs/xattr.c b/fs/xattr.c
47014 index 67583de..c5aad14 100644
47015 --- a/fs/xattr.c
47016 +++ b/fs/xattr.c
47017 @@ -315,7 +315,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
47018 * Extended attribute SET operations
47019 */
47020 static long
47021 -setxattr(struct dentry *d, const char __user *name, const void __user *value,
47022 +setxattr(struct path *path, const char __user *name, const void __user *value,
47023 size_t size, int flags)
47024 {
47025 int error;
47026 @@ -339,7 +339,13 @@ setxattr(struct dentry *d, const char __user *name, const void __user *value,
47027 return PTR_ERR(kvalue);
47028 }
47029
47030 - error = vfs_setxattr(d, kname, kvalue, size, flags);
47031 + if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
47032 + error = -EACCES;
47033 + goto out;
47034 + }
47035 +
47036 + error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
47037 +out:
47038 kfree(kvalue);
47039 return error;
47040 }
47041 @@ -356,7 +362,7 @@ SYSCALL_DEFINE5(setxattr, const char __user *, pathname,
47042 return error;
47043 error = mnt_want_write(path.mnt);
47044 if (!error) {
47045 - error = setxattr(path.dentry, name, value, size, flags);
47046 + error = setxattr(&path, name, value, size, flags);
47047 mnt_drop_write(path.mnt);
47048 }
47049 path_put(&path);
47050 @@ -375,7 +381,7 @@ SYSCALL_DEFINE5(lsetxattr, const char __user *, pathname,
47051 return error;
47052 error = mnt_want_write(path.mnt);
47053 if (!error) {
47054 - error = setxattr(path.dentry, name, value, size, flags);
47055 + error = setxattr(&path, name, value, size, flags);
47056 mnt_drop_write(path.mnt);
47057 }
47058 path_put(&path);
47059 @@ -386,17 +392,15 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
47060 const void __user *,value, size_t, size, int, flags)
47061 {
47062 struct file *f;
47063 - struct dentry *dentry;
47064 int error = -EBADF;
47065
47066 f = fget(fd);
47067 if (!f)
47068 return error;
47069 - dentry = f->f_path.dentry;
47070 - audit_inode(NULL, dentry);
47071 + audit_inode(NULL, f->f_path.dentry);
47072 error = mnt_want_write_file(f);
47073 if (!error) {
47074 - error = setxattr(dentry, name, value, size, flags);
47075 + error = setxattr(&f->f_path, name, value, size, flags);
47076 mnt_drop_write(f->f_path.mnt);
47077 }
47078 fput(f);
47079 diff --git a/fs/xattr_acl.c b/fs/xattr_acl.c
47080 index 8d5a506..7f62712 100644
47081 --- a/fs/xattr_acl.c
47082 +++ b/fs/xattr_acl.c
47083 @@ -17,8 +17,8 @@
47084 struct posix_acl *
47085 posix_acl_from_xattr(const void *value, size_t size)
47086 {
47087 - posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
47088 - posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
47089 + const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
47090 + const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
47091 int count;
47092 struct posix_acl *acl;
47093 struct posix_acl_entry *acl_e;
47094 diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
47095 index d0ab788..827999b 100644
47096 --- a/fs/xfs/xfs_bmap.c
47097 +++ b/fs/xfs/xfs_bmap.c
47098 @@ -190,7 +190,7 @@ xfs_bmap_validate_ret(
47099 int nmap,
47100 int ret_nmap);
47101 #else
47102 -#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
47103 +#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
47104 #endif /* DEBUG */
47105
47106 STATIC int
47107 diff --git a/fs/xfs/xfs_dir2_sf.c b/fs/xfs/xfs_dir2_sf.c
47108 index 79d05e8..e3e5861 100644
47109 --- a/fs/xfs/xfs_dir2_sf.c
47110 +++ b/fs/xfs/xfs_dir2_sf.c
47111 @@ -852,7 +852,15 @@ xfs_dir2_sf_getdents(
47112 }
47113
47114 ino = xfs_dir2_sfe_get_ino(sfp, sfep);
47115 - if (filldir(dirent, (char *)sfep->name, sfep->namelen,
47116 + if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
47117 + char name[sfep->namelen];
47118 + memcpy(name, sfep->name, sfep->namelen);
47119 + if (filldir(dirent, name, sfep->namelen,
47120 + off & 0x7fffffff, ino, DT_UNKNOWN)) {
47121 + *offset = off & 0x7fffffff;
47122 + return 0;
47123 + }
47124 + } else if (filldir(dirent, (char *)sfep->name, sfep->namelen,
47125 off & 0x7fffffff, ino, DT_UNKNOWN)) {
47126 *offset = off & 0x7fffffff;
47127 return 0;
47128 diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
47129 index d99a905..9f88202 100644
47130 --- a/fs/xfs/xfs_ioctl.c
47131 +++ b/fs/xfs/xfs_ioctl.c
47132 @@ -128,7 +128,7 @@ xfs_find_handle(
47133 }
47134
47135 error = -EFAULT;
47136 - if (copy_to_user(hreq->ohandle, &handle, hsize) ||
47137 + if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
47138 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
47139 goto out_put;
47140
47141 diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
47142 index 23ce927..e274cc1 100644
47143 --- a/fs/xfs/xfs_iops.c
47144 +++ b/fs/xfs/xfs_iops.c
47145 @@ -447,7 +447,7 @@ xfs_vn_put_link(
47146 struct nameidata *nd,
47147 void *p)
47148 {
47149 - char *s = nd_get_link(nd);
47150 + const char *s = nd_get_link(nd);
47151
47152 if (!IS_ERR(s))
47153 kfree(s);
47154 diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c
47155 index ce9268a..ee98d0b 100644
47156 --- a/fs/xfs/xfs_vnodeops.c
47157 +++ b/fs/xfs/xfs_vnodeops.c
47158 @@ -131,7 +131,8 @@ xfs_readlink(
47159 __func__, (unsigned long long) ip->i_ino,
47160 (long long) pathlen);
47161 ASSERT(0);
47162 - return XFS_ERROR(EFSCORRUPTED);
47163 + error = XFS_ERROR(EFSCORRUPTED);
47164 + goto out;
47165 }
47166
47167
47168 diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
47169 new file mode 100644
47170 index 0000000..ab77366
47171 --- /dev/null
47172 +++ b/grsecurity/Kconfig
47173 @@ -0,0 +1,1065 @@
47174 +#
47175 +# grsecurity configuration
47176 +#
47177 +
47178 +menu "Grsecurity"
47179 +
47180 +config GRKERNSEC
47181 + bool "Grsecurity"
47182 + select CRYPTO
47183 + select CRYPTO_SHA256
47184 + help
47185 + If you say Y here, you will be able to configure many features
47186 + that will enhance the security of your system. It is highly
47187 + recommended that you say Y here and read through the help
47188 + for each option so that you fully understand the features and
47189 + can evaluate their usefulness for your machine.
47190 +
47191 +choice
47192 + prompt "Security Level"
47193 + depends on GRKERNSEC
47194 + default GRKERNSEC_CUSTOM
47195 +
47196 +config GRKERNSEC_LOW
47197 + bool "Low"
47198 + select GRKERNSEC_LINK
47199 + select GRKERNSEC_FIFO
47200 + select GRKERNSEC_RANDNET
47201 + select GRKERNSEC_DMESG
47202 + select GRKERNSEC_CHROOT
47203 + select GRKERNSEC_CHROOT_CHDIR
47204 +
47205 + help
47206 + If you choose this option, several of the grsecurity options will
47207 + be enabled that will give you greater protection against a number
47208 + of attacks, while assuring that none of your software will have any
47209 + conflicts with the additional security measures. If you run a lot
47210 + of unusual software, or you are having problems with the higher
47211 + security levels, you should say Y here. With this option, the
47212 + following features are enabled:
47213 +
47214 + - Linking restrictions
47215 + - FIFO restrictions
47216 + - Restricted dmesg
47217 + - Enforced chdir("/") on chroot
47218 + - Runtime module disabling
47219 +
47220 +config GRKERNSEC_MEDIUM
47221 + bool "Medium"
47222 + select PAX
47223 + select PAX_EI_PAX
47224 + select PAX_PT_PAX_FLAGS
47225 + select PAX_HAVE_ACL_FLAGS
47226 + select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
47227 + select GRKERNSEC_CHROOT
47228 + select GRKERNSEC_CHROOT_SYSCTL
47229 + select GRKERNSEC_LINK
47230 + select GRKERNSEC_FIFO
47231 + select GRKERNSEC_DMESG
47232 + select GRKERNSEC_RANDNET
47233 + select GRKERNSEC_FORKFAIL
47234 + select GRKERNSEC_TIME
47235 + select GRKERNSEC_SIGNAL
47236 + select GRKERNSEC_CHROOT
47237 + select GRKERNSEC_CHROOT_UNIX
47238 + select GRKERNSEC_CHROOT_MOUNT
47239 + select GRKERNSEC_CHROOT_PIVOT
47240 + select GRKERNSEC_CHROOT_DOUBLE
47241 + select GRKERNSEC_CHROOT_CHDIR
47242 + select GRKERNSEC_CHROOT_MKNOD
47243 + select GRKERNSEC_PROC
47244 + select GRKERNSEC_PROC_USERGROUP
47245 + select PAX_RANDUSTACK
47246 + select PAX_ASLR
47247 + select PAX_RANDMMAP
47248 + select PAX_REFCOUNT if (X86 || SPARC64)
47249 + select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
47250 +
47251 + help
47252 + If you say Y here, several features in addition to those included
47253 + in the low additional security level will be enabled. These
47254 + features provide even more security to your system, though in rare
47255 + cases they may be incompatible with very old or poorly written
47256 + software. If you enable this option, make sure that your auth
47257 + service (identd) is running as gid 1001. With this option,
47258 + the following features (in addition to those provided in the
47259 + low additional security level) will be enabled:
47260 +
47261 + - Failed fork logging
47262 + - Time change logging
47263 + - Signal logging
47264 + - Deny mounts in chroot
47265 + - Deny double chrooting
47266 + - Deny sysctl writes in chroot
47267 + - Deny mknod in chroot
47268 + - Deny access to abstract AF_UNIX sockets out of chroot
47269 + - Deny pivot_root in chroot
47270 + - Denied reads/writes of /dev/kmem, /dev/mem, and /dev/port
47271 + - /proc restrictions with special GID set to 10 (usually wheel)
47272 + - Address Space Layout Randomization (ASLR)
47273 + - Prevent exploitation of most refcount overflows
47274 + - Bounds checking of copying between the kernel and userland
47275 +
47276 +config GRKERNSEC_HIGH
47277 + bool "High"
47278 + select GRKERNSEC_LINK
47279 + select GRKERNSEC_FIFO
47280 + select GRKERNSEC_DMESG
47281 + select GRKERNSEC_FORKFAIL
47282 + select GRKERNSEC_TIME
47283 + select GRKERNSEC_SIGNAL
47284 + select GRKERNSEC_CHROOT
47285 + select GRKERNSEC_CHROOT_SHMAT
47286 + select GRKERNSEC_CHROOT_UNIX
47287 + select GRKERNSEC_CHROOT_MOUNT
47288 + select GRKERNSEC_CHROOT_FCHDIR
47289 + select GRKERNSEC_CHROOT_PIVOT
47290 + select GRKERNSEC_CHROOT_DOUBLE
47291 + select GRKERNSEC_CHROOT_CHDIR
47292 + select GRKERNSEC_CHROOT_MKNOD
47293 + select GRKERNSEC_CHROOT_CAPS
47294 + select GRKERNSEC_CHROOT_SYSCTL
47295 + select GRKERNSEC_CHROOT_FINDTASK
47296 + select GRKERNSEC_SYSFS_RESTRICT
47297 + select GRKERNSEC_PROC
47298 + select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
47299 + select GRKERNSEC_HIDESYM
47300 + select GRKERNSEC_BRUTE
47301 + select GRKERNSEC_PROC_USERGROUP
47302 + select GRKERNSEC_KMEM
47303 + select GRKERNSEC_RESLOG
47304 + select GRKERNSEC_RANDNET
47305 + select GRKERNSEC_PROC_ADD
47306 + select GRKERNSEC_CHROOT_CHMOD
47307 + select GRKERNSEC_CHROOT_NICE
47308 + select GRKERNSEC_SETXID
47309 + select GRKERNSEC_AUDIT_MOUNT
47310 + select GRKERNSEC_MODHARDEN if (MODULES)
47311 + select GRKERNSEC_HARDEN_PTRACE
47312 + select GRKERNSEC_PTRACE_READEXEC
47313 + select GRKERNSEC_VM86 if (X86_32)
47314 + select GRKERNSEC_KERN_LOCKOUT if (X86 || ARM || PPC || SPARC)
47315 + select PAX
47316 + select PAX_RANDUSTACK
47317 + select PAX_ASLR
47318 + select PAX_RANDMMAP
47319 + select PAX_NOEXEC
47320 + select PAX_MPROTECT
47321 + select PAX_EI_PAX
47322 + select PAX_PT_PAX_FLAGS
47323 + select PAX_HAVE_ACL_FLAGS
47324 + select PAX_KERNEXEC if ((PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN)
47325 + select PAX_MEMORY_UDEREF if (X86 && !XEN)
47326 + select PAX_RANDKSTACK if (X86_TSC && X86)
47327 + select PAX_SEGMEXEC if (X86_32)
47328 + select PAX_PAGEEXEC
47329 + select PAX_EMUPLT if (ALPHA || PARISC || SPARC)
47330 + select PAX_EMUTRAMP if (PARISC)
47331 + select PAX_EMUSIGRT if (PARISC)
47332 + select PAX_ETEXECRELOCS if (ALPHA || IA64 || PARISC)
47333 + select PAX_ELFRELOCS if (PAX_ETEXECRELOCS || (IA64 || PPC || X86))
47334 + select PAX_REFCOUNT if (X86 || SPARC64)
47335 + select PAX_USERCOPY if ((X86 || PPC || SPARC || ARM) && (SLAB || SLUB || SLOB))
47336 + help
47337 + If you say Y here, many of the features of grsecurity will be
47338 + enabled, which will protect you against many kinds of attacks
47339 + against your system. The heightened security comes at a cost
47340 + of an increased chance of incompatibilities with rare software
47341 + on your machine. Since this security level enables PaX, you should
47342 + view <http://pax.grsecurity.net> and read about the PaX
47343 + project. While you are there, download chpax and run it on
47344 + binaries that cause problems with PaX. Also remember that
47345 + since the /proc restrictions are enabled, you must run your
47346 + identd as gid 1001. This security level enables the following
47347 + features in addition to those listed in the low and medium
47348 + security levels:
47349 +
47350 + - Additional /proc restrictions
47351 + - Chmod restrictions in chroot
47352 + - No signals, ptrace, or viewing of processes outside of chroot
47353 + - Capability restrictions in chroot
47354 + - Deny fchdir out of chroot
47355 + - Priority restrictions in chroot
47356 + - Segmentation-based implementation of PaX
47357 + - Mprotect restrictions
47358 + - Removal of addresses from /proc/<pid>/[smaps|maps|stat]
47359 + - Kernel stack randomization
47360 + - Mount/unmount/remount logging
47361 + - Kernel symbol hiding
47362 + - Hardening of module auto-loading
47363 + - Ptrace restrictions
47364 + - Restricted vm86 mode
47365 + - Restricted sysfs/debugfs
47366 + - Active kernel exploit response
47367 +
47368 +config GRKERNSEC_CUSTOM
47369 + bool "Custom"
47370 + help
47371 + If you say Y here, you will be able to configure every grsecurity
47372 + option, which allows you to enable many more features that aren't
47373 + covered in the basic security levels. These additional features
47374 + include TPE, socket restrictions, and the sysctl system for
47375 + grsecurity. It is advised that you read through the help for
47376 + each option to determine its usefulness in your situation.
47377 +
47378 +endchoice
47379 +
47380 +menu "Address Space Protection"
47381 +depends on GRKERNSEC
47382 +
47383 +config GRKERNSEC_KMEM
47384 + bool "Deny reading/writing to /dev/kmem, /dev/mem, and /dev/port"
47385 + select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
47386 + help
47387 + If you say Y here, /dev/kmem and /dev/mem won't be allowed to
47388 + be written to or read from to modify or leak the contents of the running
47389 + kernel. /dev/port will also not be allowed to be opened. If you have module
47390 + support disabled, enabling this will close up four ways that are
47391 + currently used to insert malicious code into the running kernel.
47392 + Even with all these features enabled, we still highly recommend that
47393 + you use the RBAC system, as it is still possible for an attacker to
47394 + modify the running kernel through privileged I/O granted by ioperm/iopl.
47395 + If you are not using XFree86, you may be able to stop this additional
47396 + case by enabling the 'Disable privileged I/O' option. Though nothing
47397 + legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
47398 + but only to video memory, which is the only writing we allow in this
47399 + case. If /dev/kmem or /dev/mem are mmaped without PROT_WRITE, they will
47400 + not be allowed to mprotect it with PROT_WRITE later.
47401 + It is highly recommended that you say Y here if you meet all the
47402 + conditions above.
47403 +
47404 +config GRKERNSEC_VM86
47405 + bool "Restrict VM86 mode"
47406 + depends on X86_32
47407 +
47408 + help
47409 + If you say Y here, only processes with CAP_SYS_RAWIO will be able to
47410 + make use of a special execution mode on 32bit x86 processors called
47411 + Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
47412 + video cards and will still work with this option enabled. The purpose
47413 + of the option is to prevent exploitation of emulation errors in
47414 + virtualization of vm86 mode like the one discovered in VMWare in 2009.
47415 + Nearly all users should be able to enable this option.
47416 +
47417 +config GRKERNSEC_IO
47418 + bool "Disable privileged I/O"
47419 + depends on X86
47420 + select RTC_CLASS
47421 + select RTC_INTF_DEV
47422 + select RTC_DRV_CMOS
47423 +
47424 + help
47425 + If you say Y here, all ioperm and iopl calls will return an error.
47426 + Ioperm and iopl can be used to modify the running kernel.
47427 + Unfortunately, some programs need this access to operate properly,
47428 + the most notable of which are XFree86 and hwclock. hwclock can be
47429 + remedied by having RTC support in the kernel, so real-time
47430 + clock support is enabled if this option is enabled, to ensure
47431 + that hwclock operates correctly. XFree86 still will not
47432 + operate correctly with this option enabled, so DO NOT CHOOSE Y
47433 + IF YOU USE XFree86. If you use XFree86 and you still want to
47434 + protect your kernel against modification, use the RBAC system.
47435 +
47436 +config GRKERNSEC_PROC_MEMMAP
47437 + bool "Remove addresses from /proc/<pid>/[smaps|maps|stat]"
47438 + default y if (PAX_NOEXEC || PAX_ASLR)
47439 + depends on PAX_NOEXEC || PAX_ASLR
47440 + help
47441 + If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
47442 + give no information about the addresses of its mappings if
47443 + PaX features that rely on random addresses are enabled on the task.
47444 + If you use PaX it is greatly recommended that you say Y here as it
47445 + closes up a hole that makes the full ASLR useless for suid
47446 + binaries.
47447 +
47448 +config GRKERNSEC_BRUTE
47449 + bool "Deter exploit bruteforcing"
47450 + help
47451 + If you say Y here, attempts to bruteforce exploits against forking
47452 + daemons such as apache or sshd, as well as against suid/sgid binaries
47453 + will be deterred. When a child of a forking daemon is killed by PaX
47454 + or crashes due to an illegal instruction or other suspicious signal,
47455 + the parent process will be delayed 30 seconds upon every subsequent
47456 + fork until the administrator is able to assess the situation and
47457 + restart the daemon.
47458 + In the suid/sgid case, the attempt is logged, the user has all their
47459 + processes terminated, and they are prevented from executing any further
47460 + processes for 15 minutes.
47461 + It is recommended that you also enable signal logging in the auditing
47462 + section so that logs are generated when a process triggers a suspicious
47463 + signal.
47464 + If the sysctl option is enabled, a sysctl option with name
47465 + "deter_bruteforce" is created.
47466 +
47467 +
47468 +config GRKERNSEC_MODHARDEN
47469 + bool "Harden module auto-loading"
47470 + depends on MODULES
47471 + help
47472 + If you say Y here, module auto-loading in response to use of some
47473 + feature implemented by an unloaded module will be restricted to
47474 + root users. Enabling this option helps defend against attacks
47475 + by unprivileged users who abuse the auto-loading behavior to
47476 + cause a vulnerable module to load that is then exploited.
47477 +
47478 + If this option prevents a legitimate use of auto-loading for a
47479 + non-root user, the administrator can execute modprobe manually
47480 + with the exact name of the module mentioned in the alert log.
47481 + Alternatively, the administrator can add the module to the list
47482 + of modules loaded at boot by modifying init scripts.
47483 +
47484 + Modification of init scripts will most likely be needed on
47485 + Ubuntu servers with encrypted home directory support enabled,
47486 + as the first non-root user logging in will cause the ecb(aes),
47487 + ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
47488 +
47489 +config GRKERNSEC_HIDESYM
47490 + bool "Hide kernel symbols"
47491 + help
47492 + If you say Y here, getting information on loaded modules, and
47493 + displaying all kernel symbols through a syscall will be restricted
47494 + to users with CAP_SYS_MODULE. For software compatibility reasons,
47495 + /proc/kallsyms will be restricted to the root user. The RBAC
47496 + system can hide that entry even from root.
47497 +
47498 + This option also prevents leaking of kernel addresses through
47499 + several /proc entries.
47500 +
47501 + Note that this option is only effective provided the following
47502 + conditions are met:
47503 + 1) The kernel using grsecurity is not precompiled by some distribution
47504 + 2) You have also enabled GRKERNSEC_DMESG
47505 + 3) You are using the RBAC system and hiding other files such as your
47506 + kernel image and System.map. Alternatively, enabling this option
47507 + causes the permissions on /boot, /lib/modules, and the kernel
47508 + source directory to change at compile time to prevent
47509 + reading by non-root users.
47510 + If the above conditions are met, this option will aid in providing a
47511 + useful protection against local kernel exploitation of overflows
47512 + and arbitrary read/write vulnerabilities.
47513 +
47514 +config GRKERNSEC_KERN_LOCKOUT
47515 + bool "Active kernel exploit response"
47516 + depends on X86 || ARM || PPC || SPARC
47517 + help
47518 + If you say Y here, when a PaX alert is triggered due to suspicious
47519 + activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
47520 + or an OOPs occurs due to bad memory accesses, instead of just
47521 + terminating the offending process (and potentially allowing
47522 + a subsequent exploit from the same user), we will take one of two
47523 + actions:
47524 + If the user was root, we will panic the system
47525 + If the user was non-root, we will log the attempt, terminate
47526 + all processes owned by the user, then prevent them from creating
47527 + any new processes until the system is restarted
47528 + This deters repeated kernel exploitation/bruteforcing attempts
47529 + and is useful for later forensics.
47530 +
47531 +endmenu
47532 +menu "Role Based Access Control Options"
47533 +depends on GRKERNSEC
47534 +
47535 +config GRKERNSEC_RBAC_DEBUG
47536 + bool
47537 +
47538 +config GRKERNSEC_NO_RBAC
47539 + bool "Disable RBAC system"
47540 + help
47541 + If you say Y here, the /dev/grsec device will be removed from the kernel,
47542 + preventing the RBAC system from being enabled. You should only say Y
47543 + here if you have no intention of using the RBAC system, so as to prevent
47544 + an attacker with root access from misusing the RBAC system to hide files
47545 + and processes when loadable module support and /dev/[k]mem have been
47546 + locked down.
47547 +
47548 +config GRKERNSEC_ACL_HIDEKERN
47549 + bool "Hide kernel processes"
47550 + help
47551 + If you say Y here, all kernel threads will be hidden to all
47552 + processes but those whose subject has the "view hidden processes"
47553 + flag.
47554 +
47555 +config GRKERNSEC_ACL_MAXTRIES
47556 + int "Maximum tries before password lockout"
47557 + default 3
47558 + help
47559 + This option enforces the maximum number of times a user can attempt
47560 + to authorize themselves with the grsecurity RBAC system before being
47561 + denied the ability to attempt authorization again for a specified time.
47562 + The lower the number, the harder it will be to brute-force a password.
47563 +
47564 +config GRKERNSEC_ACL_TIMEOUT
47565 + int "Time to wait after max password tries, in seconds"
47566 + default 30
47567 + help
47568 + This option specifies the time the user must wait after attempting to
47569 + authorize to the RBAC system with the maximum number of invalid
47570 + passwords. The higher the number, the harder it will be to brute-force
47571 + a password.
47572 +
47573 +endmenu
47574 +menu "Filesystem Protections"
47575 +depends on GRKERNSEC
47576 +
47577 +config GRKERNSEC_PROC
47578 + bool "Proc restrictions"
47579 + help
47580 + If you say Y here, the permissions of the /proc filesystem
47581 + will be altered to enhance system security and privacy. You MUST
47582 + choose either a user only restriction or a user and group restriction.
47583 + Depending upon the option you choose, you can either restrict users to
47584 + see only the processes they themselves run, or choose a group that can
47585 + view all processes and files normally restricted to root if you choose
47586 + the "restrict to user only" option. NOTE: If you're running identd as
47587 + a non-root user, you will have to run it as the group you specify here.
47588 +
47589 +config GRKERNSEC_PROC_USER
47590 + bool "Restrict /proc to user only"
47591 + depends on GRKERNSEC_PROC
47592 + help
47593 + If you say Y here, non-root users will only be able to view their own
47594 + processes, and restricts them from viewing network-related information,
47595 + and viewing kernel symbol and module information.
47596 +
47597 +config GRKERNSEC_PROC_USERGROUP
47598 + bool "Allow special group"
47599 + depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
47600 + help
47601 + If you say Y here, you will be able to select a group that will be
47602 + able to view all processes and network-related information. If you've
47603 + enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
47604 + remain hidden. This option is useful if you want to run identd as
47605 + a non-root user.
47606 +
47607 +config GRKERNSEC_PROC_GID
47608 + int "GID for special group"
47609 + depends on GRKERNSEC_PROC_USERGROUP
47610 + default 1001
47611 +
47612 +config GRKERNSEC_PROC_ADD
47613 + bool "Additional restrictions"
47614 + depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
47615 + help
47616 + If you say Y here, additional restrictions will be placed on
47617 + /proc that keep normal users from viewing device information and
47618 + slabinfo information that could be useful for exploits.
47619 +
47620 +config GRKERNSEC_LINK
47621 + bool "Linking restrictions"
47622 + help
47623 + If you say Y here, /tmp race exploits will be prevented, since users
47624 + will no longer be able to follow symlinks owned by other users in
47625 + world-writable +t directories (e.g. /tmp), unless the owner of the
47626 + symlink is the owner of the directory. Users will also not be
47627 + able to hardlink to files they do not own. If the sysctl option is
47628 + enabled, a sysctl option with name "linking_restrictions" is created.
47629 +
47630 +config GRKERNSEC_FIFO
47631 + bool "FIFO restrictions"
47632 + help
47633 + If you say Y here, users will not be able to write to FIFOs they don't
47634 + own in world-writable +t directories (e.g. /tmp), unless the owner of
47635 + the FIFO is the same owner of the directory it's held in. If the sysctl
47636 + option is enabled, a sysctl option with name "fifo_restrictions" is
47637 + created.
47638 +
47639 +config GRKERNSEC_SYSFS_RESTRICT
47640 + bool "Sysfs/debugfs restriction"
47641 + depends on SYSFS
47642 + help
47643 + If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
47644 + any filesystem normally mounted under it (e.g. debugfs) will only
47645 + be accessible by root. These filesystems generally provide access
47646 + to hardware and debug information that isn't appropriate for unprivileged
47647 + users of the system. Sysfs and debugfs have also become a large source
47648 + of new vulnerabilities, ranging from infoleaks to local compromise.
47649 + There has been very little oversight with an eye toward security involved
47650 + in adding new exporters of information to these filesystems, so their
47651 + use is discouraged.
47652 + This option is equivalent to a chmod 0700 of the mount paths.
47653 +
47654 +config GRKERNSEC_ROFS
47655 + bool "Runtime read-only mount protection"
47656 + help
47657 + If you say Y here, a sysctl option with name "romount_protect" will
47658 + be created. By setting this option to 1 at runtime, filesystems
47659 + will be protected in the following ways:
47660 + * No new writable mounts will be allowed
47661 + * Existing read-only mounts won't be able to be remounted read/write
47662 + * Write operations will be denied on all block devices
47663 + This option acts independently of grsec_lock: once it is set to 1,
47664 + it cannot be turned off. Therefore, please be mindful of the resulting
47665 + behavior if this option is enabled in an init script on a read-only
47666 + filesystem. This feature is mainly intended for secure embedded systems.
47667 +
47668 +config GRKERNSEC_CHROOT
47669 + bool "Chroot jail restrictions"
47670 + help
47671 + If you say Y here, you will be able to choose several options that will
47672 + make breaking out of a chrooted jail much more difficult. If you
47673 + encounter no software incompatibilities with the following options, it
47674 + is recommended that you enable each one.
47675 +
47676 +config GRKERNSEC_CHROOT_MOUNT
47677 + bool "Deny mounts"
47678 + depends on GRKERNSEC_CHROOT
47679 + help
47680 + If you say Y here, processes inside a chroot will not be able to
47681 + mount or remount filesystems. If the sysctl option is enabled, a
47682 + sysctl option with name "chroot_deny_mount" is created.
47683 +
47684 +config GRKERNSEC_CHROOT_DOUBLE
47685 + bool "Deny double-chroots"
47686 + depends on GRKERNSEC_CHROOT
47687 + help
47688 + If you say Y here, processes inside a chroot will not be able to chroot
47689 + again outside the chroot. This is a widely used method of breaking
47690 + out of a chroot jail and should not be allowed. If the sysctl
47691 + option is enabled, a sysctl option with name
47692 + "chroot_deny_chroot" is created.
47693 +
47694 +config GRKERNSEC_CHROOT_PIVOT
47695 + bool "Deny pivot_root in chroot"
47696 + depends on GRKERNSEC_CHROOT
47697 + help
47698 + If you say Y here, processes inside a chroot will not be able to use
47699 + a function called pivot_root() that was introduced in Linux 2.3.41. It
47700 + works similar to chroot in that it changes the root filesystem. This
47701 + function could be misused in a chrooted process to attempt to break out
47702 + of the chroot, and therefore should not be allowed. If the sysctl
47703 + option is enabled, a sysctl option with name "chroot_deny_pivot" is
47704 + created.
47705 +
47706 +config GRKERNSEC_CHROOT_CHDIR
47707 + bool "Enforce chdir(\"/\") on all chroots"
47708 + depends on GRKERNSEC_CHROOT
47709 + help
47710 + If you say Y here, the current working directory of all newly-chrooted
47711 + applications will be set to the root directory of the chroot.
47712 + The man page on chroot(2) states:
47713 + Note that this call does not change the current working
47714 + directory, so that `.' can be outside the tree rooted at
47715 + `/'. In particular, the super-user can escape from a
47716 + `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
47717 +
47718 + It is recommended that you say Y here, since it's not known to break
47719 + any software. If the sysctl option is enabled, a sysctl option with
47720 + name "chroot_enforce_chdir" is created.
47721 +
47722 +config GRKERNSEC_CHROOT_CHMOD
47723 + bool "Deny (f)chmod +s"
47724 + depends on GRKERNSEC_CHROOT
47725 + help
47726 + If you say Y here, processes inside a chroot will not be able to chmod
47727 + or fchmod files to make them have suid or sgid bits. This protects
47728 + against another published method of breaking a chroot. If the sysctl
47729 + option is enabled, a sysctl option with name "chroot_deny_chmod" is
47730 + created.
47731 +
47732 +config GRKERNSEC_CHROOT_FCHDIR
47733 + bool "Deny fchdir out of chroot"
47734 + depends on GRKERNSEC_CHROOT
47735 + help
47736 + If you say Y here, a well-known method of breaking chroots by fchdir'ing
47737 + to a file descriptor of the chrooting process that points to a directory
47738 + outside the filesystem will be stopped. If the sysctl option
47739 + is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
47740 +
47741 +config GRKERNSEC_CHROOT_MKNOD
47742 + bool "Deny mknod"
47743 + depends on GRKERNSEC_CHROOT
47744 + help
47745 + If you say Y here, processes inside a chroot will not be allowed to
47746 + mknod. The problem with using mknod inside a chroot is that it
47747 + would allow an attacker to create a device entry that is the same
47748 + as one on the physical root of your system, which could range from
47749 + anything from the console device to a device for your harddrive (which
47750 + they could then use to wipe the drive or steal data). It is recommended
47751 + that you say Y here, unless you run into software incompatibilities.
47752 + If the sysctl option is enabled, a sysctl option with name
47753 + "chroot_deny_mknod" is created.
47754 +
47755 +config GRKERNSEC_CHROOT_SHMAT
47756 + bool "Deny shmat() out of chroot"
47757 + depends on GRKERNSEC_CHROOT
47758 + help
47759 + If you say Y here, processes inside a chroot will not be able to attach
47760 + to shared memory segments that were created outside of the chroot jail.
47761 + It is recommended that you say Y here. If the sysctl option is enabled,
47762 + a sysctl option with name "chroot_deny_shmat" is created.
47763 +
47764 +config GRKERNSEC_CHROOT_UNIX
47765 + bool "Deny access to abstract AF_UNIX sockets out of chroot"
47766 + depends on GRKERNSEC_CHROOT
47767 + help
47768 + If you say Y here, processes inside a chroot will not be able to
47769 + connect to abstract (meaning not belonging to a filesystem) Unix
47770 + domain sockets that were bound outside of a chroot. It is recommended
47771 + that you say Y here. If the sysctl option is enabled, a sysctl option
47772 + with name "chroot_deny_unix" is created.
47773 +
47774 +config GRKERNSEC_CHROOT_FINDTASK
47775 + bool "Protect outside processes"
47776 + depends on GRKERNSEC_CHROOT
47777 + help
47778 + If you say Y here, processes inside a chroot will not be able to
47779 + kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
47780 + getsid, or view any process outside of the chroot. If the sysctl
47781 + option is enabled, a sysctl option with name "chroot_findtask" is
47782 + created.
47783 +
47784 +config GRKERNSEC_CHROOT_NICE
47785 + bool "Restrict priority changes"
47786 + depends on GRKERNSEC_CHROOT
47787 + help
47788 + If you say Y here, processes inside a chroot will not be able to raise
47789 + the priority of processes in the chroot, or alter the priority of
47790 + processes outside the chroot. This provides more security than simply
47791 + removing CAP_SYS_NICE from the process' capability set. If the
47792 + sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
47793 + is created.
47794 +
47795 +config GRKERNSEC_CHROOT_SYSCTL
47796 + bool "Deny sysctl writes"
47797 + depends on GRKERNSEC_CHROOT
47798 + help
47799 + If you say Y here, an attacker in a chroot will not be able to
47800 + write to sysctl entries, either by sysctl(2) or through a /proc
47801 + interface. It is strongly recommended that you say Y here. If the
47802 + sysctl option is enabled, a sysctl option with name
47803 + "chroot_deny_sysctl" is created.
47804 +
47805 +config GRKERNSEC_CHROOT_CAPS
47806 + bool "Capability restrictions"
47807 + depends on GRKERNSEC_CHROOT
47808 + help
47809 + If you say Y here, the capabilities on all processes within a
47810 + chroot jail will be lowered to stop module insertion, raw i/o,
47811 + system and net admin tasks, rebooting the system, modifying immutable
47812 + files, modifying IPC owned by another, and changing the system time.
47813 + This is left an option because it can break some apps. Disable this
47814 + if your chrooted apps are having problems performing those kinds of
47815 + tasks. If the sysctl option is enabled, a sysctl option with
47816 + name "chroot_caps" is created.
47817 +
47818 +endmenu
47819 +menu "Kernel Auditing"
47820 +depends on GRKERNSEC
47821 +
47822 +config GRKERNSEC_AUDIT_GROUP
47823 + bool "Single group for auditing"
47824 + help
47825 + If you say Y here, the exec, chdir, and (un)mount logging features
47826 + will only operate on a group you specify. This option is recommended
47827 + if you only want to watch certain users instead of having a large
47828 + amount of logs from the entire system. If the sysctl option is enabled,
47829 + a sysctl option with name "audit_group" is created.
47830 +
47831 +config GRKERNSEC_AUDIT_GID
47832 + int "GID for auditing"
47833 + depends on GRKERNSEC_AUDIT_GROUP
47834 + default 1007
47835 +
47836 +config GRKERNSEC_EXECLOG
47837 + bool "Exec logging"
47838 + help
47839 + If you say Y here, all execve() calls will be logged (since the
47840 + other exec*() calls are frontends to execve(), all execution
47841 + will be logged). Useful for shell-servers that like to keep track
47842 + of their users. If the sysctl option is enabled, a sysctl option with
47843 + name "exec_logging" is created.
47844 + WARNING: This option when enabled will produce a LOT of logs, especially
47845 + on an active system.
47846 +
47847 +config GRKERNSEC_RESLOG
47848 + bool "Resource logging"
47849 + help
47850 + If you say Y here, all attempts to overstep resource limits will
47851 + be logged with the resource name, the requested size, and the current
47852 + limit. It is highly recommended that you say Y here. If the sysctl
47853 + option is enabled, a sysctl option with name "resource_logging" is
47854 + created. If the RBAC system is enabled, the sysctl value is ignored.
47855 +
47856 +config GRKERNSEC_CHROOT_EXECLOG
47857 + bool "Log execs within chroot"
47858 + help
47859 + If you say Y here, all executions inside a chroot jail will be logged
47860 + to syslog. This can cause a large amount of logs if certain
47861 + applications (eg. djb's daemontools) are installed on the system, and
47862 + is therefore left as an option. If the sysctl option is enabled, a
47863 + sysctl option with name "chroot_execlog" is created.
47864 +
47865 +config GRKERNSEC_AUDIT_PTRACE
47866 + bool "Ptrace logging"
47867 + help
47868 + If you say Y here, all attempts to attach to a process via ptrace
47869 + will be logged. If the sysctl option is enabled, a sysctl option
47870 + with name "audit_ptrace" is created.
47871 +
47872 +config GRKERNSEC_AUDIT_CHDIR
47873 + bool "Chdir logging"
47874 + help
47875 + If you say Y here, all chdir() calls will be logged. If the sysctl
47876 + option is enabled, a sysctl option with name "audit_chdir" is created.
47877 +
47878 +config GRKERNSEC_AUDIT_MOUNT
47879 + bool "(Un)Mount logging"
47880 + help
47881 + If you say Y here, all mounts and unmounts will be logged. If the
47882 + sysctl option is enabled, a sysctl option with name "audit_mount" is
47883 + created.
47884 +
47885 +config GRKERNSEC_SIGNAL
47886 + bool "Signal logging"
47887 + help
47888 + If you say Y here, certain important signals will be logged, such as
47889 + SIGSEGV, which will as a result inform you of when an error in a program
47890 + occurred, which in some cases could mean a possible exploit attempt.
47891 + If the sysctl option is enabled, a sysctl option with name
47892 + "signal_logging" is created.
47893 +
47894 +config GRKERNSEC_FORKFAIL
47895 + bool "Fork failure logging"
47896 + help
47897 + If you say Y here, all failed fork() attempts will be logged.
47898 + This could suggest a fork bomb, or someone attempting to overstep
47899 + their process limit. If the sysctl option is enabled, a sysctl option
47900 + with name "forkfail_logging" is created.
47901 +
47902 +config GRKERNSEC_TIME
47903 + bool "Time change logging"
47904 + help
47905 + If you say Y here, any changes of the system clock will be logged.
47906 + If the sysctl option is enabled, a sysctl option with name
47907 + "timechange_logging" is created.
47908 +
47909 +config GRKERNSEC_PROC_IPADDR
47910 + bool "/proc/<pid>/ipaddr support"
47911 + help
47912 + If you say Y here, a new entry will be added to each /proc/<pid>
47913 + directory that contains the IP address of the person using the task.
47914 + The IP is carried across local TCP and AF_UNIX stream sockets.
47915 + This information can be useful for IDS/IPSes to perform remote response
47916 + to a local attack. The entry is readable by only the owner of the
47917 + process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
47918 + the RBAC system), and thus does not create privacy concerns.
47919 +
47920 +config GRKERNSEC_RWXMAP_LOG
47921 + bool 'Denied RWX mmap/mprotect logging'
47922 + depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
47923 + help
47924 + If you say Y here, calls to mmap() and mprotect() with explicit
47925 + usage of PROT_WRITE and PROT_EXEC together will be logged when
47926 + denied by the PAX_MPROTECT feature. If the sysctl option is
47927 + enabled, a sysctl option with name "rwxmap_logging" is created.
47928 +
47929 +config GRKERNSEC_AUDIT_TEXTREL
47930 + bool 'ELF text relocations logging (READ HELP)'
47931 + depends on PAX_MPROTECT
47932 + help
47933 + If you say Y here, text relocations will be logged with the filename
47934 + of the offending library or binary. The purpose of the feature is
47935 + to help Linux distribution developers get rid of libraries and
47936 + binaries that need text relocations which hinder the future progress
47937 + of PaX. Only Linux distribution developers should say Y here, and
47938 + never on a production machine, as this option creates an information
47939 + leak that could aid an attacker in defeating the randomization of
47940 + a single memory region. If the sysctl option is enabled, a sysctl
47941 + option with name "audit_textrel" is created.
47942 +
47943 +endmenu
47944 +
47945 +menu "Executable Protections"
47946 +depends on GRKERNSEC
47947 +
47948 +config GRKERNSEC_DMESG
47949 + bool "Dmesg(8) restriction"
47950 + help
47951 + If you say Y here, non-root users will not be able to use dmesg(8)
47952 + to view up to the last 4kb of messages in the kernel's log buffer.
47953 + The kernel's log buffer often contains kernel addresses and other
47954 + identifying information useful to an attacker in fingerprinting a
47955 + system for a targeted exploit.
47956 + If the sysctl option is enabled, a sysctl option with name "dmesg" is
47957 + created.
47958 +
47959 +config GRKERNSEC_HARDEN_PTRACE
47960 + bool "Deter ptrace-based process snooping"
47961 + help
47962 + If you say Y here, TTY sniffers and other malicious monitoring
47963 + programs implemented through ptrace will be defeated. If you
47964 + have been using the RBAC system, this option has already been
47965 + enabled for several years for all users, with the ability to make
47966 + fine-grained exceptions.
47967 +
47968 + This option only affects the ability of non-root users to ptrace
47969 + processes that are not a descendent of the ptracing process.
47970 + This means that strace ./binary and gdb ./binary will still work,
47971 + but attaching to arbitrary processes will not. If the sysctl
47972 + option is enabled, a sysctl option with name "harden_ptrace" is
47973 + created.
47974 +
47975 +config GRKERNSEC_PTRACE_READEXEC
47976 + bool "Require read access to ptrace sensitive binaries"
47977 + help
47978 + If you say Y here, unprivileged users will not be able to ptrace unreadable
47979 + binaries. This option is useful in environments that
47980 + remove the read bits (e.g. file mode 4711) from suid binaries to
47981 + prevent infoleaking of their contents. This option adds
47982 + consistency to the use of that file mode, as the binary could normally
47983 + be read out when run without privileges while ptracing.
47984 +
47985 + If the sysctl option is enabled, a sysctl option with name "ptrace_readexec"
47986 + is created.
47987 +
47988 +config GRKERNSEC_SETXID
47989 + bool "Enforce consistent multithreaded privileges"
47990 + help
47991 + If you say Y here, a change from a root uid to a non-root uid
47992 + in a multithreaded application will cause the resulting uids,
47993 + gids, supplementary groups, and capabilities in that thread
47994 + to be propagated to the other threads of the process. In most
47995 + cases this is unnecessary, as glibc will emulate this behavior
47996 + on behalf of the application. Other libcs do not act in the
47997 + same way, allowing the other threads of the process to continue
47998 + running with root privileges. If the sysctl option is enabled,
47999 + a sysctl option with name "consistent_setxid" is created.
48000 +
48001 +config GRKERNSEC_TPE
48002 + bool "Trusted Path Execution (TPE)"
48003 + help
48004 + If you say Y here, you will be able to choose a gid to add to the
48005 + supplementary groups of users you want to mark as "untrusted."
48006 + These users will not be able to execute any files that are not in
48007 + root-owned directories writable only by root. If the sysctl option
48008 + is enabled, a sysctl option with name "tpe" is created.
48009 +
48010 +config GRKERNSEC_TPE_ALL
48011 + bool "Partially restrict all non-root users"
48012 + depends on GRKERNSEC_TPE
48013 + help
48014 + If you say Y here, all non-root users will be covered under
48015 + a weaker TPE restriction. This is separate from, and in addition to,
48016 + the main TPE options that you have selected elsewhere. Thus, if a
48017 + "trusted" GID is chosen, this restriction applies to even that GID.
48018 + Under this restriction, all non-root users will only be allowed to
48019 + execute files in directories they own that are not group or
48020 + world-writable, or in directories owned by root and writable only by
48021 + root. If the sysctl option is enabled, a sysctl option with name
48022 + "tpe_restrict_all" is created.
48023 +
48024 +config GRKERNSEC_TPE_INVERT
48025 + bool "Invert GID option"
48026 + depends on GRKERNSEC_TPE
48027 + help
48028 + If you say Y here, the group you specify in the TPE configuration will
48029 + decide what group TPE restrictions will be *disabled* for. This
48030 + option is useful if you want TPE restrictions to be applied to most
48031 + users on the system. If the sysctl option is enabled, a sysctl option
48032 + with name "tpe_invert" is created. Unlike other sysctl options, this
48033 + entry will default to on for backward-compatibility.
48034 +
48035 +config GRKERNSEC_TPE_GID
48036 + int "GID for untrusted users"
48037 + depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
48038 + default 1005
48039 + help
48040 + Setting this GID determines what group TPE restrictions will be
48041 + *enabled* for. If the sysctl option is enabled, a sysctl option
48042 + with name "tpe_gid" is created.
48043 +
48044 +config GRKERNSEC_TPE_GID
48045 + int "GID for trusted users"
48046 + depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
48047 + default 1005
48048 + help
48049 + Setting this GID determines what group TPE restrictions will be
48050 + *disabled* for. If the sysctl option is enabled, a sysctl option
48051 + with name "tpe_gid" is created.
48052 +
48053 +endmenu
48054 +menu "Network Protections"
48055 +depends on GRKERNSEC
48056 +
48057 +config GRKERNSEC_RANDNET
48058 + bool "Larger entropy pools"
48059 + help
48060 + If you say Y here, the entropy pools used for many features of Linux
48061 + and grsecurity will be doubled in size. Since several grsecurity
48062 + features use additional randomness, it is recommended that you say Y
48063 + here. Saying Y here has a similar effect as modifying
48064 + /proc/sys/kernel/random/poolsize.
48065 +
48066 +config GRKERNSEC_BLACKHOLE
48067 + bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
48068 + depends on NET
48069 + help
48070 + If you say Y here, neither TCP resets nor ICMP
48071 + destination-unreachable packets will be sent in response to packets
48072 + sent to ports for which no associated listening process exists.
48073 + This feature supports both IPV4 and IPV6 and exempts the
48074 + loopback interface from blackholing. Enabling this feature
48075 + makes a host more resilient to DoS attacks and reduces network
48076 + visibility against scanners.
48077 +
48078 + The blackhole feature as-implemented is equivalent to the FreeBSD
48079 + blackhole feature, as it prevents RST responses to all packets, not
48080 + just SYNs. Under most application behavior this causes no
48081 + problems, but applications (like haproxy) may not close certain
48082 + connections in a way that cleanly terminates them on the remote
48083 + end, leaving the remote host in LAST_ACK state. Because of this
48084 + side-effect and to prevent intentional LAST_ACK DoSes, this
48085 + feature also adds automatic mitigation against such attacks.
48086 + The mitigation drastically reduces the amount of time a socket
48087 + can spend in LAST_ACK state. If you're using haproxy and not
48088 + all servers it connects to have this option enabled, consider
48089 + disabling this feature on the haproxy host.
48090 +
48091 + If the sysctl option is enabled, two sysctl options with names
48092 + "ip_blackhole" and "lastack_retries" will be created.
48093 + While "ip_blackhole" takes the standard zero/non-zero on/off
48094 + toggle, "lastack_retries" uses the same kinds of values as
48095 + "tcp_retries1" and "tcp_retries2". The default value of 4
48096 + prevents a socket from lasting more than 45 seconds in LAST_ACK
48097 + state.
48098 +
48099 +config GRKERNSEC_SOCKET
48100 + bool "Socket restrictions"
48101 + depends on NET
48102 + help
48103 + If you say Y here, you will be able to choose from several options.
48104 + If you assign a GID on your system and add it to the supplementary
48105 + groups of users you want to restrict socket access to, this patch
48106 + will perform up to three things, based on the option(s) you choose.
48107 +
48108 +config GRKERNSEC_SOCKET_ALL
48109 + bool "Deny any sockets to group"
48110 + depends on GRKERNSEC_SOCKET
48111 + help
48112 + If you say Y here, you will be able to choose a GID of whose users will
48113 + be unable to connect to other hosts from your machine or run server
48114 + applications from your machine. If the sysctl option is enabled, a
48115 + sysctl option with name "socket_all" is created.
48116 +
48117 +config GRKERNSEC_SOCKET_ALL_GID
48118 + int "GID to deny all sockets for"
48119 + depends on GRKERNSEC_SOCKET_ALL
48120 + default 1004
48121 + help
48122 + Here you can choose the GID to disable socket access for. Remember to
48123 + add the users you want socket access disabled for to the GID
48124 + specified here. If the sysctl option is enabled, a sysctl option
48125 + with name "socket_all_gid" is created.
48126 +
48127 +config GRKERNSEC_SOCKET_CLIENT
48128 + bool "Deny client sockets to group"
48129 + depends on GRKERNSEC_SOCKET
48130 + help
48131 + If you say Y here, you will be able to choose a GID of whose users will
48132 + be unable to connect to other hosts from your machine, but will be
48133 + able to run servers. If this option is enabled, all users in the group
48134 + you specify will have to use passive mode when initiating ftp transfers
48135 + from the shell on your machine. If the sysctl option is enabled, a
48136 + sysctl option with name "socket_client" is created.
48137 +
48138 +config GRKERNSEC_SOCKET_CLIENT_GID
48139 + int "GID to deny client sockets for"
48140 + depends on GRKERNSEC_SOCKET_CLIENT
48141 + default 1003
48142 + help
48143 + Here you can choose the GID to disable client socket access for.
48144 + Remember to add the users you want client socket access disabled for to
48145 + the GID specified here. If the sysctl option is enabled, a sysctl
48146 + option with name "socket_client_gid" is created.
48147 +
48148 +config GRKERNSEC_SOCKET_SERVER
48149 + bool "Deny server sockets to group"
48150 + depends on GRKERNSEC_SOCKET
48151 + help
48152 + If you say Y here, you will be able to choose a GID of whose users will
48153 + be unable to run server applications from your machine. If the sysctl
48154 + option is enabled, a sysctl option with name "socket_server" is created.
48155 +
48156 +config GRKERNSEC_SOCKET_SERVER_GID
48157 + int "GID to deny server sockets for"
48158 + depends on GRKERNSEC_SOCKET_SERVER
48159 + default 1002
48160 + help
48161 + Here you can choose the GID to disable server socket access for.
48162 + Remember to add the users you want server socket access disabled for to
48163 + the GID specified here. If the sysctl option is enabled, a sysctl
48164 + option with name "socket_server_gid" is created.
48165 +
48166 +endmenu
48167 +menu "Sysctl support"
48168 +depends on GRKERNSEC && SYSCTL
48169 +
48170 +config GRKERNSEC_SYSCTL
48171 + bool "Sysctl support"
48172 + help
48173 + If you say Y here, you will be able to change the options that
48174 + grsecurity runs with at bootup, without having to recompile your
48175 + kernel. You can echo values to files in /proc/sys/kernel/grsecurity
48176 + to enable (1) or disable (0) various features. All the sysctl entries
48177 + are mutable until the "grsec_lock" entry is set to a non-zero value.
48178 + All features enabled in the kernel configuration are disabled at boot
48179 + if you do not say Y to the "Turn on features by default" option.
48180 + All options should be set at startup, and the grsec_lock entry should
48181 + be set to a non-zero value after all the options are set.
48182 + *THIS IS EXTREMELY IMPORTANT*
48183 +
48184 +config GRKERNSEC_SYSCTL_DISTRO
48185 + bool "Extra sysctl support for distro makers (READ HELP)"
48186 + depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
48187 + help
48188 + If you say Y here, additional sysctl options will be created
48189 + for features that affect processes running as root. Therefore,
48190 + it is critical when using this option that the grsec_lock entry be
48191 + enabled after boot. Only distros with prebuilt kernel packages
48192 + with this option enabled that can ensure grsec_lock is enabled
48193 + after boot should use this option.
48194 + *Failure to set grsec_lock after boot makes all grsec features
48195 + this option covers useless*
48196 +
48197 + Currently this option creates the following sysctl entries:
48198 + "Disable Privileged I/O": "disable_priv_io"
48199 +
48200 +config GRKERNSEC_SYSCTL_ON
48201 + bool "Turn on features by default"
48202 + depends on GRKERNSEC_SYSCTL
48203 + help
48204 + If you say Y here, instead of having all features enabled in the
48205 + kernel configuration disabled at boot time, the features will be
48206 + enabled at boot time. It is recommended you say Y here unless
48207 + there is some reason you would want all sysctl-tunable features to
48208 + be disabled by default. As mentioned elsewhere, it is important
48209 + to enable the grsec_lock entry once you have finished modifying
48210 + the sysctl entries.
48211 +
48212 +endmenu
48213 +menu "Logging Options"
48214 +depends on GRKERNSEC
48215 +
48216 +config GRKERNSEC_FLOODTIME
48217 + int "Seconds in between log messages (minimum)"
48218 + default 10
48219 + help
48220 + This option allows you to enforce the number of seconds between
48221 + grsecurity log messages. The default should be suitable for most
48222 + people, however, if you choose to change it, choose a value small enough
48223 + to allow informative logs to be produced, but large enough to
48224 + prevent flooding.
48225 +
48226 +config GRKERNSEC_FLOODBURST
48227 + int "Number of messages in a burst (maximum)"
48228 + default 6
48229 + help
48230 + This option allows you to choose the maximum number of messages allowed
48231 + within the flood time interval you chose in a separate option. The
48232 + default should be suitable for most people, however if you find that
48233 + many of your logs are being interpreted as flooding, you may want to
48234 + raise this value.
48235 +
48236 +endmenu
48237 +
48238 +endmenu
48239 diff --git a/grsecurity/Makefile b/grsecurity/Makefile
48240 new file mode 100644
48241 index 0000000..be9ae3a
48242 --- /dev/null
48243 +++ b/grsecurity/Makefile
48244 @@ -0,0 +1,36 @@
48245 +# grsecurity's ACL system was originally written in 2001 by Michael Dalton
48246 +# during 2001-2009 it has been completely redesigned by Brad Spengler
48247 +# into an RBAC system
48248 +#
48249 +# All code in this directory and various hooks inserted throughout the kernel
48250 +# are copyright Brad Spengler - Open Source Security, Inc., and released
48251 +# under the GPL v2 or higher
48252 +
48253 +obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
48254 + grsec_mount.o grsec_sig.o grsec_sysctl.o \
48255 + grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
48256 +
48257 +obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
48258 + gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
48259 + gracl_learn.o grsec_log.o
48260 +obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
48261 +
48262 +ifdef CONFIG_NET
48263 +obj-y += grsec_sock.o
48264 +obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
48265 +endif
48266 +
48267 +ifndef CONFIG_GRKERNSEC
48268 +obj-y += grsec_disabled.o
48269 +endif
48270 +
48271 +ifdef CONFIG_GRKERNSEC_HIDESYM
48272 +extra-y := grsec_hidesym.o
48273 +$(obj)/grsec_hidesym.o:
48274 + @-chmod -f 500 /boot
48275 + @-chmod -f 500 /lib/modules
48276 + @-chmod -f 500 /lib64/modules
48277 + @-chmod -f 500 /lib32/modules
48278 + @-chmod -f 700 .
48279 + @echo ' grsec: protected kernel image paths'
48280 +endif
48281 diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
48282 new file mode 100644
48283 index 0000000..d3b423d
48284 --- /dev/null
48285 +++ b/grsecurity/gracl.c
48286 @@ -0,0 +1,4155 @@
48287 +#include <linux/kernel.h>
48288 +#include <linux/module.h>
48289 +#include <linux/sched.h>
48290 +#include <linux/mm.h>
48291 +#include <linux/file.h>
48292 +#include <linux/fs.h>
48293 +#include <linux/namei.h>
48294 +#include <linux/mount.h>
48295 +#include <linux/tty.h>
48296 +#include <linux/proc_fs.h>
48297 +#include <linux/lglock.h>
48298 +#include <linux/slab.h>
48299 +#include <linux/vmalloc.h>
48300 +#include <linux/types.h>
48301 +#include <linux/sysctl.h>
48302 +#include <linux/netdevice.h>
48303 +#include <linux/ptrace.h>
48304 +#include <linux/gracl.h>
48305 +#include <linux/gralloc.h>
48306 +#include <linux/security.h>
48307 +#include <linux/grinternal.h>
48308 +#include <linux/pid_namespace.h>
48309 +#include <linux/fdtable.h>
48310 +#include <linux/percpu.h>
48311 +
48312 +#include <asm/uaccess.h>
48313 +#include <asm/errno.h>
48314 +#include <asm/mman.h>
48315 +
48316 +static struct acl_role_db acl_role_set;
48317 +static struct name_db name_set;
48318 +static struct inodev_db inodev_set;
48319 +
48320 +/* for keeping track of userspace pointers used for subjects, so we
48321 + can share references in the kernel as well
48322 +*/
48323 +
48324 +static struct path real_root;
48325 +
48326 +static struct acl_subj_map_db subj_map_set;
48327 +
48328 +static struct acl_role_label *default_role;
48329 +
48330 +static struct acl_role_label *role_list;
48331 +
48332 +static u16 acl_sp_role_value;
48333 +
48334 +extern char *gr_shared_page[4];
48335 +static DEFINE_MUTEX(gr_dev_mutex);
48336 +DEFINE_RWLOCK(gr_inode_lock);
48337 +
48338 +struct gr_arg *gr_usermode;
48339 +
48340 +static unsigned int gr_status __read_only = GR_STATUS_INIT;
48341 +
48342 +extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
48343 +extern void gr_clear_learn_entries(void);
48344 +
48345 +#ifdef CONFIG_GRKERNSEC_RESLOG
48346 +extern void gr_log_resource(const struct task_struct *task,
48347 + const int res, const unsigned long wanted, const int gt);
48348 +#endif
48349 +
48350 +unsigned char *gr_system_salt;
48351 +unsigned char *gr_system_sum;
48352 +
48353 +static struct sprole_pw **acl_special_roles = NULL;
48354 +static __u16 num_sprole_pws = 0;
48355 +
48356 +static struct acl_role_label *kernel_role = NULL;
48357 +
48358 +static unsigned int gr_auth_attempts = 0;
48359 +static unsigned long gr_auth_expires = 0UL;
48360 +
48361 +#ifdef CONFIG_NET
48362 +extern struct vfsmount *sock_mnt;
48363 +#endif
48364 +
48365 +extern struct vfsmount *pipe_mnt;
48366 +extern struct vfsmount *shm_mnt;
48367 +#ifdef CONFIG_HUGETLBFS
48368 +extern struct vfsmount *hugetlbfs_vfsmount;
48369 +#endif
48370 +
48371 +static struct acl_object_label *fakefs_obj_rw;
48372 +static struct acl_object_label *fakefs_obj_rwx;
48373 +
48374 +extern int gr_init_uidset(void);
48375 +extern void gr_free_uidset(void);
48376 +extern void gr_remove_uid(uid_t uid);
48377 +extern int gr_find_uid(uid_t uid);
48378 +
48379 +DECLARE_BRLOCK(vfsmount_lock);
48380 +
48381 +__inline__ int
48382 +gr_acl_is_enabled(void)
48383 +{
48384 + return (gr_status & GR_READY);
48385 +}
48386 +
48387 +#ifdef CONFIG_BTRFS_FS
48388 +extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
48389 +extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
48390 +#endif
48391 +
48392 +static inline dev_t __get_dev(const struct dentry *dentry)
48393 +{
48394 +#ifdef CONFIG_BTRFS_FS
48395 + if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
48396 + return get_btrfs_dev_from_inode(dentry->d_inode);
48397 + else
48398 +#endif
48399 + return dentry->d_inode->i_sb->s_dev;
48400 +}
48401 +
48402 +dev_t gr_get_dev_from_dentry(struct dentry *dentry)
48403 +{
48404 + return __get_dev(dentry);
48405 +}
48406 +
48407 +static char gr_task_roletype_to_char(struct task_struct *task)
48408 +{
48409 + switch (task->role->roletype &
48410 + (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
48411 + GR_ROLE_SPECIAL)) {
48412 + case GR_ROLE_DEFAULT:
48413 + return 'D';
48414 + case GR_ROLE_USER:
48415 + return 'U';
48416 + case GR_ROLE_GROUP:
48417 + return 'G';
48418 + case GR_ROLE_SPECIAL:
48419 + return 'S';
48420 + }
48421 +
48422 + return 'X';
48423 +}
48424 +
48425 +char gr_roletype_to_char(void)
48426 +{
48427 + return gr_task_roletype_to_char(current);
48428 +}
48429 +
48430 +__inline__ int
48431 +gr_acl_tpe_check(void)
48432 +{
48433 + if (unlikely(!(gr_status & GR_READY)))
48434 + return 0;
48435 + if (current->role->roletype & GR_ROLE_TPE)
48436 + return 1;
48437 + else
48438 + return 0;
48439 +}
48440 +
48441 +int
48442 +gr_handle_rawio(const struct inode *inode)
48443 +{
48444 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
48445 + if (inode && S_ISBLK(inode->i_mode) &&
48446 + grsec_enable_chroot_caps && proc_is_chrooted(current) &&
48447 + !capable(CAP_SYS_RAWIO))
48448 + return 1;
48449 +#endif
48450 + return 0;
48451 +}
48452 +
48453 +static int
48454 +gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
48455 +{
48456 + if (likely(lena != lenb))
48457 + return 0;
48458 +
48459 + return !memcmp(a, b, lena);
48460 +}
48461 +
48462 +static int prepend(char **buffer, int *buflen, const char *str, int namelen)
48463 +{
48464 + *buflen -= namelen;
48465 + if (*buflen < 0)
48466 + return -ENAMETOOLONG;
48467 + *buffer -= namelen;
48468 + memcpy(*buffer, str, namelen);
48469 + return 0;
48470 +}
48471 +
48472 +static int prepend_name(char **buffer, int *buflen, struct qstr *name)
48473 +{
48474 + return prepend(buffer, buflen, name->name, name->len);
48475 +}
48476 +
48477 +static int prepend_path(const struct path *path, struct path *root,
48478 + char **buffer, int *buflen)
48479 +{
48480 + struct dentry *dentry = path->dentry;
48481 + struct vfsmount *vfsmnt = path->mnt;
48482 + bool slash = false;
48483 + int error = 0;
48484 +
48485 + while (dentry != root->dentry || vfsmnt != root->mnt) {
48486 + struct dentry * parent;
48487 +
48488 + if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
48489 + /* Global root? */
48490 + if (vfsmnt->mnt_parent == vfsmnt) {
48491 + goto out;
48492 + }
48493 + dentry = vfsmnt->mnt_mountpoint;
48494 + vfsmnt = vfsmnt->mnt_parent;
48495 + continue;
48496 + }
48497 + parent = dentry->d_parent;
48498 + prefetch(parent);
48499 + spin_lock(&dentry->d_lock);
48500 + error = prepend_name(buffer, buflen, &dentry->d_name);
48501 + spin_unlock(&dentry->d_lock);
48502 + if (!error)
48503 + error = prepend(buffer, buflen, "/", 1);
48504 + if (error)
48505 + break;
48506 +
48507 + slash = true;
48508 + dentry = parent;
48509 + }
48510 +
48511 +out:
48512 + if (!error && !slash)
48513 + error = prepend(buffer, buflen, "/", 1);
48514 +
48515 + return error;
48516 +}
48517 +
48518 +/* this must be called with vfsmount_lock and rename_lock held */
48519 +
48520 +static char *__our_d_path(const struct path *path, struct path *root,
48521 + char *buf, int buflen)
48522 +{
48523 + char *res = buf + buflen;
48524 + int error;
48525 +
48526 + prepend(&res, &buflen, "\0", 1);
48527 + error = prepend_path(path, root, &res, &buflen);
48528 + if (error)
48529 + return ERR_PTR(error);
48530 +
48531 + return res;
48532 +}
48533 +
48534 +static char *
48535 +gen_full_path(struct path *path, struct path *root, char *buf, int buflen)
48536 +{
48537 + char *retval;
48538 +
48539 + retval = __our_d_path(path, root, buf, buflen);
48540 + if (unlikely(IS_ERR(retval)))
48541 + retval = strcpy(buf, "<path too long>");
48542 + else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
48543 + retval[1] = '\0';
48544 +
48545 + return retval;
48546 +}
48547 +
48548 +static char *
48549 +__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
48550 + char *buf, int buflen)
48551 +{
48552 + struct path path;
48553 + char *res;
48554 +
48555 + path.dentry = (struct dentry *)dentry;
48556 + path.mnt = (struct vfsmount *)vfsmnt;
48557 +
48558 + /* we can use real_root.dentry, real_root.mnt, because this is only called
48559 + by the RBAC system */
48560 + res = gen_full_path(&path, &real_root, buf, buflen);
48561 +
48562 + return res;
48563 +}
48564 +
48565 +static char *
48566 +d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
48567 + char *buf, int buflen)
48568 +{
48569 + char *res;
48570 + struct path path;
48571 + struct path root;
48572 + struct task_struct *reaper = &init_task;
48573 +
48574 + path.dentry = (struct dentry *)dentry;
48575 + path.mnt = (struct vfsmount *)vfsmnt;
48576 +
48577 + /* we can't use real_root.dentry, real_root.mnt, because they belong only to the RBAC system */
48578 + get_fs_root(reaper->fs, &root);
48579 +
48580 + write_seqlock(&rename_lock);
48581 + br_read_lock(vfsmount_lock);
48582 + res = gen_full_path(&path, &root, buf, buflen);
48583 + br_read_unlock(vfsmount_lock);
48584 + write_sequnlock(&rename_lock);
48585 +
48586 + path_put(&root);
48587 + return res;
48588 +}
48589 +
48590 +static char *
48591 +gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
48592 +{
48593 + char *ret;
48594 + write_seqlock(&rename_lock);
48595 + br_read_lock(vfsmount_lock);
48596 + ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
48597 + PAGE_SIZE);
48598 + br_read_unlock(vfsmount_lock);
48599 + write_sequnlock(&rename_lock);
48600 + return ret;
48601 +}
48602 +
48603 +static char *
48604 +gr_to_proc_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
48605 +{
48606 + char *ret;
48607 + char *buf;
48608 + int buflen;
48609 +
48610 + write_seqlock(&rename_lock);
48611 + br_read_lock(vfsmount_lock);
48612 + buf = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
48613 + ret = __d_real_path(dentry, mnt, buf, PAGE_SIZE - 6);
48614 + buflen = (int)(ret - buf);
48615 + if (buflen >= 5)
48616 + prepend(&ret, &buflen, "/proc", 5);
48617 + else
48618 + ret = strcpy(buf, "<path too long>");
48619 + br_read_unlock(vfsmount_lock);
48620 + write_sequnlock(&rename_lock);
48621 + return ret;
48622 +}
48623 +
48624 +char *
48625 +gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
48626 +{
48627 + return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
48628 + PAGE_SIZE);
48629 +}
48630 +
48631 +char *
48632 +gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
48633 +{
48634 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
48635 + PAGE_SIZE);
48636 +}
48637 +
48638 +char *
48639 +gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
48640 +{
48641 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
48642 + PAGE_SIZE);
48643 +}
48644 +
48645 +char *
48646 +gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
48647 +{
48648 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
48649 + PAGE_SIZE);
48650 +}
48651 +
48652 +char *
48653 +gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
48654 +{
48655 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
48656 + PAGE_SIZE);
48657 +}
48658 +
48659 +__inline__ __u32
48660 +to_gr_audit(const __u32 reqmode)
48661 +{
48662 + /* masks off auditable permission flags, then shifts them to create
48663 + auditing flags, and adds the special case of append auditing if
48664 + we're requesting write */
48665 + return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
48666 +}
48667 +
48668 +struct acl_subject_label *
48669 +lookup_subject_map(const struct acl_subject_label *userp)
48670 +{
48671 + unsigned int index = shash(userp, subj_map_set.s_size);
48672 + struct subject_map *match;
48673 +
48674 + match = subj_map_set.s_hash[index];
48675 +
48676 + while (match && match->user != userp)
48677 + match = match->next;
48678 +
48679 + if (match != NULL)
48680 + return match->kernel;
48681 + else
48682 + return NULL;
48683 +}
48684 +
48685 +static void
48686 +insert_subj_map_entry(struct subject_map *subjmap)
48687 +{
48688 + unsigned int index = shash(subjmap->user, subj_map_set.s_size);
48689 + struct subject_map **curr;
48690 +
48691 + subjmap->prev = NULL;
48692 +
48693 + curr = &subj_map_set.s_hash[index];
48694 + if (*curr != NULL)
48695 + (*curr)->prev = subjmap;
48696 +
48697 + subjmap->next = *curr;
48698 + *curr = subjmap;
48699 +
48700 + return;
48701 +}
48702 +
48703 +static struct acl_role_label *
48704 +lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
48705 + const gid_t gid)
48706 +{
48707 + unsigned int index = rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
48708 + struct acl_role_label *match;
48709 + struct role_allowed_ip *ipp;
48710 + unsigned int x;
48711 + u32 curr_ip = task->signal->curr_ip;
48712 +
48713 + task->signal->saved_ip = curr_ip;
48714 +
48715 + match = acl_role_set.r_hash[index];
48716 +
48717 + while (match) {
48718 + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
48719 + for (x = 0; x < match->domain_child_num; x++) {
48720 + if (match->domain_children[x] == uid)
48721 + goto found;
48722 + }
48723 + } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
48724 + break;
48725 + match = match->next;
48726 + }
48727 +found:
48728 + if (match == NULL) {
48729 + try_group:
48730 + index = rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
48731 + match = acl_role_set.r_hash[index];
48732 +
48733 + while (match) {
48734 + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
48735 + for (x = 0; x < match->domain_child_num; x++) {
48736 + if (match->domain_children[x] == gid)
48737 + goto found2;
48738 + }
48739 + } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
48740 + break;
48741 + match = match->next;
48742 + }
48743 +found2:
48744 + if (match == NULL)
48745 + match = default_role;
48746 + if (match->allowed_ips == NULL)
48747 + return match;
48748 + else {
48749 + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
48750 + if (likely
48751 + ((ntohl(curr_ip) & ipp->netmask) ==
48752 + (ntohl(ipp->addr) & ipp->netmask)))
48753 + return match;
48754 + }
48755 + match = default_role;
48756 + }
48757 + } else if (match->allowed_ips == NULL) {
48758 + return match;
48759 + } else {
48760 + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
48761 + if (likely
48762 + ((ntohl(curr_ip) & ipp->netmask) ==
48763 + (ntohl(ipp->addr) & ipp->netmask)))
48764 + return match;
48765 + }
48766 + goto try_group;
48767 + }
48768 +
48769 + return match;
48770 +}
48771 +
48772 +struct acl_subject_label *
48773 +lookup_acl_subj_label(const ino_t ino, const dev_t dev,
48774 + const struct acl_role_label *role)
48775 +{
48776 + unsigned int index = fhash(ino, dev, role->subj_hash_size);
48777 + struct acl_subject_label *match;
48778 +
48779 + match = role->subj_hash[index];
48780 +
48781 + while (match && (match->inode != ino || match->device != dev ||
48782 + (match->mode & GR_DELETED))) {
48783 + match = match->next;
48784 + }
48785 +
48786 + if (match && !(match->mode & GR_DELETED))
48787 + return match;
48788 + else
48789 + return NULL;
48790 +}
48791 +
48792 +struct acl_subject_label *
48793 +lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
48794 + const struct acl_role_label *role)
48795 +{
48796 + unsigned int index = fhash(ino, dev, role->subj_hash_size);
48797 + struct acl_subject_label *match;
48798 +
48799 + match = role->subj_hash[index];
48800 +
48801 + while (match && (match->inode != ino || match->device != dev ||
48802 + !(match->mode & GR_DELETED))) {
48803 + match = match->next;
48804 + }
48805 +
48806 + if (match && (match->mode & GR_DELETED))
48807 + return match;
48808 + else
48809 + return NULL;
48810 +}
48811 +
48812 +static struct acl_object_label *
48813 +lookup_acl_obj_label(const ino_t ino, const dev_t dev,
48814 + const struct acl_subject_label *subj)
48815 +{
48816 + unsigned int index = fhash(ino, dev, subj->obj_hash_size);
48817 + struct acl_object_label *match;
48818 +
48819 + match = subj->obj_hash[index];
48820 +
48821 + while (match && (match->inode != ino || match->device != dev ||
48822 + (match->mode & GR_DELETED))) {
48823 + match = match->next;
48824 + }
48825 +
48826 + if (match && !(match->mode & GR_DELETED))
48827 + return match;
48828 + else
48829 + return NULL;
48830 +}
48831 +
48832 +static struct acl_object_label *
48833 +lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
48834 + const struct acl_subject_label *subj)
48835 +{
48836 + unsigned int index = fhash(ino, dev, subj->obj_hash_size);
48837 + struct acl_object_label *match;
48838 +
48839 + match = subj->obj_hash[index];
48840 +
48841 + while (match && (match->inode != ino || match->device != dev ||
48842 + !(match->mode & GR_DELETED))) {
48843 + match = match->next;
48844 + }
48845 +
48846 + if (match && (match->mode & GR_DELETED))
48847 + return match;
48848 +
48849 + match = subj->obj_hash[index];
48850 +
48851 + while (match && (match->inode != ino || match->device != dev ||
48852 + (match->mode & GR_DELETED))) {
48853 + match = match->next;
48854 + }
48855 +
48856 + if (match && !(match->mode & GR_DELETED))
48857 + return match;
48858 + else
48859 + return NULL;
48860 +}
48861 +
48862 +static struct name_entry *
48863 +lookup_name_entry(const char *name)
48864 +{
48865 + unsigned int len = strlen(name);
48866 + unsigned int key = full_name_hash(name, len);
48867 + unsigned int index = key % name_set.n_size;
48868 + struct name_entry *match;
48869 +
48870 + match = name_set.n_hash[index];
48871 +
48872 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
48873 + match = match->next;
48874 +
48875 + return match;
48876 +}
48877 +
48878 +static struct name_entry *
48879 +lookup_name_entry_create(const char *name)
48880 +{
48881 + unsigned int len = strlen(name);
48882 + unsigned int key = full_name_hash(name, len);
48883 + unsigned int index = key % name_set.n_size;
48884 + struct name_entry *match;
48885 +
48886 + match = name_set.n_hash[index];
48887 +
48888 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
48889 + !match->deleted))
48890 + match = match->next;
48891 +
48892 + if (match && match->deleted)
48893 + return match;
48894 +
48895 + match = name_set.n_hash[index];
48896 +
48897 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
48898 + match->deleted))
48899 + match = match->next;
48900 +
48901 + if (match && !match->deleted)
48902 + return match;
48903 + else
48904 + return NULL;
48905 +}
48906 +
48907 +static struct inodev_entry *
48908 +lookup_inodev_entry(const ino_t ino, const dev_t dev)
48909 +{
48910 + unsigned int index = fhash(ino, dev, inodev_set.i_size);
48911 + struct inodev_entry *match;
48912 +
48913 + match = inodev_set.i_hash[index];
48914 +
48915 + while (match && (match->nentry->inode != ino || match->nentry->device != dev))
48916 + match = match->next;
48917 +
48918 + return match;
48919 +}
48920 +
48921 +static void
48922 +insert_inodev_entry(struct inodev_entry *entry)
48923 +{
48924 + unsigned int index = fhash(entry->nentry->inode, entry->nentry->device,
48925 + inodev_set.i_size);
48926 + struct inodev_entry **curr;
48927 +
48928 + entry->prev = NULL;
48929 +
48930 + curr = &inodev_set.i_hash[index];
48931 + if (*curr != NULL)
48932 + (*curr)->prev = entry;
48933 +
48934 + entry->next = *curr;
48935 + *curr = entry;
48936 +
48937 + return;
48938 +}
48939 +
48940 +static void
48941 +__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
48942 +{
48943 + unsigned int index =
48944 + rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
48945 + struct acl_role_label **curr;
48946 + struct acl_role_label *tmp;
48947 +
48948 + curr = &acl_role_set.r_hash[index];
48949 +
48950 + /* if role was already inserted due to domains and already has
48951 + a role in the same bucket as it attached, then we need to
48952 + combine these two buckets
48953 + */
48954 + if (role->next) {
48955 + tmp = role->next;
48956 + while (tmp->next)
48957 + tmp = tmp->next;
48958 + tmp->next = *curr;
48959 + } else
48960 + role->next = *curr;
48961 + *curr = role;
48962 +
48963 + return;
48964 +}
48965 +
48966 +static void
48967 +insert_acl_role_label(struct acl_role_label *role)
48968 +{
48969 + int i;
48970 +
48971 + if (role_list == NULL) {
48972 + role_list = role;
48973 + role->prev = NULL;
48974 + } else {
48975 + role->prev = role_list;
48976 + role_list = role;
48977 + }
48978 +
48979 + /* used for hash chains */
48980 + role->next = NULL;
48981 +
48982 + if (role->roletype & GR_ROLE_DOMAIN) {
48983 + for (i = 0; i < role->domain_child_num; i++)
48984 + __insert_acl_role_label(role, role->domain_children[i]);
48985 + } else
48986 + __insert_acl_role_label(role, role->uidgid);
48987 +}
48988 +
48989 +static int
48990 +insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
48991 +{
48992 + struct name_entry **curr, *nentry;
48993 + struct inodev_entry *ientry;
48994 + unsigned int len = strlen(name);
48995 + unsigned int key = full_name_hash(name, len);
48996 + unsigned int index = key % name_set.n_size;
48997 +
48998 + curr = &name_set.n_hash[index];
48999 +
49000 + while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
49001 + curr = &((*curr)->next);
49002 +
49003 + if (*curr != NULL)
49004 + return 1;
49005 +
49006 + nentry = acl_alloc(sizeof (struct name_entry));
49007 + if (nentry == NULL)
49008 + return 0;
49009 + ientry = acl_alloc(sizeof (struct inodev_entry));
49010 + if (ientry == NULL)
49011 + return 0;
49012 + ientry->nentry = nentry;
49013 +
49014 + nentry->key = key;
49015 + nentry->name = name;
49016 + nentry->inode = inode;
49017 + nentry->device = device;
49018 + nentry->len = len;
49019 + nentry->deleted = deleted;
49020 +
49021 + nentry->prev = NULL;
49022 + curr = &name_set.n_hash[index];
49023 + if (*curr != NULL)
49024 + (*curr)->prev = nentry;
49025 + nentry->next = *curr;
49026 + *curr = nentry;
49027 +
49028 + /* insert us into the table searchable by inode/dev */
49029 + insert_inodev_entry(ientry);
49030 +
49031 + return 1;
49032 +}
49033 +
49034 +static void
49035 +insert_acl_obj_label(struct acl_object_label *obj,
49036 + struct acl_subject_label *subj)
49037 +{
49038 + unsigned int index =
49039 + fhash(obj->inode, obj->device, subj->obj_hash_size);
49040 + struct acl_object_label **curr;
49041 +
49042 +
49043 + obj->prev = NULL;
49044 +
49045 + curr = &subj->obj_hash[index];
49046 + if (*curr != NULL)
49047 + (*curr)->prev = obj;
49048 +
49049 + obj->next = *curr;
49050 + *curr = obj;
49051 +
49052 + return;
49053 +}
49054 +
49055 +static void
49056 +insert_acl_subj_label(struct acl_subject_label *obj,
49057 + struct acl_role_label *role)
49058 +{
49059 + unsigned int index = fhash(obj->inode, obj->device, role->subj_hash_size);
49060 + struct acl_subject_label **curr;
49061 +
49062 + obj->prev = NULL;
49063 +
49064 + curr = &role->subj_hash[index];
49065 + if (*curr != NULL)
49066 + (*curr)->prev = obj;
49067 +
49068 + obj->next = *curr;
49069 + *curr = obj;
49070 +
49071 + return;
49072 +}
49073 +
49074 +/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
49075 +
49076 +static void *
49077 +create_table(__u32 * len, int elementsize)
49078 +{
49079 + unsigned int table_sizes[] = {
49080 + 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
49081 + 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
49082 + 4194301, 8388593, 16777213, 33554393, 67108859
49083 + };
49084 + void *newtable = NULL;
49085 + unsigned int pwr = 0;
49086 +
49087 + while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
49088 + table_sizes[pwr] <= *len)
49089 + pwr++;
49090 +
49091 + if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
49092 + return newtable;
49093 +
49094 + if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
49095 + newtable =
49096 + kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
49097 + else
49098 + newtable = vmalloc(table_sizes[pwr] * elementsize);
49099 +
49100 + *len = table_sizes[pwr];
49101 +
49102 + return newtable;
49103 +}
49104 +
49105 +static int
49106 +init_variables(const struct gr_arg *arg)
49107 +{
49108 + struct task_struct *reaper = &init_task;
49109 + unsigned int stacksize;
49110 +
49111 + subj_map_set.s_size = arg->role_db.num_subjects;
49112 + acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
49113 + name_set.n_size = arg->role_db.num_objects;
49114 + inodev_set.i_size = arg->role_db.num_objects;
49115 +
49116 + if (!subj_map_set.s_size || !acl_role_set.r_size ||
49117 + !name_set.n_size || !inodev_set.i_size)
49118 + return 1;
49119 +
49120 + if (!gr_init_uidset())
49121 + return 1;
49122 +
49123 + /* set up the stack that holds allocation info */
49124 +
49125 + stacksize = arg->role_db.num_pointers + 5;
49126 +
49127 + if (!acl_alloc_stack_init(stacksize))
49128 + return 1;
49129 +
49130 + /* grab reference for the real root dentry and vfsmount */
49131 + get_fs_root(reaper->fs, &real_root);
49132 +
49133 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
49134 + printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root.dentry), real_root.dentry->d_inode->i_ino);
49135 +#endif
49136 +
49137 + fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
49138 + if (fakefs_obj_rw == NULL)
49139 + return 1;
49140 + fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
49141 +
49142 + fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
49143 + if (fakefs_obj_rwx == NULL)
49144 + return 1;
49145 + fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
49146 +
49147 + subj_map_set.s_hash =
49148 + (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
49149 + acl_role_set.r_hash =
49150 + (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
49151 + name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
49152 + inodev_set.i_hash =
49153 + (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
49154 +
49155 + if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
49156 + !name_set.n_hash || !inodev_set.i_hash)
49157 + return 1;
49158 +
49159 + memset(subj_map_set.s_hash, 0,
49160 + sizeof(struct subject_map *) * subj_map_set.s_size);
49161 + memset(acl_role_set.r_hash, 0,
49162 + sizeof (struct acl_role_label *) * acl_role_set.r_size);
49163 + memset(name_set.n_hash, 0,
49164 + sizeof (struct name_entry *) * name_set.n_size);
49165 + memset(inodev_set.i_hash, 0,
49166 + sizeof (struct inodev_entry *) * inodev_set.i_size);
49167 +
49168 + return 0;
49169 +}
49170 +
49171 +/* free information not needed after startup
49172 + currently contains user->kernel pointer mappings for subjects
49173 +*/
49174 +
49175 +static void
49176 +free_init_variables(void)
49177 +{
49178 + __u32 i;
49179 +
49180 + if (subj_map_set.s_hash) {
49181 + for (i = 0; i < subj_map_set.s_size; i++) {
49182 + if (subj_map_set.s_hash[i]) {
49183 + kfree(subj_map_set.s_hash[i]);
49184 + subj_map_set.s_hash[i] = NULL;
49185 + }
49186 + }
49187 +
49188 + if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
49189 + PAGE_SIZE)
49190 + kfree(subj_map_set.s_hash);
49191 + else
49192 + vfree(subj_map_set.s_hash);
49193 + }
49194 +
49195 + return;
49196 +}
49197 +
49198 +static void
49199 +free_variables(void)
49200 +{
49201 + struct acl_subject_label *s;
49202 + struct acl_role_label *r;
49203 + struct task_struct *task, *task2;
49204 + unsigned int x;
49205 +
49206 + gr_clear_learn_entries();
49207 +
49208 + read_lock(&tasklist_lock);
49209 + do_each_thread(task2, task) {
49210 + task->acl_sp_role = 0;
49211 + task->acl_role_id = 0;
49212 + task->acl = NULL;
49213 + task->role = NULL;
49214 + } while_each_thread(task2, task);
49215 + read_unlock(&tasklist_lock);
49216 +
49217 + /* release the reference to the real root dentry and vfsmount */
49218 + path_put(&real_root);
49219 +
49220 + /* free all object hash tables */
49221 +
49222 + FOR_EACH_ROLE_START(r)
49223 + if (r->subj_hash == NULL)
49224 + goto next_role;
49225 + FOR_EACH_SUBJECT_START(r, s, x)
49226 + if (s->obj_hash == NULL)
49227 + break;
49228 + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
49229 + kfree(s->obj_hash);
49230 + else
49231 + vfree(s->obj_hash);
49232 + FOR_EACH_SUBJECT_END(s, x)
49233 + FOR_EACH_NESTED_SUBJECT_START(r, s)
49234 + if (s->obj_hash == NULL)
49235 + break;
49236 + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
49237 + kfree(s->obj_hash);
49238 + else
49239 + vfree(s->obj_hash);
49240 + FOR_EACH_NESTED_SUBJECT_END(s)
49241 + if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
49242 + kfree(r->subj_hash);
49243 + else
49244 + vfree(r->subj_hash);
49245 + r->subj_hash = NULL;
49246 +next_role:
49247 + FOR_EACH_ROLE_END(r)
49248 +
49249 + acl_free_all();
49250 +
49251 + if (acl_role_set.r_hash) {
49252 + if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
49253 + PAGE_SIZE)
49254 + kfree(acl_role_set.r_hash);
49255 + else
49256 + vfree(acl_role_set.r_hash);
49257 + }
49258 + if (name_set.n_hash) {
49259 + if ((name_set.n_size * sizeof (struct name_entry *)) <=
49260 + PAGE_SIZE)
49261 + kfree(name_set.n_hash);
49262 + else
49263 + vfree(name_set.n_hash);
49264 + }
49265 +
49266 + if (inodev_set.i_hash) {
49267 + if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
49268 + PAGE_SIZE)
49269 + kfree(inodev_set.i_hash);
49270 + else
49271 + vfree(inodev_set.i_hash);
49272 + }
49273 +
49274 + gr_free_uidset();
49275 +
49276 + memset(&name_set, 0, sizeof (struct name_db));
49277 + memset(&inodev_set, 0, sizeof (struct inodev_db));
49278 + memset(&acl_role_set, 0, sizeof (struct acl_role_db));
49279 + memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
49280 +
49281 + default_role = NULL;
49282 + role_list = NULL;
49283 +
49284 + return;
49285 +}
49286 +
49287 +static __u32
49288 +count_user_objs(struct acl_object_label *userp)
49289 +{
49290 + struct acl_object_label o_tmp;
49291 + __u32 num = 0;
49292 +
49293 + while (userp) {
49294 + if (copy_from_user(&o_tmp, userp,
49295 + sizeof (struct acl_object_label)))
49296 + break;
49297 +
49298 + userp = o_tmp.prev;
49299 + num++;
49300 + }
49301 +
49302 + return num;
49303 +}
49304 +
49305 +static struct acl_subject_label *
49306 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role);
49307 +
49308 +static int
49309 +copy_user_glob(struct acl_object_label *obj)
49310 +{
49311 + struct acl_object_label *g_tmp, **guser;
49312 + unsigned int len;
49313 + char *tmp;
49314 +
49315 + if (obj->globbed == NULL)
49316 + return 0;
49317 +
49318 + guser = &obj->globbed;
49319 + while (*guser) {
49320 + g_tmp = (struct acl_object_label *)
49321 + acl_alloc(sizeof (struct acl_object_label));
49322 + if (g_tmp == NULL)
49323 + return -ENOMEM;
49324 +
49325 + if (copy_from_user(g_tmp, *guser,
49326 + sizeof (struct acl_object_label)))
49327 + return -EFAULT;
49328 +
49329 + len = strnlen_user(g_tmp->filename, PATH_MAX);
49330 +
49331 + if (!len || len >= PATH_MAX)
49332 + return -EINVAL;
49333 +
49334 + if ((tmp = (char *) acl_alloc(len)) == NULL)
49335 + return -ENOMEM;
49336 +
49337 + if (copy_from_user(tmp, g_tmp->filename, len))
49338 + return -EFAULT;
49339 + tmp[len-1] = '\0';
49340 + g_tmp->filename = tmp;
49341 +
49342 + *guser = g_tmp;
49343 + guser = &(g_tmp->next);
49344 + }
49345 +
49346 + return 0;
49347 +}
49348 +
49349 +static int
49350 +copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
49351 + struct acl_role_label *role)
49352 +{
49353 + struct acl_object_label *o_tmp;
49354 + unsigned int len;
49355 + int ret;
49356 + char *tmp;
49357 +
49358 + while (userp) {
49359 + if ((o_tmp = (struct acl_object_label *)
49360 + acl_alloc(sizeof (struct acl_object_label))) == NULL)
49361 + return -ENOMEM;
49362 +
49363 + if (copy_from_user(o_tmp, userp,
49364 + sizeof (struct acl_object_label)))
49365 + return -EFAULT;
49366 +
49367 + userp = o_tmp->prev;
49368 +
49369 + len = strnlen_user(o_tmp->filename, PATH_MAX);
49370 +
49371 + if (!len || len >= PATH_MAX)
49372 + return -EINVAL;
49373 +
49374 + if ((tmp = (char *) acl_alloc(len)) == NULL)
49375 + return -ENOMEM;
49376 +
49377 + if (copy_from_user(tmp, o_tmp->filename, len))
49378 + return -EFAULT;
49379 + tmp[len-1] = '\0';
49380 + o_tmp->filename = tmp;
49381 +
49382 + insert_acl_obj_label(o_tmp, subj);
49383 + if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
49384 + o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
49385 + return -ENOMEM;
49386 +
49387 + ret = copy_user_glob(o_tmp);
49388 + if (ret)
49389 + return ret;
49390 +
49391 + if (o_tmp->nested) {
49392 + o_tmp->nested = do_copy_user_subj(o_tmp->nested, role);
49393 + if (IS_ERR(o_tmp->nested))
49394 + return PTR_ERR(o_tmp->nested);
49395 +
49396 + /* insert into nested subject list */
49397 + o_tmp->nested->next = role->hash->first;
49398 + role->hash->first = o_tmp->nested;
49399 + }
49400 + }
49401 +
49402 + return 0;
49403 +}
49404 +
49405 +static __u32
49406 +count_user_subjs(struct acl_subject_label *userp)
49407 +{
49408 + struct acl_subject_label s_tmp;
49409 + __u32 num = 0;
49410 +
49411 + while (userp) {
49412 + if (copy_from_user(&s_tmp, userp,
49413 + sizeof (struct acl_subject_label)))
49414 + break;
49415 +
49416 + userp = s_tmp.prev;
49417 + /* do not count nested subjects against this count, since
49418 + they are not included in the hash table, but are
49419 + attached to objects. We have already counted
49420 + the subjects in userspace for the allocation
49421 + stack
49422 + */
49423 + if (!(s_tmp.mode & GR_NESTED))
49424 + num++;
49425 + }
49426 +
49427 + return num;
49428 +}
49429 +
49430 +static int
49431 +copy_user_allowedips(struct acl_role_label *rolep)
49432 +{
49433 + struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
49434 +
49435 + ruserip = rolep->allowed_ips;
49436 +
49437 + while (ruserip) {
49438 + rlast = rtmp;
49439 +
49440 + if ((rtmp = (struct role_allowed_ip *)
49441 + acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
49442 + return -ENOMEM;
49443 +
49444 + if (copy_from_user(rtmp, ruserip,
49445 + sizeof (struct role_allowed_ip)))
49446 + return -EFAULT;
49447 +
49448 + ruserip = rtmp->prev;
49449 +
49450 + if (!rlast) {
49451 + rtmp->prev = NULL;
49452 + rolep->allowed_ips = rtmp;
49453 + } else {
49454 + rlast->next = rtmp;
49455 + rtmp->prev = rlast;
49456 + }
49457 +
49458 + if (!ruserip)
49459 + rtmp->next = NULL;
49460 + }
49461 +
49462 + return 0;
49463 +}
49464 +
49465 +static int
49466 +copy_user_transitions(struct acl_role_label *rolep)
49467 +{
49468 + struct role_transition *rusertp, *rtmp = NULL, *rlast;
49469 +
49470 + unsigned int len;
49471 + char *tmp;
49472 +
49473 + rusertp = rolep->transitions;
49474 +
49475 + while (rusertp) {
49476 + rlast = rtmp;
49477 +
49478 + if ((rtmp = (struct role_transition *)
49479 + acl_alloc(sizeof (struct role_transition))) == NULL)
49480 + return -ENOMEM;
49481 +
49482 + if (copy_from_user(rtmp, rusertp,
49483 + sizeof (struct role_transition)))
49484 + return -EFAULT;
49485 +
49486 + rusertp = rtmp->prev;
49487 +
49488 + len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
49489 +
49490 + if (!len || len >= GR_SPROLE_LEN)
49491 + return -EINVAL;
49492 +
49493 + if ((tmp = (char *) acl_alloc(len)) == NULL)
49494 + return -ENOMEM;
49495 +
49496 + if (copy_from_user(tmp, rtmp->rolename, len))
49497 + return -EFAULT;
49498 + tmp[len-1] = '\0';
49499 + rtmp->rolename = tmp;
49500 +
49501 + if (!rlast) {
49502 + rtmp->prev = NULL;
49503 + rolep->transitions = rtmp;
49504 + } else {
49505 + rlast->next = rtmp;
49506 + rtmp->prev = rlast;
49507 + }
49508 +
49509 + if (!rusertp)
49510 + rtmp->next = NULL;
49511 + }
49512 +
49513 + return 0;
49514 +}
49515 +
49516 +static struct acl_subject_label *
49517 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role)
49518 +{
49519 + struct acl_subject_label *s_tmp = NULL, *s_tmp2;
49520 + unsigned int len;
49521 + char *tmp;
49522 + __u32 num_objs;
49523 + struct acl_ip_label **i_tmp, *i_utmp2;
49524 + struct gr_hash_struct ghash;
49525 + struct subject_map *subjmap;
49526 + unsigned int i_num;
49527 + int err;
49528 +
49529 + s_tmp = lookup_subject_map(userp);
49530 +
49531 + /* we've already copied this subject into the kernel, just return
49532 + the reference to it, and don't copy it over again
49533 + */
49534 + if (s_tmp)
49535 + return(s_tmp);
49536 +
49537 + if ((s_tmp = (struct acl_subject_label *)
49538 + acl_alloc(sizeof (struct acl_subject_label))) == NULL)
49539 + return ERR_PTR(-ENOMEM);
49540 +
49541 + subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
49542 + if (subjmap == NULL)
49543 + return ERR_PTR(-ENOMEM);
49544 +
49545 + subjmap->user = userp;
49546 + subjmap->kernel = s_tmp;
49547 + insert_subj_map_entry(subjmap);
49548 +
49549 + if (copy_from_user(s_tmp, userp,
49550 + sizeof (struct acl_subject_label)))
49551 + return ERR_PTR(-EFAULT);
49552 +
49553 + len = strnlen_user(s_tmp->filename, PATH_MAX);
49554 +
49555 + if (!len || len >= PATH_MAX)
49556 + return ERR_PTR(-EINVAL);
49557 +
49558 + if ((tmp = (char *) acl_alloc(len)) == NULL)
49559 + return ERR_PTR(-ENOMEM);
49560 +
49561 + if (copy_from_user(tmp, s_tmp->filename, len))
49562 + return ERR_PTR(-EFAULT);
49563 + tmp[len-1] = '\0';
49564 + s_tmp->filename = tmp;
49565 +
49566 + if (!strcmp(s_tmp->filename, "/"))
49567 + role->root_label = s_tmp;
49568 +
49569 + if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
49570 + return ERR_PTR(-EFAULT);
49571 +
49572 + /* copy user and group transition tables */
49573 +
49574 + if (s_tmp->user_trans_num) {
49575 + uid_t *uidlist;
49576 +
49577 + uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
49578 + if (uidlist == NULL)
49579 + return ERR_PTR(-ENOMEM);
49580 + if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
49581 + return ERR_PTR(-EFAULT);
49582 +
49583 + s_tmp->user_transitions = uidlist;
49584 + }
49585 +
49586 + if (s_tmp->group_trans_num) {
49587 + gid_t *gidlist;
49588 +
49589 + gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
49590 + if (gidlist == NULL)
49591 + return ERR_PTR(-ENOMEM);
49592 + if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
49593 + return ERR_PTR(-EFAULT);
49594 +
49595 + s_tmp->group_transitions = gidlist;
49596 + }
49597 +
49598 + /* set up object hash table */
49599 + num_objs = count_user_objs(ghash.first);
49600 +
49601 + s_tmp->obj_hash_size = num_objs;
49602 + s_tmp->obj_hash =
49603 + (struct acl_object_label **)
49604 + create_table(&(s_tmp->obj_hash_size), sizeof(void *));
49605 +
49606 + if (!s_tmp->obj_hash)
49607 + return ERR_PTR(-ENOMEM);
49608 +
49609 + memset(s_tmp->obj_hash, 0,
49610 + s_tmp->obj_hash_size *
49611 + sizeof (struct acl_object_label *));
49612 +
49613 + /* add in objects */
49614 + err = copy_user_objs(ghash.first, s_tmp, role);
49615 +
49616 + if (err)
49617 + return ERR_PTR(err);
49618 +
49619 + /* set pointer for parent subject */
49620 + if (s_tmp->parent_subject) {
49621 + s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role);
49622 +
49623 + if (IS_ERR(s_tmp2))
49624 + return s_tmp2;
49625 +
49626 + s_tmp->parent_subject = s_tmp2;
49627 + }
49628 +
49629 + /* add in ip acls */
49630 +
49631 + if (!s_tmp->ip_num) {
49632 + s_tmp->ips = NULL;
49633 + goto insert;
49634 + }
49635 +
49636 + i_tmp =
49637 + (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
49638 + sizeof (struct acl_ip_label *));
49639 +
49640 + if (!i_tmp)
49641 + return ERR_PTR(-ENOMEM);
49642 +
49643 + for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
49644 + *(i_tmp + i_num) =
49645 + (struct acl_ip_label *)
49646 + acl_alloc(sizeof (struct acl_ip_label));
49647 + if (!*(i_tmp + i_num))
49648 + return ERR_PTR(-ENOMEM);
49649 +
49650 + if (copy_from_user
49651 + (&i_utmp2, s_tmp->ips + i_num,
49652 + sizeof (struct acl_ip_label *)))
49653 + return ERR_PTR(-EFAULT);
49654 +
49655 + if (copy_from_user
49656 + (*(i_tmp + i_num), i_utmp2,
49657 + sizeof (struct acl_ip_label)))
49658 + return ERR_PTR(-EFAULT);
49659 +
49660 + if ((*(i_tmp + i_num))->iface == NULL)
49661 + continue;
49662 +
49663 + len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
49664 + if (!len || len >= IFNAMSIZ)
49665 + return ERR_PTR(-EINVAL);
49666 + tmp = acl_alloc(len);
49667 + if (tmp == NULL)
49668 + return ERR_PTR(-ENOMEM);
49669 + if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
49670 + return ERR_PTR(-EFAULT);
49671 + (*(i_tmp + i_num))->iface = tmp;
49672 + }
49673 +
49674 + s_tmp->ips = i_tmp;
49675 +
49676 +insert:
49677 + if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
49678 + s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
49679 + return ERR_PTR(-ENOMEM);
49680 +
49681 + return s_tmp;
49682 +}
49683 +
49684 +static int
49685 +copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
49686 +{
49687 + struct acl_subject_label s_pre;
49688 + struct acl_subject_label * ret;
49689 + int err;
49690 +
49691 + while (userp) {
49692 + if (copy_from_user(&s_pre, userp,
49693 + sizeof (struct acl_subject_label)))
49694 + return -EFAULT;
49695 +
49696 + /* do not add nested subjects here, add
49697 + while parsing objects
49698 + */
49699 +
49700 + if (s_pre.mode & GR_NESTED) {
49701 + userp = s_pre.prev;
49702 + continue;
49703 + }
49704 +
49705 + ret = do_copy_user_subj(userp, role);
49706 +
49707 + err = PTR_ERR(ret);
49708 + if (IS_ERR(ret))
49709 + return err;
49710 +
49711 + insert_acl_subj_label(ret, role);
49712 +
49713 + userp = s_pre.prev;
49714 + }
49715 +
49716 + return 0;
49717 +}
49718 +
49719 +static int
49720 +copy_user_acl(struct gr_arg *arg)
49721 +{
49722 + struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
49723 + struct sprole_pw *sptmp;
49724 + struct gr_hash_struct *ghash;
49725 + uid_t *domainlist;
49726 + unsigned int r_num;
49727 + unsigned int len;
49728 + char *tmp;
49729 + int err = 0;
49730 + __u16 i;
49731 + __u32 num_subjs;
49732 +
49733 + /* we need a default and kernel role */
49734 + if (arg->role_db.num_roles < 2)
49735 + return -EINVAL;
49736 +
49737 + /* copy special role authentication info from userspace */
49738 +
49739 + num_sprole_pws = arg->num_sprole_pws;
49740 + acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
49741 +
49742 + if (!acl_special_roles) {
49743 + err = -ENOMEM;
49744 + goto cleanup;
49745 + }
49746 +
49747 + for (i = 0; i < num_sprole_pws; i++) {
49748 + sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
49749 + if (!sptmp) {
49750 + err = -ENOMEM;
49751 + goto cleanup;
49752 + }
49753 + if (copy_from_user(sptmp, arg->sprole_pws + i,
49754 + sizeof (struct sprole_pw))) {
49755 + err = -EFAULT;
49756 + goto cleanup;
49757 + }
49758 +
49759 + len =
49760 + strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
49761 +
49762 + if (!len || len >= GR_SPROLE_LEN) {
49763 + err = -EINVAL;
49764 + goto cleanup;
49765 + }
49766 +
49767 + if ((tmp = (char *) acl_alloc(len)) == NULL) {
49768 + err = -ENOMEM;
49769 + goto cleanup;
49770 + }
49771 +
49772 + if (copy_from_user(tmp, sptmp->rolename, len)) {
49773 + err = -EFAULT;
49774 + goto cleanup;
49775 + }
49776 + tmp[len-1] = '\0';
49777 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
49778 + printk(KERN_ALERT "Copying special role %s\n", tmp);
49779 +#endif
49780 + sptmp->rolename = tmp;
49781 + acl_special_roles[i] = sptmp;
49782 + }
49783 +
49784 + r_utmp = (struct acl_role_label **) arg->role_db.r_table;
49785 +
49786 + for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
49787 + r_tmp = acl_alloc(sizeof (struct acl_role_label));
49788 +
49789 + if (!r_tmp) {
49790 + err = -ENOMEM;
49791 + goto cleanup;
49792 + }
49793 +
49794 + if (copy_from_user(&r_utmp2, r_utmp + r_num,
49795 + sizeof (struct acl_role_label *))) {
49796 + err = -EFAULT;
49797 + goto cleanup;
49798 + }
49799 +
49800 + if (copy_from_user(r_tmp, r_utmp2,
49801 + sizeof (struct acl_role_label))) {
49802 + err = -EFAULT;
49803 + goto cleanup;
49804 + }
49805 +
49806 + len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
49807 +
49808 + if (!len || len >= PATH_MAX) {
49809 + err = -EINVAL;
49810 + goto cleanup;
49811 + }
49812 +
49813 + if ((tmp = (char *) acl_alloc(len)) == NULL) {
49814 + err = -ENOMEM;
49815 + goto cleanup;
49816 + }
49817 + if (copy_from_user(tmp, r_tmp->rolename, len)) {
49818 + err = -EFAULT;
49819 + goto cleanup;
49820 + }
49821 + tmp[len-1] = '\0';
49822 + r_tmp->rolename = tmp;
49823 +
49824 + if (!strcmp(r_tmp->rolename, "default")
49825 + && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
49826 + default_role = r_tmp;
49827 + } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
49828 + kernel_role = r_tmp;
49829 + }
49830 +
49831 + if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL) {
49832 + err = -ENOMEM;
49833 + goto cleanup;
49834 + }
49835 + if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct))) {
49836 + err = -EFAULT;
49837 + goto cleanup;
49838 + }
49839 +
49840 + r_tmp->hash = ghash;
49841 +
49842 + num_subjs = count_user_subjs(r_tmp->hash->first);
49843 +
49844 + r_tmp->subj_hash_size = num_subjs;
49845 + r_tmp->subj_hash =
49846 + (struct acl_subject_label **)
49847 + create_table(&(r_tmp->subj_hash_size), sizeof(void *));
49848 +
49849 + if (!r_tmp->subj_hash) {
49850 + err = -ENOMEM;
49851 + goto cleanup;
49852 + }
49853 +
49854 + err = copy_user_allowedips(r_tmp);
49855 + if (err)
49856 + goto cleanup;
49857 +
49858 + /* copy domain info */
49859 + if (r_tmp->domain_children != NULL) {
49860 + domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
49861 + if (domainlist == NULL) {
49862 + err = -ENOMEM;
49863 + goto cleanup;
49864 + }
49865 + if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t))) {
49866 + err = -EFAULT;
49867 + goto cleanup;
49868 + }
49869 + r_tmp->domain_children = domainlist;
49870 + }
49871 +
49872 + err = copy_user_transitions(r_tmp);
49873 + if (err)
49874 + goto cleanup;
49875 +
49876 + memset(r_tmp->subj_hash, 0,
49877 + r_tmp->subj_hash_size *
49878 + sizeof (struct acl_subject_label *));
49879 +
49880 + err = copy_user_subjs(r_tmp->hash->first, r_tmp);
49881 +
49882 + if (err)
49883 + goto cleanup;
49884 +
49885 + /* set nested subject list to null */
49886 + r_tmp->hash->first = NULL;
49887 +
49888 + insert_acl_role_label(r_tmp);
49889 + }
49890 +
49891 + goto return_err;
49892 + cleanup:
49893 + free_variables();
49894 + return_err:
49895 + return err;
49896 +
49897 +}
49898 +
49899 +static int
49900 +gracl_init(struct gr_arg *args)
49901 +{
49902 + int error = 0;
49903 +
49904 + memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
49905 + memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
49906 +
49907 + if (init_variables(args)) {
49908 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
49909 + error = -ENOMEM;
49910 + free_variables();
49911 + goto out;
49912 + }
49913 +
49914 + error = copy_user_acl(args);
49915 + free_init_variables();
49916 + if (error) {
49917 + free_variables();
49918 + goto out;
49919 + }
49920 +
49921 + if ((error = gr_set_acls(0))) {
49922 + free_variables();
49923 + goto out;
49924 + }
49925 +
49926 + pax_open_kernel();
49927 + gr_status |= GR_READY;
49928 + pax_close_kernel();
49929 +
49930 + out:
49931 + return error;
49932 +}
49933 +
49934 +/* derived from glibc fnmatch() 0: match, 1: no match*/
49935 +
49936 +static int
49937 +glob_match(const char *p, const char *n)
49938 +{
49939 + char c;
49940 +
49941 + while ((c = *p++) != '\0') {
49942 + switch (c) {
49943 + case '?':
49944 + if (*n == '\0')
49945 + return 1;
49946 + else if (*n == '/')
49947 + return 1;
49948 + break;
49949 + case '\\':
49950 + if (*n != c)
49951 + return 1;
49952 + break;
49953 + case '*':
49954 + for (c = *p++; c == '?' || c == '*'; c = *p++) {
49955 + if (*n == '/')
49956 + return 1;
49957 + else if (c == '?') {
49958 + if (*n == '\0')
49959 + return 1;
49960 + else
49961 + ++n;
49962 + }
49963 + }
49964 + if (c == '\0') {
49965 + return 0;
49966 + } else {
49967 + const char *endp;
49968 +
49969 + if ((endp = strchr(n, '/')) == NULL)
49970 + endp = n + strlen(n);
49971 +
49972 + if (c == '[') {
49973 + for (--p; n < endp; ++n)
49974 + if (!glob_match(p, n))
49975 + return 0;
49976 + } else if (c == '/') {
49977 + while (*n != '\0' && *n != '/')
49978 + ++n;
49979 + if (*n == '/' && !glob_match(p, n + 1))
49980 + return 0;
49981 + } else {
49982 + for (--p; n < endp; ++n)
49983 + if (*n == c && !glob_match(p, n))
49984 + return 0;
49985 + }
49986 +
49987 + return 1;
49988 + }
49989 + case '[':
49990 + {
49991 + int not;
49992 + char cold;
49993 +
49994 + if (*n == '\0' || *n == '/')
49995 + return 1;
49996 +
49997 + not = (*p == '!' || *p == '^');
49998 + if (not)
49999 + ++p;
50000 +
50001 + c = *p++;
50002 + for (;;) {
50003 + unsigned char fn = (unsigned char)*n;
50004 +
50005 + if (c == '\0')
50006 + return 1;
50007 + else {
50008 + if (c == fn)
50009 + goto matched;
50010 + cold = c;
50011 + c = *p++;
50012 +
50013 + if (c == '-' && *p != ']') {
50014 + unsigned char cend = *p++;
50015 +
50016 + if (cend == '\0')
50017 + return 1;
50018 +
50019 + if (cold <= fn && fn <= cend)
50020 + goto matched;
50021 +
50022 + c = *p++;
50023 + }
50024 + }
50025 +
50026 + if (c == ']')
50027 + break;
50028 + }
50029 + if (!not)
50030 + return 1;
50031 + break;
50032 + matched:
50033 + while (c != ']') {
50034 + if (c == '\0')
50035 + return 1;
50036 +
50037 + c = *p++;
50038 + }
50039 + if (not)
50040 + return 1;
50041 + }
50042 + break;
50043 + default:
50044 + if (c != *n)
50045 + return 1;
50046 + }
50047 +
50048 + ++n;
50049 + }
50050 +
50051 + if (*n == '\0')
50052 + return 0;
50053 +
50054 + if (*n == '/')
50055 + return 0;
50056 +
50057 + return 1;
50058 +}
50059 +
50060 +static struct acl_object_label *
50061 +chk_glob_label(struct acl_object_label *globbed,
50062 + struct dentry *dentry, struct vfsmount *mnt, char **path)
50063 +{
50064 + struct acl_object_label *tmp;
50065 +
50066 + if (*path == NULL)
50067 + *path = gr_to_filename_nolock(dentry, mnt);
50068 +
50069 + tmp = globbed;
50070 +
50071 + while (tmp) {
50072 + if (!glob_match(tmp->filename, *path))
50073 + return tmp;
50074 + tmp = tmp->next;
50075 + }
50076 +
50077 + return NULL;
50078 +}
50079 +
50080 +static struct acl_object_label *
50081 +__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
50082 + const ino_t curr_ino, const dev_t curr_dev,
50083 + const struct acl_subject_label *subj, char **path, const int checkglob)
50084 +{
50085 + struct acl_subject_label *tmpsubj;
50086 + struct acl_object_label *retval;
50087 + struct acl_object_label *retval2;
50088 +
50089 + tmpsubj = (struct acl_subject_label *) subj;
50090 + read_lock(&gr_inode_lock);
50091 + do {
50092 + retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
50093 + if (retval) {
50094 + if (checkglob && retval->globbed) {
50095 + retval2 = chk_glob_label(retval->globbed, (struct dentry *)orig_dentry,
50096 + (struct vfsmount *)orig_mnt, path);
50097 + if (retval2)
50098 + retval = retval2;
50099 + }
50100 + break;
50101 + }
50102 + } while ((tmpsubj = tmpsubj->parent_subject));
50103 + read_unlock(&gr_inode_lock);
50104 +
50105 + return retval;
50106 +}
50107 +
50108 +static __inline__ struct acl_object_label *
50109 +full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
50110 + struct dentry *curr_dentry,
50111 + const struct acl_subject_label *subj, char **path, const int checkglob)
50112 +{
50113 + int newglob = checkglob;
50114 + ino_t inode;
50115 + dev_t device;
50116 +
50117 + /* if we aren't checking a subdirectory of the original path yet, don't do glob checking
50118 + as we don't want a / * rule to match instead of the / object
50119 + don't do this for create lookups that call this function though, since they're looking up
50120 + on the parent and thus need globbing checks on all paths
50121 + */
50122 + if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
50123 + newglob = GR_NO_GLOB;
50124 +
50125 + spin_lock(&curr_dentry->d_lock);
50126 + inode = curr_dentry->d_inode->i_ino;
50127 + device = __get_dev(curr_dentry);
50128 + spin_unlock(&curr_dentry->d_lock);
50129 +
50130 + return __full_lookup(orig_dentry, orig_mnt, inode, device, subj, path, newglob);
50131 +}
50132 +
50133 +static struct acl_object_label *
50134 +__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
50135 + const struct acl_subject_label *subj, char *path, const int checkglob)
50136 +{
50137 + struct dentry *dentry = (struct dentry *) l_dentry;
50138 + struct vfsmount *mnt = (struct vfsmount *) l_mnt;
50139 + struct acl_object_label *retval;
50140 + struct dentry *parent;
50141 +
50142 + write_seqlock(&rename_lock);
50143 + br_read_lock(vfsmount_lock);
50144 +
50145 + if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
50146 +#ifdef CONFIG_NET
50147 + mnt == sock_mnt ||
50148 +#endif
50149 +#ifdef CONFIG_HUGETLBFS
50150 + (mnt == hugetlbfs_vfsmount && dentry->d_inode->i_nlink == 0) ||
50151 +#endif
50152 + /* ignore Eric Biederman */
50153 + IS_PRIVATE(l_dentry->d_inode))) {
50154 + retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
50155 + goto out;
50156 + }
50157 +
50158 + for (;;) {
50159 + if (dentry == real_root.dentry && mnt == real_root.mnt)
50160 + break;
50161 +
50162 + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
50163 + if (mnt->mnt_parent == mnt)
50164 + break;
50165 +
50166 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
50167 + if (retval != NULL)
50168 + goto out;
50169 +
50170 + dentry = mnt->mnt_mountpoint;
50171 + mnt = mnt->mnt_parent;
50172 + continue;
50173 + }
50174 +
50175 + parent = dentry->d_parent;
50176 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
50177 + if (retval != NULL)
50178 + goto out;
50179 +
50180 + dentry = parent;
50181 + }
50182 +
50183 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
50184 +
50185 + /* real_root is pinned so we don't have to hold a reference */
50186 + if (retval == NULL)
50187 + retval = full_lookup(l_dentry, l_mnt, real_root.dentry, subj, &path, checkglob);
50188 +out:
50189 + br_read_unlock(vfsmount_lock);
50190 + write_sequnlock(&rename_lock);
50191 +
50192 + BUG_ON(retval == NULL);
50193 +
50194 + return retval;
50195 +}
50196 +
50197 +static __inline__ struct acl_object_label *
50198 +chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
50199 + const struct acl_subject_label *subj)
50200 +{
50201 + char *path = NULL;
50202 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
50203 +}
50204 +
50205 +static __inline__ struct acl_object_label *
50206 +chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
50207 + const struct acl_subject_label *subj)
50208 +{
50209 + char *path = NULL;
50210 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
50211 +}
50212 +
50213 +static __inline__ struct acl_object_label *
50214 +chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
50215 + const struct acl_subject_label *subj, char *path)
50216 +{
50217 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
50218 +}
50219 +
50220 +static struct acl_subject_label *
50221 +chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
50222 + const struct acl_role_label *role)
50223 +{
50224 + struct dentry *dentry = (struct dentry *) l_dentry;
50225 + struct vfsmount *mnt = (struct vfsmount *) l_mnt;
50226 + struct acl_subject_label *retval;
50227 + struct dentry *parent;
50228 +
50229 + write_seqlock(&rename_lock);
50230 + br_read_lock(vfsmount_lock);
50231 +
50232 + for (;;) {
50233 + if (dentry == real_root.dentry && mnt == real_root.mnt)
50234 + break;
50235 + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
50236 + if (mnt->mnt_parent == mnt)
50237 + break;
50238 +
50239 + spin_lock(&dentry->d_lock);
50240 + read_lock(&gr_inode_lock);
50241 + retval =
50242 + lookup_acl_subj_label(dentry->d_inode->i_ino,
50243 + __get_dev(dentry), role);
50244 + read_unlock(&gr_inode_lock);
50245 + spin_unlock(&dentry->d_lock);
50246 + if (retval != NULL)
50247 + goto out;
50248 +
50249 + dentry = mnt->mnt_mountpoint;
50250 + mnt = mnt->mnt_parent;
50251 + continue;
50252 + }
50253 +
50254 + spin_lock(&dentry->d_lock);
50255 + read_lock(&gr_inode_lock);
50256 + retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
50257 + __get_dev(dentry), role);
50258 + read_unlock(&gr_inode_lock);
50259 + parent = dentry->d_parent;
50260 + spin_unlock(&dentry->d_lock);
50261 +
50262 + if (retval != NULL)
50263 + goto out;
50264 +
50265 + dentry = parent;
50266 + }
50267 +
50268 + spin_lock(&dentry->d_lock);
50269 + read_lock(&gr_inode_lock);
50270 + retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
50271 + __get_dev(dentry), role);
50272 + read_unlock(&gr_inode_lock);
50273 + spin_unlock(&dentry->d_lock);
50274 +
50275 + if (unlikely(retval == NULL)) {
50276 + /* real_root is pinned, we don't need to hold a reference */
50277 + read_lock(&gr_inode_lock);
50278 + retval = lookup_acl_subj_label(real_root.dentry->d_inode->i_ino,
50279 + __get_dev(real_root.dentry), role);
50280 + read_unlock(&gr_inode_lock);
50281 + }
50282 +out:
50283 + br_read_unlock(vfsmount_lock);
50284 + write_sequnlock(&rename_lock);
50285 +
50286 + BUG_ON(retval == NULL);
50287 +
50288 + return retval;
50289 +}
50290 +
50291 +static void
50292 +gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
50293 +{
50294 + struct task_struct *task = current;
50295 + const struct cred *cred = current_cred();
50296 +
50297 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
50298 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
50299 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
50300 + 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
50301 +
50302 + return;
50303 +}
50304 +
50305 +static void
50306 +gr_log_learn_sysctl(const char *path, const __u32 mode)
50307 +{
50308 + struct task_struct *task = current;
50309 + const struct cred *cred = current_cred();
50310 +
50311 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
50312 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
50313 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
50314 + 1UL, 1UL, path, (unsigned long) mode, &task->signal->saved_ip);
50315 +
50316 + return;
50317 +}
50318 +
50319 +static void
50320 +gr_log_learn_id_change(const char type, const unsigned int real,
50321 + const unsigned int effective, const unsigned int fs)
50322 +{
50323 + struct task_struct *task = current;
50324 + const struct cred *cred = current_cred();
50325 +
50326 + security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
50327 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
50328 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
50329 + type, real, effective, fs, &task->signal->saved_ip);
50330 +
50331 + return;
50332 +}
50333 +
50334 +__u32
50335 +gr_search_file(const struct dentry * dentry, const __u32 mode,
50336 + const struct vfsmount * mnt)
50337 +{
50338 + __u32 retval = mode;
50339 + struct acl_subject_label *curracl;
50340 + struct acl_object_label *currobj;
50341 +
50342 + if (unlikely(!(gr_status & GR_READY)))
50343 + return (mode & ~GR_AUDITS);
50344 +
50345 + curracl = current->acl;
50346 +
50347 + currobj = chk_obj_label(dentry, mnt, curracl);
50348 + retval = currobj->mode & mode;
50349 +
50350 + /* if we're opening a specified transfer file for writing
50351 + (e.g. /dev/initctl), then transfer our role to init
50352 + */
50353 + if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
50354 + current->role->roletype & GR_ROLE_PERSIST)) {
50355 + struct task_struct *task = init_pid_ns.child_reaper;
50356 +
50357 + if (task->role != current->role) {
50358 + task->acl_sp_role = 0;
50359 + task->acl_role_id = current->acl_role_id;
50360 + task->role = current->role;
50361 + rcu_read_lock();
50362 + read_lock(&grsec_exec_file_lock);
50363 + gr_apply_subject_to_task(task);
50364 + read_unlock(&grsec_exec_file_lock);
50365 + rcu_read_unlock();
50366 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
50367 + }
50368 + }
50369 +
50370 + if (unlikely
50371 + ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
50372 + && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
50373 + __u32 new_mode = mode;
50374 +
50375 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
50376 +
50377 + retval = new_mode;
50378 +
50379 + if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
50380 + new_mode |= GR_INHERIT;
50381 +
50382 + if (!(mode & GR_NOLEARN))
50383 + gr_log_learn(dentry, mnt, new_mode);
50384 + }
50385 +
50386 + return retval;
50387 +}
50388 +
50389 +struct acl_object_label *gr_get_create_object(const struct dentry *new_dentry,
50390 + const struct dentry *parent,
50391 + const struct vfsmount *mnt)
50392 +{
50393 + struct name_entry *match;
50394 + struct acl_object_label *matchpo;
50395 + struct acl_subject_label *curracl;
50396 + char *path;
50397 +
50398 + if (unlikely(!(gr_status & GR_READY)))
50399 + return NULL;
50400 +
50401 + preempt_disable();
50402 + path = gr_to_filename_rbac(new_dentry, mnt);
50403 + match = lookup_name_entry_create(path);
50404 +
50405 + curracl = current->acl;
50406 +
50407 + if (match) {
50408 + read_lock(&gr_inode_lock);
50409 + matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
50410 + read_unlock(&gr_inode_lock);
50411 +
50412 + if (matchpo) {
50413 + preempt_enable();
50414 + return matchpo;
50415 + }
50416 + }
50417 +
50418 + // lookup parent
50419 +
50420 + matchpo = chk_obj_create_label(parent, mnt, curracl, path);
50421 +
50422 + preempt_enable();
50423 + return matchpo;
50424 +}
50425 +
50426 +__u32
50427 +gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
50428 + const struct vfsmount * mnt, const __u32 mode)
50429 +{
50430 + struct acl_object_label *matchpo;
50431 + __u32 retval;
50432 +
50433 + if (unlikely(!(gr_status & GR_READY)))
50434 + return (mode & ~GR_AUDITS);
50435 +
50436 + matchpo = gr_get_create_object(new_dentry, parent, mnt);
50437 +
50438 + retval = matchpo->mode & mode;
50439 +
50440 + if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
50441 + && (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
50442 + __u32 new_mode = mode;
50443 +
50444 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
50445 +
50446 + gr_log_learn(new_dentry, mnt, new_mode);
50447 + return new_mode;
50448 + }
50449 +
50450 + return retval;
50451 +}
50452 +
50453 +__u32
50454 +gr_check_link(const struct dentry * new_dentry,
50455 + const struct dentry * parent_dentry,
50456 + const struct vfsmount * parent_mnt,
50457 + const struct dentry * old_dentry, const struct vfsmount * old_mnt)
50458 +{
50459 + struct acl_object_label *obj;
50460 + __u32 oldmode, newmode;
50461 + __u32 needmode;
50462 + __u32 checkmodes = GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | GR_SETID | GR_READ |
50463 + GR_DELETE | GR_INHERIT;
50464 +
50465 + if (unlikely(!(gr_status & GR_READY)))
50466 + return (GR_CREATE | GR_LINK);
50467 +
50468 + obj = chk_obj_label(old_dentry, old_mnt, current->acl);
50469 + oldmode = obj->mode;
50470 +
50471 + obj = gr_get_create_object(new_dentry, parent_dentry, parent_mnt);
50472 + newmode = obj->mode;
50473 +
50474 + needmode = newmode & checkmodes;
50475 +
50476 + // old name for hardlink must have at least the permissions of the new name
50477 + if ((oldmode & needmode) != needmode)
50478 + goto bad;
50479 +
50480 + // if old name had restrictions/auditing, make sure the new name does as well
50481 + needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
50482 +
50483 + // don't allow hardlinking of suid/sgid files without permission
50484 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
50485 + needmode |= GR_SETID;
50486 +
50487 + if ((newmode & needmode) != needmode)
50488 + goto bad;
50489 +
50490 + // enforce minimum permissions
50491 + if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
50492 + return newmode;
50493 +bad:
50494 + needmode = oldmode;
50495 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
50496 + needmode |= GR_SETID;
50497 +
50498 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
50499 + gr_log_learn(old_dentry, old_mnt, needmode | GR_CREATE | GR_LINK);
50500 + return (GR_CREATE | GR_LINK);
50501 + } else if (newmode & GR_SUPPRESS)
50502 + return GR_SUPPRESS;
50503 + else
50504 + return 0;
50505 +}
50506 +
50507 +int
50508 +gr_check_hidden_task(const struct task_struct *task)
50509 +{
50510 + if (unlikely(!(gr_status & GR_READY)))
50511 + return 0;
50512 +
50513 + if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
50514 + return 1;
50515 +
50516 + return 0;
50517 +}
50518 +
50519 +int
50520 +gr_check_protected_task(const struct task_struct *task)
50521 +{
50522 + if (unlikely(!(gr_status & GR_READY) || !task))
50523 + return 0;
50524 +
50525 + if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
50526 + task->acl != current->acl)
50527 + return 1;
50528 +
50529 + return 0;
50530 +}
50531 +
50532 +int
50533 +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
50534 +{
50535 + struct task_struct *p;
50536 + int ret = 0;
50537 +
50538 + if (unlikely(!(gr_status & GR_READY) || !pid))
50539 + return ret;
50540 +
50541 + read_lock(&tasklist_lock);
50542 + do_each_pid_task(pid, type, p) {
50543 + if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
50544 + p->acl != current->acl) {
50545 + ret = 1;
50546 + goto out;
50547 + }
50548 + } while_each_pid_task(pid, type, p);
50549 +out:
50550 + read_unlock(&tasklist_lock);
50551 +
50552 + return ret;
50553 +}
50554 +
50555 +void
50556 +gr_copy_label(struct task_struct *tsk)
50557 +{
50558 + tsk->signal->used_accept = 0;
50559 + tsk->acl_sp_role = 0;
50560 + tsk->acl_role_id = current->acl_role_id;
50561 + tsk->acl = current->acl;
50562 + tsk->role = current->role;
50563 + tsk->signal->curr_ip = current->signal->curr_ip;
50564 + tsk->signal->saved_ip = current->signal->saved_ip;
50565 + if (current->exec_file)
50566 + get_file(current->exec_file);
50567 + tsk->exec_file = current->exec_file;
50568 + tsk->is_writable = current->is_writable;
50569 + if (unlikely(current->signal->used_accept)) {
50570 + current->signal->curr_ip = 0;
50571 + current->signal->saved_ip = 0;
50572 + }
50573 +
50574 + return;
50575 +}
50576 +
50577 +static void
50578 +gr_set_proc_res(struct task_struct *task)
50579 +{
50580 + struct acl_subject_label *proc;
50581 + unsigned short i;
50582 +
50583 + proc = task->acl;
50584 +
50585 + if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
50586 + return;
50587 +
50588 + for (i = 0; i < RLIM_NLIMITS; i++) {
50589 + if (!(proc->resmask & (1 << i)))
50590 + continue;
50591 +
50592 + task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
50593 + task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
50594 + }
50595 +
50596 + return;
50597 +}
50598 +
50599 +extern int __gr_process_user_ban(struct user_struct *user);
50600 +
50601 +int
50602 +gr_check_user_change(int real, int effective, int fs)
50603 +{
50604 + unsigned int i;
50605 + __u16 num;
50606 + uid_t *uidlist;
50607 + int curuid;
50608 + int realok = 0;
50609 + int effectiveok = 0;
50610 + int fsok = 0;
50611 +
50612 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
50613 + struct user_struct *user;
50614 +
50615 + if (real == -1)
50616 + goto skipit;
50617 +
50618 + user = find_user(real);
50619 + if (user == NULL)
50620 + goto skipit;
50621 +
50622 + if (__gr_process_user_ban(user)) {
50623 + /* for find_user */
50624 + free_uid(user);
50625 + return 1;
50626 + }
50627 +
50628 + /* for find_user */
50629 + free_uid(user);
50630 +
50631 +skipit:
50632 +#endif
50633 +
50634 + if (unlikely(!(gr_status & GR_READY)))
50635 + return 0;
50636 +
50637 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
50638 + gr_log_learn_id_change('u', real, effective, fs);
50639 +
50640 + num = current->acl->user_trans_num;
50641 + uidlist = current->acl->user_transitions;
50642 +
50643 + if (uidlist == NULL)
50644 + return 0;
50645 +
50646 + if (real == -1)
50647 + realok = 1;
50648 + if (effective == -1)
50649 + effectiveok = 1;
50650 + if (fs == -1)
50651 + fsok = 1;
50652 +
50653 + if (current->acl->user_trans_type & GR_ID_ALLOW) {
50654 + for (i = 0; i < num; i++) {
50655 + curuid = (int)uidlist[i];
50656 + if (real == curuid)
50657 + realok = 1;
50658 + if (effective == curuid)
50659 + effectiveok = 1;
50660 + if (fs == curuid)
50661 + fsok = 1;
50662 + }
50663 + } else if (current->acl->user_trans_type & GR_ID_DENY) {
50664 + for (i = 0; i < num; i++) {
50665 + curuid = (int)uidlist[i];
50666 + if (real == curuid)
50667 + break;
50668 + if (effective == curuid)
50669 + break;
50670 + if (fs == curuid)
50671 + break;
50672 + }
50673 + /* not in deny list */
50674 + if (i == num) {
50675 + realok = 1;
50676 + effectiveok = 1;
50677 + fsok = 1;
50678 + }
50679 + }
50680 +
50681 + if (realok && effectiveok && fsok)
50682 + return 0;
50683 + else {
50684 + gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
50685 + return 1;
50686 + }
50687 +}
50688 +
50689 +int
50690 +gr_check_group_change(int real, int effective, int fs)
50691 +{
50692 + unsigned int i;
50693 + __u16 num;
50694 + gid_t *gidlist;
50695 + int curgid;
50696 + int realok = 0;
50697 + int effectiveok = 0;
50698 + int fsok = 0;
50699 +
50700 + if (unlikely(!(gr_status & GR_READY)))
50701 + return 0;
50702 +
50703 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
50704 + gr_log_learn_id_change('g', real, effective, fs);
50705 +
50706 + num = current->acl->group_trans_num;
50707 + gidlist = current->acl->group_transitions;
50708 +
50709 + if (gidlist == NULL)
50710 + return 0;
50711 +
50712 + if (real == -1)
50713 + realok = 1;
50714 + if (effective == -1)
50715 + effectiveok = 1;
50716 + if (fs == -1)
50717 + fsok = 1;
50718 +
50719 + if (current->acl->group_trans_type & GR_ID_ALLOW) {
50720 + for (i = 0; i < num; i++) {
50721 + curgid = (int)gidlist[i];
50722 + if (real == curgid)
50723 + realok = 1;
50724 + if (effective == curgid)
50725 + effectiveok = 1;
50726 + if (fs == curgid)
50727 + fsok = 1;
50728 + }
50729 + } else if (current->acl->group_trans_type & GR_ID_DENY) {
50730 + for (i = 0; i < num; i++) {
50731 + curgid = (int)gidlist[i];
50732 + if (real == curgid)
50733 + break;
50734 + if (effective == curgid)
50735 + break;
50736 + if (fs == curgid)
50737 + break;
50738 + }
50739 + /* not in deny list */
50740 + if (i == num) {
50741 + realok = 1;
50742 + effectiveok = 1;
50743 + fsok = 1;
50744 + }
50745 + }
50746 +
50747 + if (realok && effectiveok && fsok)
50748 + return 0;
50749 + else {
50750 + gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
50751 + return 1;
50752 + }
50753 +}
50754 +
50755 +void
50756 +gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid)
50757 +{
50758 + struct acl_role_label *role = task->role;
50759 + struct acl_subject_label *subj = NULL;
50760 + struct acl_object_label *obj;
50761 + struct file *filp;
50762 +
50763 + if (unlikely(!(gr_status & GR_READY)))
50764 + return;
50765 +
50766 + filp = task->exec_file;
50767 +
50768 + /* kernel process, we'll give them the kernel role */
50769 + if (unlikely(!filp)) {
50770 + task->role = kernel_role;
50771 + task->acl = kernel_role->root_label;
50772 + return;
50773 + } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
50774 + role = lookup_acl_role_label(task, uid, gid);
50775 +
50776 + /* perform subject lookup in possibly new role
50777 + we can use this result below in the case where role == task->role
50778 + */
50779 + subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
50780 +
50781 + /* if we changed uid/gid, but result in the same role
50782 + and are using inheritance, don't lose the inherited subject
50783 + if current subject is other than what normal lookup
50784 + would result in, we arrived via inheritance, don't
50785 + lose subject
50786 + */
50787 + if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
50788 + (subj == task->acl)))
50789 + task->acl = subj;
50790 +
50791 + task->role = role;
50792 +
50793 + task->is_writable = 0;
50794 +
50795 + /* ignore additional mmap checks for processes that are writable
50796 + by the default ACL */
50797 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
50798 + if (unlikely(obj->mode & GR_WRITE))
50799 + task->is_writable = 1;
50800 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
50801 + if (unlikely(obj->mode & GR_WRITE))
50802 + task->is_writable = 1;
50803 +
50804 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
50805 + printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
50806 +#endif
50807 +
50808 + gr_set_proc_res(task);
50809 +
50810 + return;
50811 +}
50812 +
50813 +int
50814 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
50815 + const int unsafe_flags)
50816 +{
50817 + struct task_struct *task = current;
50818 + struct acl_subject_label *newacl;
50819 + struct acl_object_label *obj;
50820 + __u32 retmode;
50821 +
50822 + if (unlikely(!(gr_status & GR_READY)))
50823 + return 0;
50824 +
50825 + newacl = chk_subj_label(dentry, mnt, task->role);
50826 +
50827 + task_lock(task);
50828 + if (unsafe_flags && !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
50829 + !(task->role->roletype & GR_ROLE_GOD) &&
50830 + !gr_search_file(dentry, GR_PTRACERD, mnt) &&
50831 + !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
50832 + task_unlock(task);
50833 + if (unsafe_flags & LSM_UNSAFE_SHARE)
50834 + gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
50835 + else
50836 + gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
50837 + return -EACCES;
50838 + }
50839 + task_unlock(task);
50840 +
50841 + obj = chk_obj_label(dentry, mnt, task->acl);
50842 + retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
50843 +
50844 + if (!(task->acl->mode & GR_INHERITLEARN) &&
50845 + ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
50846 + if (obj->nested)
50847 + task->acl = obj->nested;
50848 + else
50849 + task->acl = newacl;
50850 + } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
50851 + gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
50852 +
50853 + task->is_writable = 0;
50854 +
50855 + /* ignore additional mmap checks for processes that are writable
50856 + by the default ACL */
50857 + obj = chk_obj_label(dentry, mnt, default_role->root_label);
50858 + if (unlikely(obj->mode & GR_WRITE))
50859 + task->is_writable = 1;
50860 + obj = chk_obj_label(dentry, mnt, task->role->root_label);
50861 + if (unlikely(obj->mode & GR_WRITE))
50862 + task->is_writable = 1;
50863 +
50864 + gr_set_proc_res(task);
50865 +
50866 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
50867 + printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
50868 +#endif
50869 + return 0;
50870 +}
50871 +
50872 +/* always called with valid inodev ptr */
50873 +static void
50874 +do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
50875 +{
50876 + struct acl_object_label *matchpo;
50877 + struct acl_subject_label *matchps;
50878 + struct acl_subject_label *subj;
50879 + struct acl_role_label *role;
50880 + unsigned int x;
50881 +
50882 + FOR_EACH_ROLE_START(role)
50883 + FOR_EACH_SUBJECT_START(role, subj, x)
50884 + if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
50885 + matchpo->mode |= GR_DELETED;
50886 + FOR_EACH_SUBJECT_END(subj,x)
50887 + FOR_EACH_NESTED_SUBJECT_START(role, subj)
50888 + if (subj->inode == ino && subj->device == dev)
50889 + subj->mode |= GR_DELETED;
50890 + FOR_EACH_NESTED_SUBJECT_END(subj)
50891 + if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
50892 + matchps->mode |= GR_DELETED;
50893 + FOR_EACH_ROLE_END(role)
50894 +
50895 + inodev->nentry->deleted = 1;
50896 +
50897 + return;
50898 +}
50899 +
50900 +void
50901 +gr_handle_delete(const ino_t ino, const dev_t dev)
50902 +{
50903 + struct inodev_entry *inodev;
50904 +
50905 + if (unlikely(!(gr_status & GR_READY)))
50906 + return;
50907 +
50908 + write_lock(&gr_inode_lock);
50909 + inodev = lookup_inodev_entry(ino, dev);
50910 + if (inodev != NULL)
50911 + do_handle_delete(inodev, ino, dev);
50912 + write_unlock(&gr_inode_lock);
50913 +
50914 + return;
50915 +}
50916 +
50917 +static void
50918 +update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
50919 + const ino_t newinode, const dev_t newdevice,
50920 + struct acl_subject_label *subj)
50921 +{
50922 + unsigned int index = fhash(oldinode, olddevice, subj->obj_hash_size);
50923 + struct acl_object_label *match;
50924 +
50925 + match = subj->obj_hash[index];
50926 +
50927 + while (match && (match->inode != oldinode ||
50928 + match->device != olddevice ||
50929 + !(match->mode & GR_DELETED)))
50930 + match = match->next;
50931 +
50932 + if (match && (match->inode == oldinode)
50933 + && (match->device == olddevice)
50934 + && (match->mode & GR_DELETED)) {
50935 + if (match->prev == NULL) {
50936 + subj->obj_hash[index] = match->next;
50937 + if (match->next != NULL)
50938 + match->next->prev = NULL;
50939 + } else {
50940 + match->prev->next = match->next;
50941 + if (match->next != NULL)
50942 + match->next->prev = match->prev;
50943 + }
50944 + match->prev = NULL;
50945 + match->next = NULL;
50946 + match->inode = newinode;
50947 + match->device = newdevice;
50948 + match->mode &= ~GR_DELETED;
50949 +
50950 + insert_acl_obj_label(match, subj);
50951 + }
50952 +
50953 + return;
50954 +}
50955 +
50956 +static void
50957 +update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
50958 + const ino_t newinode, const dev_t newdevice,
50959 + struct acl_role_label *role)
50960 +{
50961 + unsigned int index = fhash(oldinode, olddevice, role->subj_hash_size);
50962 + struct acl_subject_label *match;
50963 +
50964 + match = role->subj_hash[index];
50965 +
50966 + while (match && (match->inode != oldinode ||
50967 + match->device != olddevice ||
50968 + !(match->mode & GR_DELETED)))
50969 + match = match->next;
50970 +
50971 + if (match && (match->inode == oldinode)
50972 + && (match->device == olddevice)
50973 + && (match->mode & GR_DELETED)) {
50974 + if (match->prev == NULL) {
50975 + role->subj_hash[index] = match->next;
50976 + if (match->next != NULL)
50977 + match->next->prev = NULL;
50978 + } else {
50979 + match->prev->next = match->next;
50980 + if (match->next != NULL)
50981 + match->next->prev = match->prev;
50982 + }
50983 + match->prev = NULL;
50984 + match->next = NULL;
50985 + match->inode = newinode;
50986 + match->device = newdevice;
50987 + match->mode &= ~GR_DELETED;
50988 +
50989 + insert_acl_subj_label(match, role);
50990 + }
50991 +
50992 + return;
50993 +}
50994 +
50995 +static void
50996 +update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
50997 + const ino_t newinode, const dev_t newdevice)
50998 +{
50999 + unsigned int index = fhash(oldinode, olddevice, inodev_set.i_size);
51000 + struct inodev_entry *match;
51001 +
51002 + match = inodev_set.i_hash[index];
51003 +
51004 + while (match && (match->nentry->inode != oldinode ||
51005 + match->nentry->device != olddevice || !match->nentry->deleted))
51006 + match = match->next;
51007 +
51008 + if (match && (match->nentry->inode == oldinode)
51009 + && (match->nentry->device == olddevice) &&
51010 + match->nentry->deleted) {
51011 + if (match->prev == NULL) {
51012 + inodev_set.i_hash[index] = match->next;
51013 + if (match->next != NULL)
51014 + match->next->prev = NULL;
51015 + } else {
51016 + match->prev->next = match->next;
51017 + if (match->next != NULL)
51018 + match->next->prev = match->prev;
51019 + }
51020 + match->prev = NULL;
51021 + match->next = NULL;
51022 + match->nentry->inode = newinode;
51023 + match->nentry->device = newdevice;
51024 + match->nentry->deleted = 0;
51025 +
51026 + insert_inodev_entry(match);
51027 + }
51028 +
51029 + return;
51030 +}
51031 +
51032 +static void
51033 +__do_handle_create(const struct name_entry *matchn, ino_t ino, dev_t dev)
51034 +{
51035 + struct acl_subject_label *subj;
51036 + struct acl_role_label *role;
51037 + unsigned int x;
51038 +
51039 + FOR_EACH_ROLE_START(role)
51040 + update_acl_subj_label(matchn->inode, matchn->device, ino, dev, role);
51041 +
51042 + FOR_EACH_NESTED_SUBJECT_START(role, subj)
51043 + if ((subj->inode == ino) && (subj->device == dev)) {
51044 + subj->inode = ino;
51045 + subj->device = dev;
51046 + }
51047 + FOR_EACH_NESTED_SUBJECT_END(subj)
51048 + FOR_EACH_SUBJECT_START(role, subj, x)
51049 + update_acl_obj_label(matchn->inode, matchn->device,
51050 + ino, dev, subj);
51051 + FOR_EACH_SUBJECT_END(subj,x)
51052 + FOR_EACH_ROLE_END(role)
51053 +
51054 + update_inodev_entry(matchn->inode, matchn->device, ino, dev);
51055 +
51056 + return;
51057 +}
51058 +
51059 +static void
51060 +do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
51061 + const struct vfsmount *mnt)
51062 +{
51063 + ino_t ino = dentry->d_inode->i_ino;
51064 + dev_t dev = __get_dev(dentry);
51065 +
51066 + __do_handle_create(matchn, ino, dev);
51067 +
51068 + return;
51069 +}
51070 +
51071 +void
51072 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
51073 +{
51074 + struct name_entry *matchn;
51075 +
51076 + if (unlikely(!(gr_status & GR_READY)))
51077 + return;
51078 +
51079 + preempt_disable();
51080 + matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
51081 +
51082 + if (unlikely((unsigned long)matchn)) {
51083 + write_lock(&gr_inode_lock);
51084 + do_handle_create(matchn, dentry, mnt);
51085 + write_unlock(&gr_inode_lock);
51086 + }
51087 + preempt_enable();
51088 +
51089 + return;
51090 +}
51091 +
51092 +void
51093 +gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
51094 +{
51095 + struct name_entry *matchn;
51096 +
51097 + if (unlikely(!(gr_status & GR_READY)))
51098 + return;
51099 +
51100 + preempt_disable();
51101 + matchn = lookup_name_entry(gr_to_proc_filename_rbac(dentry, init_pid_ns.proc_mnt));
51102 +
51103 + if (unlikely((unsigned long)matchn)) {
51104 + write_lock(&gr_inode_lock);
51105 + __do_handle_create(matchn, inode->i_ino, inode->i_sb->s_dev);
51106 + write_unlock(&gr_inode_lock);
51107 + }
51108 + preempt_enable();
51109 +
51110 + return;
51111 +}
51112 +
51113 +void
51114 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
51115 + struct dentry *old_dentry,
51116 + struct dentry *new_dentry,
51117 + struct vfsmount *mnt, const __u8 replace)
51118 +{
51119 + struct name_entry *matchn;
51120 + struct inodev_entry *inodev;
51121 + struct inode *inode = new_dentry->d_inode;
51122 + ino_t old_ino = old_dentry->d_inode->i_ino;
51123 + dev_t old_dev = __get_dev(old_dentry);
51124 +
51125 + /* vfs_rename swaps the name and parent link for old_dentry and
51126 + new_dentry
51127 + at this point, old_dentry has the new name, parent link, and inode
51128 + for the renamed file
51129 + if a file is being replaced by a rename, new_dentry has the inode
51130 + and name for the replaced file
51131 + */
51132 +
51133 + if (unlikely(!(gr_status & GR_READY)))
51134 + return;
51135 +
51136 + preempt_disable();
51137 + matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
51138 +
51139 + /* we wouldn't have to check d_inode if it weren't for
51140 + NFS silly-renaming
51141 + */
51142 +
51143 + write_lock(&gr_inode_lock);
51144 + if (unlikely(replace && inode)) {
51145 + ino_t new_ino = inode->i_ino;
51146 + dev_t new_dev = __get_dev(new_dentry);
51147 +
51148 + inodev = lookup_inodev_entry(new_ino, new_dev);
51149 + if (inodev != NULL && ((inode->i_nlink <= 1) || S_ISDIR(inode->i_mode)))
51150 + do_handle_delete(inodev, new_ino, new_dev);
51151 + }
51152 +
51153 + inodev = lookup_inodev_entry(old_ino, old_dev);
51154 + if (inodev != NULL && ((old_dentry->d_inode->i_nlink <= 1) || S_ISDIR(old_dentry->d_inode->i_mode)))
51155 + do_handle_delete(inodev, old_ino, old_dev);
51156 +
51157 + if (unlikely((unsigned long)matchn))
51158 + do_handle_create(matchn, old_dentry, mnt);
51159 +
51160 + write_unlock(&gr_inode_lock);
51161 + preempt_enable();
51162 +
51163 + return;
51164 +}
51165 +
51166 +static int
51167 +lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
51168 + unsigned char **sum)
51169 +{
51170 + struct acl_role_label *r;
51171 + struct role_allowed_ip *ipp;
51172 + struct role_transition *trans;
51173 + unsigned int i;
51174 + int found = 0;
51175 + u32 curr_ip = current->signal->curr_ip;
51176 +
51177 + current->signal->saved_ip = curr_ip;
51178 +
51179 + /* check transition table */
51180 +
51181 + for (trans = current->role->transitions; trans; trans = trans->next) {
51182 + if (!strcmp(rolename, trans->rolename)) {
51183 + found = 1;
51184 + break;
51185 + }
51186 + }
51187 +
51188 + if (!found)
51189 + return 0;
51190 +
51191 + /* handle special roles that do not require authentication
51192 + and check ip */
51193 +
51194 + FOR_EACH_ROLE_START(r)
51195 + if (!strcmp(rolename, r->rolename) &&
51196 + (r->roletype & GR_ROLE_SPECIAL)) {
51197 + found = 0;
51198 + if (r->allowed_ips != NULL) {
51199 + for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
51200 + if ((ntohl(curr_ip) & ipp->netmask) ==
51201 + (ntohl(ipp->addr) & ipp->netmask))
51202 + found = 1;
51203 + }
51204 + } else
51205 + found = 2;
51206 + if (!found)
51207 + return 0;
51208 +
51209 + if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
51210 + ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
51211 + *salt = NULL;
51212 + *sum = NULL;
51213 + return 1;
51214 + }
51215 + }
51216 + FOR_EACH_ROLE_END(r)
51217 +
51218 + for (i = 0; i < num_sprole_pws; i++) {
51219 + if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
51220 + *salt = acl_special_roles[i]->salt;
51221 + *sum = acl_special_roles[i]->sum;
51222 + return 1;
51223 + }
51224 + }
51225 +
51226 + return 0;
51227 +}
51228 +
51229 +static void
51230 +assign_special_role(char *rolename)
51231 +{
51232 + struct acl_object_label *obj;
51233 + struct acl_role_label *r;
51234 + struct acl_role_label *assigned = NULL;
51235 + struct task_struct *tsk;
51236 + struct file *filp;
51237 +
51238 + FOR_EACH_ROLE_START(r)
51239 + if (!strcmp(rolename, r->rolename) &&
51240 + (r->roletype & GR_ROLE_SPECIAL)) {
51241 + assigned = r;
51242 + break;
51243 + }
51244 + FOR_EACH_ROLE_END(r)
51245 +
51246 + if (!assigned)
51247 + return;
51248 +
51249 + read_lock(&tasklist_lock);
51250 + read_lock(&grsec_exec_file_lock);
51251 +
51252 + tsk = current->real_parent;
51253 + if (tsk == NULL)
51254 + goto out_unlock;
51255 +
51256 + filp = tsk->exec_file;
51257 + if (filp == NULL)
51258 + goto out_unlock;
51259 +
51260 + tsk->is_writable = 0;
51261 +
51262 + tsk->acl_sp_role = 1;
51263 + tsk->acl_role_id = ++acl_sp_role_value;
51264 + tsk->role = assigned;
51265 + tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
51266 +
51267 + /* ignore additional mmap checks for processes that are writable
51268 + by the default ACL */
51269 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
51270 + if (unlikely(obj->mode & GR_WRITE))
51271 + tsk->is_writable = 1;
51272 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
51273 + if (unlikely(obj->mode & GR_WRITE))
51274 + tsk->is_writable = 1;
51275 +
51276 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
51277 + printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid);
51278 +#endif
51279 +
51280 +out_unlock:
51281 + read_unlock(&grsec_exec_file_lock);
51282 + read_unlock(&tasklist_lock);
51283 + return;
51284 +}
51285 +
51286 +int gr_check_secure_terminal(struct task_struct *task)
51287 +{
51288 + struct task_struct *p, *p2, *p3;
51289 + struct files_struct *files;
51290 + struct fdtable *fdt;
51291 + struct file *our_file = NULL, *file;
51292 + int i;
51293 +
51294 + if (task->signal->tty == NULL)
51295 + return 1;
51296 +
51297 + files = get_files_struct(task);
51298 + if (files != NULL) {
51299 + rcu_read_lock();
51300 + fdt = files_fdtable(files);
51301 + for (i=0; i < fdt->max_fds; i++) {
51302 + file = fcheck_files(files, i);
51303 + if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
51304 + get_file(file);
51305 + our_file = file;
51306 + }
51307 + }
51308 + rcu_read_unlock();
51309 + put_files_struct(files);
51310 + }
51311 +
51312 + if (our_file == NULL)
51313 + return 1;
51314 +
51315 + read_lock(&tasklist_lock);
51316 + do_each_thread(p2, p) {
51317 + files = get_files_struct(p);
51318 + if (files == NULL ||
51319 + (p->signal && p->signal->tty == task->signal->tty)) {
51320 + if (files != NULL)
51321 + put_files_struct(files);
51322 + continue;
51323 + }
51324 + rcu_read_lock();
51325 + fdt = files_fdtable(files);
51326 + for (i=0; i < fdt->max_fds; i++) {
51327 + file = fcheck_files(files, i);
51328 + if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
51329 + file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
51330 + p3 = task;
51331 + while (p3->pid > 0) {
51332 + if (p3 == p)
51333 + break;
51334 + p3 = p3->real_parent;
51335 + }
51336 + if (p3 == p)
51337 + break;
51338 + gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
51339 + gr_handle_alertkill(p);
51340 + rcu_read_unlock();
51341 + put_files_struct(files);
51342 + read_unlock(&tasklist_lock);
51343 + fput(our_file);
51344 + return 0;
51345 + }
51346 + }
51347 + rcu_read_unlock();
51348 + put_files_struct(files);
51349 + } while_each_thread(p2, p);
51350 + read_unlock(&tasklist_lock);
51351 +
51352 + fput(our_file);
51353 + return 1;
51354 +}
51355 +
51356 +ssize_t
51357 +write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
51358 +{
51359 + struct gr_arg_wrapper uwrap;
51360 + unsigned char *sprole_salt = NULL;
51361 + unsigned char *sprole_sum = NULL;
51362 + int error = sizeof (struct gr_arg_wrapper);
51363 + int error2 = 0;
51364 +
51365 + mutex_lock(&gr_dev_mutex);
51366 +
51367 + if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
51368 + error = -EPERM;
51369 + goto out;
51370 + }
51371 +
51372 + if (count != sizeof (struct gr_arg_wrapper)) {
51373 + gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
51374 + error = -EINVAL;
51375 + goto out;
51376 + }
51377 +
51378 +
51379 + if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
51380 + gr_auth_expires = 0;
51381 + gr_auth_attempts = 0;
51382 + }
51383 +
51384 + if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
51385 + error = -EFAULT;
51386 + goto out;
51387 + }
51388 +
51389 + if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
51390 + error = -EINVAL;
51391 + goto out;
51392 + }
51393 +
51394 + if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
51395 + error = -EFAULT;
51396 + goto out;
51397 + }
51398 +
51399 + if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
51400 + gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
51401 + time_after(gr_auth_expires, get_seconds())) {
51402 + error = -EBUSY;
51403 + goto out;
51404 + }
51405 +
51406 + /* if non-root trying to do anything other than use a special role,
51407 + do not attempt authentication, do not count towards authentication
51408 + locking
51409 + */
51410 +
51411 + if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
51412 + gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
51413 + current_uid()) {
51414 + error = -EPERM;
51415 + goto out;
51416 + }
51417 +
51418 + /* ensure pw and special role name are null terminated */
51419 +
51420 + gr_usermode->pw[GR_PW_LEN - 1] = '\0';
51421 + gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
51422 +
51423 + /* Okay.
51424 + * We have our enough of the argument structure..(we have yet
51425 + * to copy_from_user the tables themselves) . Copy the tables
51426 + * only if we need them, i.e. for loading operations. */
51427 +
51428 + switch (gr_usermode->mode) {
51429 + case GR_STATUS:
51430 + if (gr_status & GR_READY) {
51431 + error = 1;
51432 + if (!gr_check_secure_terminal(current))
51433 + error = 3;
51434 + } else
51435 + error = 2;
51436 + goto out;
51437 + case GR_SHUTDOWN:
51438 + if ((gr_status & GR_READY)
51439 + && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
51440 + pax_open_kernel();
51441 + gr_status &= ~GR_READY;
51442 + pax_close_kernel();
51443 +
51444 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
51445 + free_variables();
51446 + memset(gr_usermode, 0, sizeof (struct gr_arg));
51447 + memset(gr_system_salt, 0, GR_SALT_LEN);
51448 + memset(gr_system_sum, 0, GR_SHA_LEN);
51449 + } else if (gr_status & GR_READY) {
51450 + gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
51451 + error = -EPERM;
51452 + } else {
51453 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
51454 + error = -EAGAIN;
51455 + }
51456 + break;
51457 + case GR_ENABLE:
51458 + if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
51459 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
51460 + else {
51461 + if (gr_status & GR_READY)
51462 + error = -EAGAIN;
51463 + else
51464 + error = error2;
51465 + gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
51466 + }
51467 + break;
51468 + case GR_RELOAD:
51469 + if (!(gr_status & GR_READY)) {
51470 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
51471 + error = -EAGAIN;
51472 + } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
51473 + preempt_disable();
51474 +
51475 + pax_open_kernel();
51476 + gr_status &= ~GR_READY;
51477 + pax_close_kernel();
51478 +
51479 + free_variables();
51480 + if (!(error2 = gracl_init(gr_usermode))) {
51481 + preempt_enable();
51482 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
51483 + } else {
51484 + preempt_enable();
51485 + error = error2;
51486 + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
51487 + }
51488 + } else {
51489 + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
51490 + error = -EPERM;
51491 + }
51492 + break;
51493 + case GR_SEGVMOD:
51494 + if (unlikely(!(gr_status & GR_READY))) {
51495 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
51496 + error = -EAGAIN;
51497 + break;
51498 + }
51499 +
51500 + if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
51501 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
51502 + if (gr_usermode->segv_device && gr_usermode->segv_inode) {
51503 + struct acl_subject_label *segvacl;
51504 + segvacl =
51505 + lookup_acl_subj_label(gr_usermode->segv_inode,
51506 + gr_usermode->segv_device,
51507 + current->role);
51508 + if (segvacl) {
51509 + segvacl->crashes = 0;
51510 + segvacl->expires = 0;
51511 + }
51512 + } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
51513 + gr_remove_uid(gr_usermode->segv_uid);
51514 + }
51515 + } else {
51516 + gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
51517 + error = -EPERM;
51518 + }
51519 + break;
51520 + case GR_SPROLE:
51521 + case GR_SPROLEPAM:
51522 + if (unlikely(!(gr_status & GR_READY))) {
51523 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
51524 + error = -EAGAIN;
51525 + break;
51526 + }
51527 +
51528 + if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
51529 + current->role->expires = 0;
51530 + current->role->auth_attempts = 0;
51531 + }
51532 +
51533 + if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
51534 + time_after(current->role->expires, get_seconds())) {
51535 + error = -EBUSY;
51536 + goto out;
51537 + }
51538 +
51539 + if (lookup_special_role_auth
51540 + (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
51541 + && ((!sprole_salt && !sprole_sum)
51542 + || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
51543 + char *p = "";
51544 + assign_special_role(gr_usermode->sp_role);
51545 + read_lock(&tasklist_lock);
51546 + if (current->real_parent)
51547 + p = current->real_parent->role->rolename;
51548 + read_unlock(&tasklist_lock);
51549 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
51550 + p, acl_sp_role_value);
51551 + } else {
51552 + gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
51553 + error = -EPERM;
51554 + if(!(current->role->auth_attempts++))
51555 + current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
51556 +
51557 + goto out;
51558 + }
51559 + break;
51560 + case GR_UNSPROLE:
51561 + if (unlikely(!(gr_status & GR_READY))) {
51562 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
51563 + error = -EAGAIN;
51564 + break;
51565 + }
51566 +
51567 + if (current->role->roletype & GR_ROLE_SPECIAL) {
51568 + char *p = "";
51569 + int i = 0;
51570 +
51571 + read_lock(&tasklist_lock);
51572 + if (current->real_parent) {
51573 + p = current->real_parent->role->rolename;
51574 + i = current->real_parent->acl_role_id;
51575 + }
51576 + read_unlock(&tasklist_lock);
51577 +
51578 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
51579 + gr_set_acls(1);
51580 + } else {
51581 + error = -EPERM;
51582 + goto out;
51583 + }
51584 + break;
51585 + default:
51586 + gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
51587 + error = -EINVAL;
51588 + break;
51589 + }
51590 +
51591 + if (error != -EPERM)
51592 + goto out;
51593 +
51594 + if(!(gr_auth_attempts++))
51595 + gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
51596 +
51597 + out:
51598 + mutex_unlock(&gr_dev_mutex);
51599 + return error;
51600 +}
51601 +
51602 +/* must be called with
51603 + rcu_read_lock();
51604 + read_lock(&tasklist_lock);
51605 + read_lock(&grsec_exec_file_lock);
51606 +*/
51607 +int gr_apply_subject_to_task(struct task_struct *task)
51608 +{
51609 + struct acl_object_label *obj;
51610 + char *tmpname;
51611 + struct acl_subject_label *tmpsubj;
51612 + struct file *filp;
51613 + struct name_entry *nmatch;
51614 +
51615 + filp = task->exec_file;
51616 + if (filp == NULL)
51617 + return 0;
51618 +
51619 + /* the following is to apply the correct subject
51620 + on binaries running when the RBAC system
51621 + is enabled, when the binaries have been
51622 + replaced or deleted since their execution
51623 + -----
51624 + when the RBAC system starts, the inode/dev
51625 + from exec_file will be one the RBAC system
51626 + is unaware of. It only knows the inode/dev
51627 + of the present file on disk, or the absence
51628 + of it.
51629 + */
51630 + preempt_disable();
51631 + tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
51632 +
51633 + nmatch = lookup_name_entry(tmpname);
51634 + preempt_enable();
51635 + tmpsubj = NULL;
51636 + if (nmatch) {
51637 + if (nmatch->deleted)
51638 + tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
51639 + else
51640 + tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
51641 + if (tmpsubj != NULL)
51642 + task->acl = tmpsubj;
51643 + }
51644 + if (tmpsubj == NULL)
51645 + task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
51646 + task->role);
51647 + if (task->acl) {
51648 + task->is_writable = 0;
51649 + /* ignore additional mmap checks for processes that are writable
51650 + by the default ACL */
51651 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
51652 + if (unlikely(obj->mode & GR_WRITE))
51653 + task->is_writable = 1;
51654 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
51655 + if (unlikely(obj->mode & GR_WRITE))
51656 + task->is_writable = 1;
51657 +
51658 + gr_set_proc_res(task);
51659 +
51660 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
51661 + printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
51662 +#endif
51663 + } else {
51664 + return 1;
51665 + }
51666 +
51667 + return 0;
51668 +}
51669 +
51670 +int
51671 +gr_set_acls(const int type)
51672 +{
51673 + struct task_struct *task, *task2;
51674 + struct acl_role_label *role = current->role;
51675 + __u16 acl_role_id = current->acl_role_id;
51676 + const struct cred *cred;
51677 + int ret;
51678 +
51679 + rcu_read_lock();
51680 + read_lock(&tasklist_lock);
51681 + read_lock(&grsec_exec_file_lock);
51682 + do_each_thread(task2, task) {
51683 + /* check to see if we're called from the exit handler,
51684 + if so, only replace ACLs that have inherited the admin
51685 + ACL */
51686 +
51687 + if (type && (task->role != role ||
51688 + task->acl_role_id != acl_role_id))
51689 + continue;
51690 +
51691 + task->acl_role_id = 0;
51692 + task->acl_sp_role = 0;
51693 +
51694 + if (task->exec_file) {
51695 + cred = __task_cred(task);
51696 + task->role = lookup_acl_role_label(task, cred->uid, cred->gid);
51697 + ret = gr_apply_subject_to_task(task);
51698 + if (ret) {
51699 + read_unlock(&grsec_exec_file_lock);
51700 + read_unlock(&tasklist_lock);
51701 + rcu_read_unlock();
51702 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task->pid);
51703 + return ret;
51704 + }
51705 + } else {
51706 + // it's a kernel process
51707 + task->role = kernel_role;
51708 + task->acl = kernel_role->root_label;
51709 +#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
51710 + task->acl->mode &= ~GR_PROCFIND;
51711 +#endif
51712 + }
51713 + } while_each_thread(task2, task);
51714 + read_unlock(&grsec_exec_file_lock);
51715 + read_unlock(&tasklist_lock);
51716 + rcu_read_unlock();
51717 +
51718 + return 0;
51719 +}
51720 +
51721 +void
51722 +gr_learn_resource(const struct task_struct *task,
51723 + const int res, const unsigned long wanted, const int gt)
51724 +{
51725 + struct acl_subject_label *acl;
51726 + const struct cred *cred;
51727 +
51728 + if (unlikely((gr_status & GR_READY) &&
51729 + task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
51730 + goto skip_reslog;
51731 +
51732 +#ifdef CONFIG_GRKERNSEC_RESLOG
51733 + gr_log_resource(task, res, wanted, gt);
51734 +#endif
51735 + skip_reslog:
51736 +
51737 + if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
51738 + return;
51739 +
51740 + acl = task->acl;
51741 +
51742 + if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
51743 + !(acl->resmask & (1 << (unsigned short) res))))
51744 + return;
51745 +
51746 + if (wanted >= acl->res[res].rlim_cur) {
51747 + unsigned long res_add;
51748 +
51749 + res_add = wanted;
51750 + switch (res) {
51751 + case RLIMIT_CPU:
51752 + res_add += GR_RLIM_CPU_BUMP;
51753 + break;
51754 + case RLIMIT_FSIZE:
51755 + res_add += GR_RLIM_FSIZE_BUMP;
51756 + break;
51757 + case RLIMIT_DATA:
51758 + res_add += GR_RLIM_DATA_BUMP;
51759 + break;
51760 + case RLIMIT_STACK:
51761 + res_add += GR_RLIM_STACK_BUMP;
51762 + break;
51763 + case RLIMIT_CORE:
51764 + res_add += GR_RLIM_CORE_BUMP;
51765 + break;
51766 + case RLIMIT_RSS:
51767 + res_add += GR_RLIM_RSS_BUMP;
51768 + break;
51769 + case RLIMIT_NPROC:
51770 + res_add += GR_RLIM_NPROC_BUMP;
51771 + break;
51772 + case RLIMIT_NOFILE:
51773 + res_add += GR_RLIM_NOFILE_BUMP;
51774 + break;
51775 + case RLIMIT_MEMLOCK:
51776 + res_add += GR_RLIM_MEMLOCK_BUMP;
51777 + break;
51778 + case RLIMIT_AS:
51779 + res_add += GR_RLIM_AS_BUMP;
51780 + break;
51781 + case RLIMIT_LOCKS:
51782 + res_add += GR_RLIM_LOCKS_BUMP;
51783 + break;
51784 + case RLIMIT_SIGPENDING:
51785 + res_add += GR_RLIM_SIGPENDING_BUMP;
51786 + break;
51787 + case RLIMIT_MSGQUEUE:
51788 + res_add += GR_RLIM_MSGQUEUE_BUMP;
51789 + break;
51790 + case RLIMIT_NICE:
51791 + res_add += GR_RLIM_NICE_BUMP;
51792 + break;
51793 + case RLIMIT_RTPRIO:
51794 + res_add += GR_RLIM_RTPRIO_BUMP;
51795 + break;
51796 + case RLIMIT_RTTIME:
51797 + res_add += GR_RLIM_RTTIME_BUMP;
51798 + break;
51799 + }
51800 +
51801 + acl->res[res].rlim_cur = res_add;
51802 +
51803 + if (wanted > acl->res[res].rlim_max)
51804 + acl->res[res].rlim_max = res_add;
51805 +
51806 + /* only log the subject filename, since resource logging is supported for
51807 + single-subject learning only */
51808 + rcu_read_lock();
51809 + cred = __task_cred(task);
51810 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
51811 + task->role->roletype, cred->uid, cred->gid, acl->filename,
51812 + acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
51813 + "", (unsigned long) res, &task->signal->saved_ip);
51814 + rcu_read_unlock();
51815 + }
51816 +
51817 + return;
51818 +}
51819 +
51820 +#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
51821 +void
51822 +pax_set_initial_flags(struct linux_binprm *bprm)
51823 +{
51824 + struct task_struct *task = current;
51825 + struct acl_subject_label *proc;
51826 + unsigned long flags;
51827 +
51828 + if (unlikely(!(gr_status & GR_READY)))
51829 + return;
51830 +
51831 + flags = pax_get_flags(task);
51832 +
51833 + proc = task->acl;
51834 +
51835 + if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
51836 + flags &= ~MF_PAX_PAGEEXEC;
51837 + if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
51838 + flags &= ~MF_PAX_SEGMEXEC;
51839 + if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
51840 + flags &= ~MF_PAX_RANDMMAP;
51841 + if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
51842 + flags &= ~MF_PAX_EMUTRAMP;
51843 + if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
51844 + flags &= ~MF_PAX_MPROTECT;
51845 +
51846 + if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
51847 + flags |= MF_PAX_PAGEEXEC;
51848 + if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
51849 + flags |= MF_PAX_SEGMEXEC;
51850 + if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
51851 + flags |= MF_PAX_RANDMMAP;
51852 + if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
51853 + flags |= MF_PAX_EMUTRAMP;
51854 + if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
51855 + flags |= MF_PAX_MPROTECT;
51856 +
51857 + pax_set_flags(task, flags);
51858 +
51859 + return;
51860 +}
51861 +#endif
51862 +
51863 +#ifdef CONFIG_SYSCTL
51864 +/* Eric Biederman likes breaking userland ABI and every inode-based security
51865 + system to save 35kb of memory */
51866 +
51867 +/* we modify the passed in filename, but adjust it back before returning */
51868 +static struct acl_object_label *gr_lookup_by_name(char *name, unsigned int len)
51869 +{
51870 + struct name_entry *nmatch;
51871 + char *p, *lastp = NULL;
51872 + struct acl_object_label *obj = NULL, *tmp;
51873 + struct acl_subject_label *tmpsubj;
51874 + char c = '\0';
51875 +
51876 + read_lock(&gr_inode_lock);
51877 +
51878 + p = name + len - 1;
51879 + do {
51880 + nmatch = lookup_name_entry(name);
51881 + if (lastp != NULL)
51882 + *lastp = c;
51883 +
51884 + if (nmatch == NULL)
51885 + goto next_component;
51886 + tmpsubj = current->acl;
51887 + do {
51888 + obj = lookup_acl_obj_label(nmatch->inode, nmatch->device, tmpsubj);
51889 + if (obj != NULL) {
51890 + tmp = obj->globbed;
51891 + while (tmp) {
51892 + if (!glob_match(tmp->filename, name)) {
51893 + obj = tmp;
51894 + goto found_obj;
51895 + }
51896 + tmp = tmp->next;
51897 + }
51898 + goto found_obj;
51899 + }
51900 + } while ((tmpsubj = tmpsubj->parent_subject));
51901 +next_component:
51902 + /* end case */
51903 + if (p == name)
51904 + break;
51905 +
51906 + while (*p != '/')
51907 + p--;
51908 + if (p == name)
51909 + lastp = p + 1;
51910 + else {
51911 + lastp = p;
51912 + p--;
51913 + }
51914 + c = *lastp;
51915 + *lastp = '\0';
51916 + } while (1);
51917 +found_obj:
51918 + read_unlock(&gr_inode_lock);
51919 + /* obj returned will always be non-null */
51920 + return obj;
51921 +}
51922 +
51923 +/* returns 0 when allowing, non-zero on error
51924 + op of 0 is used for readdir, so we don't log the names of hidden files
51925 +*/
51926 +__u32
51927 +gr_handle_sysctl(const struct ctl_table *table, const int op)
51928 +{
51929 + struct ctl_table *tmp;
51930 + const char *proc_sys = "/proc/sys";
51931 + char *path;
51932 + struct acl_object_label *obj;
51933 + unsigned short len = 0, pos = 0, depth = 0, i;
51934 + __u32 err = 0;
51935 + __u32 mode = 0;
51936 +
51937 + if (unlikely(!(gr_status & GR_READY)))
51938 + return 0;
51939 +
51940 + /* for now, ignore operations on non-sysctl entries if it's not a
51941 + readdir*/
51942 + if (table->child != NULL && op != 0)
51943 + return 0;
51944 +
51945 + mode |= GR_FIND;
51946 + /* it's only a read if it's an entry, read on dirs is for readdir */
51947 + if (op & MAY_READ)
51948 + mode |= GR_READ;
51949 + if (op & MAY_WRITE)
51950 + mode |= GR_WRITE;
51951 +
51952 + preempt_disable();
51953 +
51954 + path = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
51955 +
51956 + /* it's only a read/write if it's an actual entry, not a dir
51957 + (which are opened for readdir)
51958 + */
51959 +
51960 + /* convert the requested sysctl entry into a pathname */
51961 +
51962 + for (tmp = (struct ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
51963 + len += strlen(tmp->procname);
51964 + len++;
51965 + depth++;
51966 + }
51967 +
51968 + if ((len + depth + strlen(proc_sys) + 1) > PAGE_SIZE) {
51969 + /* deny */
51970 + goto out;
51971 + }
51972 +
51973 + memset(path, 0, PAGE_SIZE);
51974 +
51975 + memcpy(path, proc_sys, strlen(proc_sys));
51976 +
51977 + pos += strlen(proc_sys);
51978 +
51979 + for (; depth > 0; depth--) {
51980 + path[pos] = '/';
51981 + pos++;
51982 + for (i = 1, tmp = (struct ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
51983 + if (depth == i) {
51984 + memcpy(path + pos, tmp->procname,
51985 + strlen(tmp->procname));
51986 + pos += strlen(tmp->procname);
51987 + }
51988 + i++;
51989 + }
51990 + }
51991 +
51992 + obj = gr_lookup_by_name(path, pos);
51993 + err = obj->mode & (mode | to_gr_audit(mode) | GR_SUPPRESS);
51994 +
51995 + if (unlikely((current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) &&
51996 + ((err & mode) != mode))) {
51997 + __u32 new_mode = mode;
51998 +
51999 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
52000 +
52001 + err = 0;
52002 + gr_log_learn_sysctl(path, new_mode);
52003 + } else if (!(err & GR_FIND) && !(err & GR_SUPPRESS) && op != 0) {
52004 + gr_log_hidden_sysctl(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, path);
52005 + err = -ENOENT;
52006 + } else if (!(err & GR_FIND)) {
52007 + err = -ENOENT;
52008 + } else if (((err & mode) & ~GR_FIND) != (mode & ~GR_FIND) && !(err & GR_SUPPRESS)) {
52009 + gr_log_str4(GR_DONT_AUDIT, GR_SYSCTL_ACL_MSG, "denied",
52010 + path, (mode & GR_READ) ? " reading" : "",
52011 + (mode & GR_WRITE) ? " writing" : "");
52012 + err = -EACCES;
52013 + } else if ((err & mode) != mode) {
52014 + err = -EACCES;
52015 + } else if ((((err & mode) & ~GR_FIND) == (mode & ~GR_FIND)) && (err & GR_AUDITS)) {
52016 + gr_log_str4(GR_DO_AUDIT, GR_SYSCTL_ACL_MSG, "successful",
52017 + path, (mode & GR_READ) ? " reading" : "",
52018 + (mode & GR_WRITE) ? " writing" : "");
52019 + err = 0;
52020 + } else
52021 + err = 0;
52022 +
52023 + out:
52024 + preempt_enable();
52025 +
52026 + return err;
52027 +}
52028 +#endif
52029 +
52030 +int
52031 +gr_handle_proc_ptrace(struct task_struct *task)
52032 +{
52033 + struct file *filp;
52034 + struct task_struct *tmp = task;
52035 + struct task_struct *curtemp = current;
52036 + __u32 retmode;
52037 +
52038 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
52039 + if (unlikely(!(gr_status & GR_READY)))
52040 + return 0;
52041 +#endif
52042 +
52043 + read_lock(&tasklist_lock);
52044 + read_lock(&grsec_exec_file_lock);
52045 + filp = task->exec_file;
52046 +
52047 + while (tmp->pid > 0) {
52048 + if (tmp == curtemp)
52049 + break;
52050 + tmp = tmp->real_parent;
52051 + }
52052 +
52053 + if (!filp || (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
52054 + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
52055 + read_unlock(&grsec_exec_file_lock);
52056 + read_unlock(&tasklist_lock);
52057 + return 1;
52058 + }
52059 +
52060 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
52061 + if (!(gr_status & GR_READY)) {
52062 + read_unlock(&grsec_exec_file_lock);
52063 + read_unlock(&tasklist_lock);
52064 + return 0;
52065 + }
52066 +#endif
52067 +
52068 + retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
52069 + read_unlock(&grsec_exec_file_lock);
52070 + read_unlock(&tasklist_lock);
52071 +
52072 + if (retmode & GR_NOPTRACE)
52073 + return 1;
52074 +
52075 + if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
52076 + && (current->acl != task->acl || (current->acl != current->role->root_label
52077 + && current->pid != task->pid)))
52078 + return 1;
52079 +
52080 + return 0;
52081 +}
52082 +
52083 +void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
52084 +{
52085 + if (unlikely(!(gr_status & GR_READY)))
52086 + return;
52087 +
52088 + if (!(current->role->roletype & GR_ROLE_GOD))
52089 + return;
52090 +
52091 + seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
52092 + p->role->rolename, gr_task_roletype_to_char(p),
52093 + p->acl->filename);
52094 +}
52095 +
52096 +int
52097 +gr_handle_ptrace(struct task_struct *task, const long request)
52098 +{
52099 + struct task_struct *tmp = task;
52100 + struct task_struct *curtemp = current;
52101 + __u32 retmode;
52102 +
52103 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
52104 + if (unlikely(!(gr_status & GR_READY)))
52105 + return 0;
52106 +#endif
52107 +
52108 + read_lock(&tasklist_lock);
52109 + while (tmp->pid > 0) {
52110 + if (tmp == curtemp)
52111 + break;
52112 + tmp = tmp->real_parent;
52113 + }
52114 +
52115 + if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
52116 + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
52117 + read_unlock(&tasklist_lock);
52118 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
52119 + return 1;
52120 + }
52121 + read_unlock(&tasklist_lock);
52122 +
52123 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
52124 + if (!(gr_status & GR_READY))
52125 + return 0;
52126 +#endif
52127 +
52128 + read_lock(&grsec_exec_file_lock);
52129 + if (unlikely(!task->exec_file)) {
52130 + read_unlock(&grsec_exec_file_lock);
52131 + return 0;
52132 + }
52133 +
52134 + retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
52135 + read_unlock(&grsec_exec_file_lock);
52136 +
52137 + if (retmode & GR_NOPTRACE) {
52138 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
52139 + return 1;
52140 + }
52141 +
52142 + if (retmode & GR_PTRACERD) {
52143 + switch (request) {
52144 + case PTRACE_SEIZE:
52145 + case PTRACE_POKETEXT:
52146 + case PTRACE_POKEDATA:
52147 + case PTRACE_POKEUSR:
52148 +#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
52149 + case PTRACE_SETREGS:
52150 + case PTRACE_SETFPREGS:
52151 +#endif
52152 +#ifdef CONFIG_X86
52153 + case PTRACE_SETFPXREGS:
52154 +#endif
52155 +#ifdef CONFIG_ALTIVEC
52156 + case PTRACE_SETVRREGS:
52157 +#endif
52158 + return 1;
52159 + default:
52160 + return 0;
52161 + }
52162 + } else if (!(current->acl->mode & GR_POVERRIDE) &&
52163 + !(current->role->roletype & GR_ROLE_GOD) &&
52164 + (current->acl != task->acl)) {
52165 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
52166 + return 1;
52167 + }
52168 +
52169 + return 0;
52170 +}
52171 +
52172 +static int is_writable_mmap(const struct file *filp)
52173 +{
52174 + struct task_struct *task = current;
52175 + struct acl_object_label *obj, *obj2;
52176 +
52177 + if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
52178 + !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
52179 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
52180 + obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
52181 + task->role->root_label);
52182 + if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
52183 + gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
52184 + return 1;
52185 + }
52186 + }
52187 + return 0;
52188 +}
52189 +
52190 +int
52191 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
52192 +{
52193 + __u32 mode;
52194 +
52195 + if (unlikely(!file || !(prot & PROT_EXEC)))
52196 + return 1;
52197 +
52198 + if (is_writable_mmap(file))
52199 + return 0;
52200 +
52201 + mode =
52202 + gr_search_file(file->f_path.dentry,
52203 + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
52204 + file->f_path.mnt);
52205 +
52206 + if (!gr_tpe_allow(file))
52207 + return 0;
52208 +
52209 + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
52210 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
52211 + return 0;
52212 + } else if (unlikely(!(mode & GR_EXEC))) {
52213 + return 0;
52214 + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
52215 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
52216 + return 1;
52217 + }
52218 +
52219 + return 1;
52220 +}
52221 +
52222 +int
52223 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
52224 +{
52225 + __u32 mode;
52226 +
52227 + if (unlikely(!file || !(prot & PROT_EXEC)))
52228 + return 1;
52229 +
52230 + if (is_writable_mmap(file))
52231 + return 0;
52232 +
52233 + mode =
52234 + gr_search_file(file->f_path.dentry,
52235 + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
52236 + file->f_path.mnt);
52237 +
52238 + if (!gr_tpe_allow(file))
52239 + return 0;
52240 +
52241 + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
52242 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
52243 + return 0;
52244 + } else if (unlikely(!(mode & GR_EXEC))) {
52245 + return 0;
52246 + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
52247 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
52248 + return 1;
52249 + }
52250 +
52251 + return 1;
52252 +}
52253 +
52254 +void
52255 +gr_acl_handle_psacct(struct task_struct *task, const long code)
52256 +{
52257 + unsigned long runtime;
52258 + unsigned long cputime;
52259 + unsigned int wday, cday;
52260 + __u8 whr, chr;
52261 + __u8 wmin, cmin;
52262 + __u8 wsec, csec;
52263 + struct timespec timeval;
52264 +
52265 + if (unlikely(!(gr_status & GR_READY) || !task->acl ||
52266 + !(task->acl->mode & GR_PROCACCT)))
52267 + return;
52268 +
52269 + do_posix_clock_monotonic_gettime(&timeval);
52270 + runtime = timeval.tv_sec - task->start_time.tv_sec;
52271 + wday = runtime / (3600 * 24);
52272 + runtime -= wday * (3600 * 24);
52273 + whr = runtime / 3600;
52274 + runtime -= whr * 3600;
52275 + wmin = runtime / 60;
52276 + runtime -= wmin * 60;
52277 + wsec = runtime;
52278 +
52279 + cputime = (task->utime + task->stime) / HZ;
52280 + cday = cputime / (3600 * 24);
52281 + cputime -= cday * (3600 * 24);
52282 + chr = cputime / 3600;
52283 + cputime -= chr * 3600;
52284 + cmin = cputime / 60;
52285 + cputime -= cmin * 60;
52286 + csec = cputime;
52287 +
52288 + gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
52289 +
52290 + return;
52291 +}
52292 +
52293 +void gr_set_kernel_label(struct task_struct *task)
52294 +{
52295 + if (gr_status & GR_READY) {
52296 + task->role = kernel_role;
52297 + task->acl = kernel_role->root_label;
52298 + }
52299 + return;
52300 +}
52301 +
52302 +#ifdef CONFIG_TASKSTATS
52303 +int gr_is_taskstats_denied(int pid)
52304 +{
52305 + struct task_struct *task;
52306 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
52307 + const struct cred *cred;
52308 +#endif
52309 + int ret = 0;
52310 +
52311 + /* restrict taskstats viewing to un-chrooted root users
52312 + who have the 'view' subject flag if the RBAC system is enabled
52313 + */
52314 +
52315 + rcu_read_lock();
52316 + read_lock(&tasklist_lock);
52317 + task = find_task_by_vpid(pid);
52318 + if (task) {
52319 +#ifdef CONFIG_GRKERNSEC_CHROOT
52320 + if (proc_is_chrooted(task))
52321 + ret = -EACCES;
52322 +#endif
52323 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
52324 + cred = __task_cred(task);
52325 +#ifdef CONFIG_GRKERNSEC_PROC_USER
52326 + if (cred->uid != 0)
52327 + ret = -EACCES;
52328 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
52329 + if (cred->uid != 0 && !groups_search(cred->group_info, CONFIG_GRKERNSEC_PROC_GID))
52330 + ret = -EACCES;
52331 +#endif
52332 +#endif
52333 + if (gr_status & GR_READY) {
52334 + if (!(task->acl->mode & GR_VIEW))
52335 + ret = -EACCES;
52336 + }
52337 + } else
52338 + ret = -ENOENT;
52339 +
52340 + read_unlock(&tasklist_lock);
52341 + rcu_read_unlock();
52342 +
52343 + return ret;
52344 +}
52345 +#endif
52346 +
52347 +/* AUXV entries are filled via a descendant of search_binary_handler
52348 + after we've already applied the subject for the target
52349 +*/
52350 +int gr_acl_enable_at_secure(void)
52351 +{
52352 + if (unlikely(!(gr_status & GR_READY)))
52353 + return 0;
52354 +
52355 + if (current->acl->mode & GR_ATSECURE)
52356 + return 1;
52357 +
52358 + return 0;
52359 +}
52360 +
52361 +int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
52362 +{
52363 + struct task_struct *task = current;
52364 + struct dentry *dentry = file->f_path.dentry;
52365 + struct vfsmount *mnt = file->f_path.mnt;
52366 + struct acl_object_label *obj, *tmp;
52367 + struct acl_subject_label *subj;
52368 + unsigned int bufsize;
52369 + int is_not_root;
52370 + char *path;
52371 + dev_t dev = __get_dev(dentry);
52372 +
52373 + if (unlikely(!(gr_status & GR_READY)))
52374 + return 1;
52375 +
52376 + if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
52377 + return 1;
52378 +
52379 + /* ignore Eric Biederman */
52380 + if (IS_PRIVATE(dentry->d_inode))
52381 + return 1;
52382 +
52383 + subj = task->acl;
52384 + do {
52385 + obj = lookup_acl_obj_label(ino, dev, subj);
52386 + if (obj != NULL)
52387 + return (obj->mode & GR_FIND) ? 1 : 0;
52388 + } while ((subj = subj->parent_subject));
52389 +
52390 + /* this is purely an optimization since we're looking for an object
52391 + for the directory we're doing a readdir on
52392 + if it's possible for any globbed object to match the entry we're
52393 + filling into the directory, then the object we find here will be
52394 + an anchor point with attached globbed objects
52395 + */
52396 + obj = chk_obj_label_noglob(dentry, mnt, task->acl);
52397 + if (obj->globbed == NULL)
52398 + return (obj->mode & GR_FIND) ? 1 : 0;
52399 +
52400 + is_not_root = ((obj->filename[0] == '/') &&
52401 + (obj->filename[1] == '\0')) ? 0 : 1;
52402 + bufsize = PAGE_SIZE - namelen - is_not_root;
52403 +
52404 + /* check bufsize > PAGE_SIZE || bufsize == 0 */
52405 + if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
52406 + return 1;
52407 +
52408 + preempt_disable();
52409 + path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
52410 + bufsize);
52411 +
52412 + bufsize = strlen(path);
52413 +
52414 + /* if base is "/", don't append an additional slash */
52415 + if (is_not_root)
52416 + *(path + bufsize) = '/';
52417 + memcpy(path + bufsize + is_not_root, name, namelen);
52418 + *(path + bufsize + namelen + is_not_root) = '\0';
52419 +
52420 + tmp = obj->globbed;
52421 + while (tmp) {
52422 + if (!glob_match(tmp->filename, path)) {
52423 + preempt_enable();
52424 + return (tmp->mode & GR_FIND) ? 1 : 0;
52425 + }
52426 + tmp = tmp->next;
52427 + }
52428 + preempt_enable();
52429 + return (obj->mode & GR_FIND) ? 1 : 0;
52430 +}
52431 +
52432 +#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
52433 +EXPORT_SYMBOL(gr_acl_is_enabled);
52434 +#endif
52435 +EXPORT_SYMBOL(gr_learn_resource);
52436 +EXPORT_SYMBOL(gr_set_kernel_label);
52437 +#ifdef CONFIG_SECURITY
52438 +EXPORT_SYMBOL(gr_check_user_change);
52439 +EXPORT_SYMBOL(gr_check_group_change);
52440 +#endif
52441 +
52442 diff --git a/grsecurity/gracl_alloc.c b/grsecurity/gracl_alloc.c
52443 new file mode 100644
52444 index 0000000..34fefda
52445 --- /dev/null
52446 +++ b/grsecurity/gracl_alloc.c
52447 @@ -0,0 +1,105 @@
52448 +#include <linux/kernel.h>
52449 +#include <linux/mm.h>
52450 +#include <linux/slab.h>
52451 +#include <linux/vmalloc.h>
52452 +#include <linux/gracl.h>
52453 +#include <linux/grsecurity.h>
52454 +
52455 +static unsigned long alloc_stack_next = 1;
52456 +static unsigned long alloc_stack_size = 1;
52457 +static void **alloc_stack;
52458 +
52459 +static __inline__ int
52460 +alloc_pop(void)
52461 +{
52462 + if (alloc_stack_next == 1)
52463 + return 0;
52464 +
52465 + kfree(alloc_stack[alloc_stack_next - 2]);
52466 +
52467 + alloc_stack_next--;
52468 +
52469 + return 1;
52470 +}
52471 +
52472 +static __inline__ int
52473 +alloc_push(void *buf)
52474 +{
52475 + if (alloc_stack_next >= alloc_stack_size)
52476 + return 1;
52477 +
52478 + alloc_stack[alloc_stack_next - 1] = buf;
52479 +
52480 + alloc_stack_next++;
52481 +
52482 + return 0;
52483 +}
52484 +
52485 +void *
52486 +acl_alloc(unsigned long len)
52487 +{
52488 + void *ret = NULL;
52489 +
52490 + if (!len || len > PAGE_SIZE)
52491 + goto out;
52492 +
52493 + ret = kmalloc(len, GFP_KERNEL);
52494 +
52495 + if (ret) {
52496 + if (alloc_push(ret)) {
52497 + kfree(ret);
52498 + ret = NULL;
52499 + }
52500 + }
52501 +
52502 +out:
52503 + return ret;
52504 +}
52505 +
52506 +void *
52507 +acl_alloc_num(unsigned long num, unsigned long len)
52508 +{
52509 + if (!len || (num > (PAGE_SIZE / len)))
52510 + return NULL;
52511 +
52512 + return acl_alloc(num * len);
52513 +}
52514 +
52515 +void
52516 +acl_free_all(void)
52517 +{
52518 + if (gr_acl_is_enabled() || !alloc_stack)
52519 + return;
52520 +
52521 + while (alloc_pop()) ;
52522 +
52523 + if (alloc_stack) {
52524 + if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
52525 + kfree(alloc_stack);
52526 + else
52527 + vfree(alloc_stack);
52528 + }
52529 +
52530 + alloc_stack = NULL;
52531 + alloc_stack_size = 1;
52532 + alloc_stack_next = 1;
52533 +
52534 + return;
52535 +}
52536 +
52537 +int
52538 +acl_alloc_stack_init(unsigned long size)
52539 +{
52540 + if ((size * sizeof (void *)) <= PAGE_SIZE)
52541 + alloc_stack =
52542 + (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
52543 + else
52544 + alloc_stack = (void **) vmalloc(size * sizeof (void *));
52545 +
52546 + alloc_stack_size = size;
52547 +
52548 + if (!alloc_stack)
52549 + return 0;
52550 + else
52551 + return 1;
52552 +}
52553 diff --git a/grsecurity/gracl_cap.c b/grsecurity/gracl_cap.c
52554 new file mode 100644
52555 index 0000000..955ddfb
52556 --- /dev/null
52557 +++ b/grsecurity/gracl_cap.c
52558 @@ -0,0 +1,101 @@
52559 +#include <linux/kernel.h>
52560 +#include <linux/module.h>
52561 +#include <linux/sched.h>
52562 +#include <linux/gracl.h>
52563 +#include <linux/grsecurity.h>
52564 +#include <linux/grinternal.h>
52565 +
52566 +extern const char *captab_log[];
52567 +extern int captab_log_entries;
52568 +
52569 +int
52570 +gr_acl_is_capable(const int cap)
52571 +{
52572 + struct task_struct *task = current;
52573 + const struct cred *cred = current_cred();
52574 + struct acl_subject_label *curracl;
52575 + kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
52576 + kernel_cap_t cap_audit = __cap_empty_set;
52577 +
52578 + if (!gr_acl_is_enabled())
52579 + return 1;
52580 +
52581 + curracl = task->acl;
52582 +
52583 + cap_drop = curracl->cap_lower;
52584 + cap_mask = curracl->cap_mask;
52585 + cap_audit = curracl->cap_invert_audit;
52586 +
52587 + while ((curracl = curracl->parent_subject)) {
52588 + /* if the cap isn't specified in the current computed mask but is specified in the
52589 + current level subject, and is lowered in the current level subject, then add
52590 + it to the set of dropped capabilities
52591 + otherwise, add the current level subject's mask to the current computed mask
52592 + */
52593 + if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
52594 + cap_raise(cap_mask, cap);
52595 + if (cap_raised(curracl->cap_lower, cap))
52596 + cap_raise(cap_drop, cap);
52597 + if (cap_raised(curracl->cap_invert_audit, cap))
52598 + cap_raise(cap_audit, cap);
52599 + }
52600 + }
52601 +
52602 + if (!cap_raised(cap_drop, cap)) {
52603 + if (cap_raised(cap_audit, cap))
52604 + gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
52605 + return 1;
52606 + }
52607 +
52608 + curracl = task->acl;
52609 +
52610 + if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
52611 + && cap_raised(cred->cap_effective, cap)) {
52612 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
52613 + task->role->roletype, cred->uid,
52614 + cred->gid, task->exec_file ?
52615 + gr_to_filename(task->exec_file->f_path.dentry,
52616 + task->exec_file->f_path.mnt) : curracl->filename,
52617 + curracl->filename, 0UL,
52618 + 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
52619 + return 1;
52620 + }
52621 +
52622 + if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
52623 + gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
52624 + return 0;
52625 +}
52626 +
52627 +int
52628 +gr_acl_is_capable_nolog(const int cap)
52629 +{
52630 + struct acl_subject_label *curracl;
52631 + kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
52632 +
52633 + if (!gr_acl_is_enabled())
52634 + return 1;
52635 +
52636 + curracl = current->acl;
52637 +
52638 + cap_drop = curracl->cap_lower;
52639 + cap_mask = curracl->cap_mask;
52640 +
52641 + while ((curracl = curracl->parent_subject)) {
52642 + /* if the cap isn't specified in the current computed mask but is specified in the
52643 + current level subject, and is lowered in the current level subject, then add
52644 + it to the set of dropped capabilities
52645 + otherwise, add the current level subject's mask to the current computed mask
52646 + */
52647 + if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
52648 + cap_raise(cap_mask, cap);
52649 + if (cap_raised(curracl->cap_lower, cap))
52650 + cap_raise(cap_drop, cap);
52651 + }
52652 + }
52653 +
52654 + if (!cap_raised(cap_drop, cap))
52655 + return 1;
52656 +
52657 + return 0;
52658 +}
52659 +
52660 diff --git a/grsecurity/gracl_fs.c b/grsecurity/gracl_fs.c
52661 new file mode 100644
52662 index 0000000..4eda5c3
52663 --- /dev/null
52664 +++ b/grsecurity/gracl_fs.c
52665 @@ -0,0 +1,433 @@
52666 +#include <linux/kernel.h>
52667 +#include <linux/sched.h>
52668 +#include <linux/types.h>
52669 +#include <linux/fs.h>
52670 +#include <linux/file.h>
52671 +#include <linux/stat.h>
52672 +#include <linux/grsecurity.h>
52673 +#include <linux/grinternal.h>
52674 +#include <linux/gracl.h>
52675 +
52676 +__u32
52677 +gr_acl_handle_hidden_file(const struct dentry * dentry,
52678 + const struct vfsmount * mnt)
52679 +{
52680 + __u32 mode;
52681 +
52682 + if (unlikely(!dentry->d_inode))
52683 + return GR_FIND;
52684 +
52685 + mode =
52686 + gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
52687 +
52688 + if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
52689 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
52690 + return mode;
52691 + } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
52692 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
52693 + return 0;
52694 + } else if (unlikely(!(mode & GR_FIND)))
52695 + return 0;
52696 +
52697 + return GR_FIND;
52698 +}
52699 +
52700 +__u32
52701 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
52702 + int acc_mode)
52703 +{
52704 + __u32 reqmode = GR_FIND;
52705 + __u32 mode;
52706 +
52707 + if (unlikely(!dentry->d_inode))
52708 + return reqmode;
52709 +
52710 + if (acc_mode & MAY_APPEND)
52711 + reqmode |= GR_APPEND;
52712 + else if (acc_mode & MAY_WRITE)
52713 + reqmode |= GR_WRITE;
52714 + if ((acc_mode & MAY_READ) && !S_ISDIR(dentry->d_inode->i_mode))
52715 + reqmode |= GR_READ;
52716 +
52717 + mode =
52718 + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
52719 + mnt);
52720 +
52721 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
52722 + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
52723 + reqmode & GR_READ ? " reading" : "",
52724 + reqmode & GR_WRITE ? " writing" : reqmode &
52725 + GR_APPEND ? " appending" : "");
52726 + return reqmode;
52727 + } else
52728 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
52729 + {
52730 + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
52731 + reqmode & GR_READ ? " reading" : "",
52732 + reqmode & GR_WRITE ? " writing" : reqmode &
52733 + GR_APPEND ? " appending" : "");
52734 + return 0;
52735 + } else if (unlikely((mode & reqmode) != reqmode))
52736 + return 0;
52737 +
52738 + return reqmode;
52739 +}
52740 +
52741 +__u32
52742 +gr_acl_handle_creat(const struct dentry * dentry,
52743 + const struct dentry * p_dentry,
52744 + const struct vfsmount * p_mnt, int open_flags, int acc_mode,
52745 + const int imode)
52746 +{
52747 + __u32 reqmode = GR_WRITE | GR_CREATE;
52748 + __u32 mode;
52749 +
52750 + if (acc_mode & MAY_APPEND)
52751 + reqmode |= GR_APPEND;
52752 + // if a directory was required or the directory already exists, then
52753 + // don't count this open as a read
52754 + if ((acc_mode & MAY_READ) &&
52755 + !((open_flags & O_DIRECTORY) || (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode))))
52756 + reqmode |= GR_READ;
52757 + if ((open_flags & O_CREAT) && (imode & (S_ISUID | S_ISGID)))
52758 + reqmode |= GR_SETID;
52759 +
52760 + mode =
52761 + gr_check_create(dentry, p_dentry, p_mnt,
52762 + reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
52763 +
52764 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
52765 + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
52766 + reqmode & GR_READ ? " reading" : "",
52767 + reqmode & GR_WRITE ? " writing" : reqmode &
52768 + GR_APPEND ? " appending" : "");
52769 + return reqmode;
52770 + } else
52771 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
52772 + {
52773 + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
52774 + reqmode & GR_READ ? " reading" : "",
52775 + reqmode & GR_WRITE ? " writing" : reqmode &
52776 + GR_APPEND ? " appending" : "");
52777 + return 0;
52778 + } else if (unlikely((mode & reqmode) != reqmode))
52779 + return 0;
52780 +
52781 + return reqmode;
52782 +}
52783 +
52784 +__u32
52785 +gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
52786 + const int fmode)
52787 +{
52788 + __u32 mode, reqmode = GR_FIND;
52789 +
52790 + if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
52791 + reqmode |= GR_EXEC;
52792 + if (fmode & S_IWOTH)
52793 + reqmode |= GR_WRITE;
52794 + if (fmode & S_IROTH)
52795 + reqmode |= GR_READ;
52796 +
52797 + mode =
52798 + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
52799 + mnt);
52800 +
52801 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
52802 + gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
52803 + reqmode & GR_READ ? " reading" : "",
52804 + reqmode & GR_WRITE ? " writing" : "",
52805 + reqmode & GR_EXEC ? " executing" : "");
52806 + return reqmode;
52807 + } else
52808 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
52809 + {
52810 + gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
52811 + reqmode & GR_READ ? " reading" : "",
52812 + reqmode & GR_WRITE ? " writing" : "",
52813 + reqmode & GR_EXEC ? " executing" : "");
52814 + return 0;
52815 + } else if (unlikely((mode & reqmode) != reqmode))
52816 + return 0;
52817 +
52818 + return reqmode;
52819 +}
52820 +
52821 +static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
52822 +{
52823 + __u32 mode;
52824 +
52825 + mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
52826 +
52827 + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
52828 + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
52829 + return mode;
52830 + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
52831 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
52832 + return 0;
52833 + } else if (unlikely((mode & (reqmode)) != (reqmode)))
52834 + return 0;
52835 +
52836 + return (reqmode);
52837 +}
52838 +
52839 +__u32
52840 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
52841 +{
52842 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
52843 +}
52844 +
52845 +__u32
52846 +gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
52847 +{
52848 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
52849 +}
52850 +
52851 +__u32
52852 +gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
52853 +{
52854 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
52855 +}
52856 +
52857 +__u32
52858 +gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
52859 +{
52860 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
52861 +}
52862 +
52863 +__u32
52864 +gr_acl_handle_fchmod(const struct dentry *dentry, const struct vfsmount *mnt,
52865 + mode_t mode)
52866 +{
52867 + if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
52868 + return 1;
52869 +
52870 + if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
52871 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
52872 + GR_FCHMOD_ACL_MSG);
52873 + } else {
52874 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_FCHMOD_ACL_MSG);
52875 + }
52876 +}
52877 +
52878 +__u32
52879 +gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
52880 + mode_t mode)
52881 +{
52882 + if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
52883 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
52884 + GR_CHMOD_ACL_MSG);
52885 + } else {
52886 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
52887 + }
52888 +}
52889 +
52890 +__u32
52891 +gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
52892 +{
52893 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
52894 +}
52895 +
52896 +__u32
52897 +gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
52898 +{
52899 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
52900 +}
52901 +
52902 +__u32
52903 +gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
52904 +{
52905 + return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
52906 +}
52907 +
52908 +__u32
52909 +gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
52910 +{
52911 + return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
52912 + GR_UNIXCONNECT_ACL_MSG);
52913 +}
52914 +
52915 +/* hardlinks require at minimum create and link permission,
52916 + any additional privilege required is based on the
52917 + privilege of the file being linked to
52918 +*/
52919 +__u32
52920 +gr_acl_handle_link(const struct dentry * new_dentry,
52921 + const struct dentry * parent_dentry,
52922 + const struct vfsmount * parent_mnt,
52923 + const struct dentry * old_dentry,
52924 + const struct vfsmount * old_mnt, const char *to)
52925 +{
52926 + __u32 mode;
52927 + __u32 needmode = GR_CREATE | GR_LINK;
52928 + __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
52929 +
52930 + mode =
52931 + gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
52932 + old_mnt);
52933 +
52934 + if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
52935 + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
52936 + return mode;
52937 + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
52938 + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
52939 + return 0;
52940 + } else if (unlikely((mode & needmode) != needmode))
52941 + return 0;
52942 +
52943 + return 1;
52944 +}
52945 +
52946 +__u32
52947 +gr_acl_handle_symlink(const struct dentry * new_dentry,
52948 + const struct dentry * parent_dentry,
52949 + const struct vfsmount * parent_mnt, const char *from)
52950 +{
52951 + __u32 needmode = GR_WRITE | GR_CREATE;
52952 + __u32 mode;
52953 +
52954 + mode =
52955 + gr_check_create(new_dentry, parent_dentry, parent_mnt,
52956 + GR_CREATE | GR_AUDIT_CREATE |
52957 + GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
52958 +
52959 + if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
52960 + gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
52961 + return mode;
52962 + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
52963 + gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
52964 + return 0;
52965 + } else if (unlikely((mode & needmode) != needmode))
52966 + return 0;
52967 +
52968 + return (GR_WRITE | GR_CREATE);
52969 +}
52970 +
52971 +static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
52972 +{
52973 + __u32 mode;
52974 +
52975 + mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
52976 +
52977 + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
52978 + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
52979 + return mode;
52980 + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
52981 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
52982 + return 0;
52983 + } else if (unlikely((mode & (reqmode)) != (reqmode)))
52984 + return 0;
52985 +
52986 + return (reqmode);
52987 +}
52988 +
52989 +__u32
52990 +gr_acl_handle_mknod(const struct dentry * new_dentry,
52991 + const struct dentry * parent_dentry,
52992 + const struct vfsmount * parent_mnt,
52993 + const int mode)
52994 +{
52995 + __u32 reqmode = GR_WRITE | GR_CREATE;
52996 + if (unlikely(mode & (S_ISUID | S_ISGID)))
52997 + reqmode |= GR_SETID;
52998 +
52999 + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
53000 + reqmode, GR_MKNOD_ACL_MSG);
53001 +}
53002 +
53003 +__u32
53004 +gr_acl_handle_mkdir(const struct dentry *new_dentry,
53005 + const struct dentry *parent_dentry,
53006 + const struct vfsmount *parent_mnt)
53007 +{
53008 + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
53009 + GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
53010 +}
53011 +
53012 +#define RENAME_CHECK_SUCCESS(old, new) \
53013 + (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
53014 + ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
53015 +
53016 +int
53017 +gr_acl_handle_rename(struct dentry *new_dentry,
53018 + struct dentry *parent_dentry,
53019 + const struct vfsmount *parent_mnt,
53020 + struct dentry *old_dentry,
53021 + struct inode *old_parent_inode,
53022 + struct vfsmount *old_mnt, const char *newname)
53023 +{
53024 + __u32 comp1, comp2;
53025 + int error = 0;
53026 +
53027 + if (unlikely(!gr_acl_is_enabled()))
53028 + return 0;
53029 +
53030 + if (!new_dentry->d_inode) {
53031 + comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
53032 + GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
53033 + GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
53034 + comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
53035 + GR_DELETE | GR_AUDIT_DELETE |
53036 + GR_AUDIT_READ | GR_AUDIT_WRITE |
53037 + GR_SUPPRESS, old_mnt);
53038 + } else {
53039 + comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
53040 + GR_CREATE | GR_DELETE |
53041 + GR_AUDIT_CREATE | GR_AUDIT_DELETE |
53042 + GR_AUDIT_READ | GR_AUDIT_WRITE |
53043 + GR_SUPPRESS, parent_mnt);
53044 + comp2 =
53045 + gr_search_file(old_dentry,
53046 + GR_READ | GR_WRITE | GR_AUDIT_READ |
53047 + GR_DELETE | GR_AUDIT_DELETE |
53048 + GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
53049 + }
53050 +
53051 + if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
53052 + ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
53053 + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
53054 + else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
53055 + && !(comp2 & GR_SUPPRESS)) {
53056 + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
53057 + error = -EACCES;
53058 + } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
53059 + error = -EACCES;
53060 +
53061 + return error;
53062 +}
53063 +
53064 +void
53065 +gr_acl_handle_exit(void)
53066 +{
53067 + u16 id;
53068 + char *rolename;
53069 + struct file *exec_file;
53070 +
53071 + if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
53072 + !(current->role->roletype & GR_ROLE_PERSIST))) {
53073 + id = current->acl_role_id;
53074 + rolename = current->role->rolename;
53075 + gr_set_acls(1);
53076 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
53077 + }
53078 +
53079 + write_lock(&grsec_exec_file_lock);
53080 + exec_file = current->exec_file;
53081 + current->exec_file = NULL;
53082 + write_unlock(&grsec_exec_file_lock);
53083 +
53084 + if (exec_file)
53085 + fput(exec_file);
53086 +}
53087 +
53088 +int
53089 +gr_acl_handle_procpidmem(const struct task_struct *task)
53090 +{
53091 + if (unlikely(!gr_acl_is_enabled()))
53092 + return 0;
53093 +
53094 + if (task != current && task->acl->mode & GR_PROTPROCFD)
53095 + return -EACCES;
53096 +
53097 + return 0;
53098 +}
53099 diff --git a/grsecurity/gracl_ip.c b/grsecurity/gracl_ip.c
53100 new file mode 100644
53101 index 0000000..17050ca
53102 --- /dev/null
53103 +++ b/grsecurity/gracl_ip.c
53104 @@ -0,0 +1,381 @@
53105 +#include <linux/kernel.h>
53106 +#include <asm/uaccess.h>
53107 +#include <asm/errno.h>
53108 +#include <net/sock.h>
53109 +#include <linux/file.h>
53110 +#include <linux/fs.h>
53111 +#include <linux/net.h>
53112 +#include <linux/in.h>
53113 +#include <linux/skbuff.h>
53114 +#include <linux/ip.h>
53115 +#include <linux/udp.h>
53116 +#include <linux/types.h>
53117 +#include <linux/sched.h>
53118 +#include <linux/netdevice.h>
53119 +#include <linux/inetdevice.h>
53120 +#include <linux/gracl.h>
53121 +#include <linux/grsecurity.h>
53122 +#include <linux/grinternal.h>
53123 +
53124 +#define GR_BIND 0x01
53125 +#define GR_CONNECT 0x02
53126 +#define GR_INVERT 0x04
53127 +#define GR_BINDOVERRIDE 0x08
53128 +#define GR_CONNECTOVERRIDE 0x10
53129 +#define GR_SOCK_FAMILY 0x20
53130 +
53131 +static const char * gr_protocols[IPPROTO_MAX] = {
53132 + "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
53133 + "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
53134 + "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
53135 + "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
53136 + "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
53137 + "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
53138 + "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
53139 + "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
53140 + "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
53141 + "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
53142 + "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
53143 + "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
53144 + "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
53145 + "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
53146 + "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
53147 + "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
53148 + "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
53149 + "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
53150 + "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
53151 + "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
53152 + "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
53153 + "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
53154 + "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
53155 + "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
53156 + "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
53157 + "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
53158 + "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
53159 + "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
53160 + "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
53161 + "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
53162 + "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
53163 + "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
53164 + };
53165 +
53166 +static const char * gr_socktypes[SOCK_MAX] = {
53167 + "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
53168 + "unknown:7", "unknown:8", "unknown:9", "packet"
53169 + };
53170 +
53171 +static const char * gr_sockfamilies[AF_MAX+1] = {
53172 + "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
53173 + "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
53174 + "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
53175 + "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "ciaf"
53176 + };
53177 +
53178 +const char *
53179 +gr_proto_to_name(unsigned char proto)
53180 +{
53181 + return gr_protocols[proto];
53182 +}
53183 +
53184 +const char *
53185 +gr_socktype_to_name(unsigned char type)
53186 +{
53187 + return gr_socktypes[type];
53188 +}
53189 +
53190 +const char *
53191 +gr_sockfamily_to_name(unsigned char family)
53192 +{
53193 + return gr_sockfamilies[family];
53194 +}
53195 +
53196 +int
53197 +gr_search_socket(const int domain, const int type, const int protocol)
53198 +{
53199 + struct acl_subject_label *curr;
53200 + const struct cred *cred = current_cred();
53201 +
53202 + if (unlikely(!gr_acl_is_enabled()))
53203 + goto exit;
53204 +
53205 + if ((domain < 0) || (type < 0) || (protocol < 0) ||
53206 + (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
53207 + goto exit; // let the kernel handle it
53208 +
53209 + curr = current->acl;
53210 +
53211 + if (curr->sock_families[domain / 32] & (1 << (domain % 32))) {
53212 + /* the family is allowed, if this is PF_INET allow it only if
53213 + the extra sock type/protocol checks pass */
53214 + if (domain == PF_INET)
53215 + goto inet_check;
53216 + goto exit;
53217 + } else {
53218 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
53219 + __u32 fakeip = 0;
53220 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
53221 + current->role->roletype, cred->uid,
53222 + cred->gid, current->exec_file ?
53223 + gr_to_filename(current->exec_file->f_path.dentry,
53224 + current->exec_file->f_path.mnt) :
53225 + curr->filename, curr->filename,
53226 + &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
53227 + &current->signal->saved_ip);
53228 + goto exit;
53229 + }
53230 + goto exit_fail;
53231 + }
53232 +
53233 +inet_check:
53234 + /* the rest of this checking is for IPv4 only */
53235 + if (!curr->ips)
53236 + goto exit;
53237 +
53238 + if ((curr->ip_type & (1 << type)) &&
53239 + (curr->ip_proto[protocol / 32] & (1 << (protocol % 32))))
53240 + goto exit;
53241 +
53242 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
53243 + /* we don't place acls on raw sockets , and sometimes
53244 + dgram/ip sockets are opened for ioctl and not
53245 + bind/connect, so we'll fake a bind learn log */
53246 + if (type == SOCK_RAW || type == SOCK_PACKET) {
53247 + __u32 fakeip = 0;
53248 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
53249 + current->role->roletype, cred->uid,
53250 + cred->gid, current->exec_file ?
53251 + gr_to_filename(current->exec_file->f_path.dentry,
53252 + current->exec_file->f_path.mnt) :
53253 + curr->filename, curr->filename,
53254 + &fakeip, 0, type,
53255 + protocol, GR_CONNECT, &current->signal->saved_ip);
53256 + } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
53257 + __u32 fakeip = 0;
53258 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
53259 + current->role->roletype, cred->uid,
53260 + cred->gid, current->exec_file ?
53261 + gr_to_filename(current->exec_file->f_path.dentry,
53262 + current->exec_file->f_path.mnt) :
53263 + curr->filename, curr->filename,
53264 + &fakeip, 0, type,
53265 + protocol, GR_BIND, &current->signal->saved_ip);
53266 + }
53267 + /* we'll log when they use connect or bind */
53268 + goto exit;
53269 + }
53270 +
53271 +exit_fail:
53272 + if (domain == PF_INET)
53273 + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
53274 + gr_socktype_to_name(type), gr_proto_to_name(protocol));
53275 + else
53276 + gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
53277 + gr_socktype_to_name(type), protocol);
53278 +
53279 + return 0;
53280 +exit:
53281 + return 1;
53282 +}
53283 +
53284 +int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
53285 +{
53286 + if ((ip->mode & mode) &&
53287 + (ip_port >= ip->low) &&
53288 + (ip_port <= ip->high) &&
53289 + ((ntohl(ip_addr) & our_netmask) ==
53290 + (ntohl(our_addr) & our_netmask))
53291 + && (ip->proto[protocol / 32] & (1 << (protocol % 32)))
53292 + && (ip->type & (1 << type))) {
53293 + if (ip->mode & GR_INVERT)
53294 + return 2; // specifically denied
53295 + else
53296 + return 1; // allowed
53297 + }
53298 +
53299 + return 0; // not specifically allowed, may continue parsing
53300 +}
53301 +
53302 +static int
53303 +gr_search_connectbind(const int full_mode, struct sock *sk,
53304 + struct sockaddr_in *addr, const int type)
53305 +{
53306 + char iface[IFNAMSIZ] = {0};
53307 + struct acl_subject_label *curr;
53308 + struct acl_ip_label *ip;
53309 + struct inet_sock *isk;
53310 + struct net_device *dev;
53311 + struct in_device *idev;
53312 + unsigned long i;
53313 + int ret;
53314 + int mode = full_mode & (GR_BIND | GR_CONNECT);
53315 + __u32 ip_addr = 0;
53316 + __u32 our_addr;
53317 + __u32 our_netmask;
53318 + char *p;
53319 + __u16 ip_port = 0;
53320 + const struct cred *cred = current_cred();
53321 +
53322 + if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
53323 + return 0;
53324 +
53325 + curr = current->acl;
53326 + isk = inet_sk(sk);
53327 +
53328 + /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
53329 + if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
53330 + addr->sin_addr.s_addr = curr->inaddr_any_override;
53331 + if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
53332 + struct sockaddr_in saddr;
53333 + int err;
53334 +
53335 + saddr.sin_family = AF_INET;
53336 + saddr.sin_addr.s_addr = curr->inaddr_any_override;
53337 + saddr.sin_port = isk->inet_sport;
53338 +
53339 + err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
53340 + if (err)
53341 + return err;
53342 +
53343 + err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
53344 + if (err)
53345 + return err;
53346 + }
53347 +
53348 + if (!curr->ips)
53349 + return 0;
53350 +
53351 + ip_addr = addr->sin_addr.s_addr;
53352 + ip_port = ntohs(addr->sin_port);
53353 +
53354 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
53355 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
53356 + current->role->roletype, cred->uid,
53357 + cred->gid, current->exec_file ?
53358 + gr_to_filename(current->exec_file->f_path.dentry,
53359 + current->exec_file->f_path.mnt) :
53360 + curr->filename, curr->filename,
53361 + &ip_addr, ip_port, type,
53362 + sk->sk_protocol, mode, &current->signal->saved_ip);
53363 + return 0;
53364 + }
53365 +
53366 + for (i = 0; i < curr->ip_num; i++) {
53367 + ip = *(curr->ips + i);
53368 + if (ip->iface != NULL) {
53369 + strncpy(iface, ip->iface, IFNAMSIZ - 1);
53370 + p = strchr(iface, ':');
53371 + if (p != NULL)
53372 + *p = '\0';
53373 + dev = dev_get_by_name(sock_net(sk), iface);
53374 + if (dev == NULL)
53375 + continue;
53376 + idev = in_dev_get(dev);
53377 + if (idev == NULL) {
53378 + dev_put(dev);
53379 + continue;
53380 + }
53381 + rcu_read_lock();
53382 + for_ifa(idev) {
53383 + if (!strcmp(ip->iface, ifa->ifa_label)) {
53384 + our_addr = ifa->ifa_address;
53385 + our_netmask = 0xffffffff;
53386 + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
53387 + if (ret == 1) {
53388 + rcu_read_unlock();
53389 + in_dev_put(idev);
53390 + dev_put(dev);
53391 + return 0;
53392 + } else if (ret == 2) {
53393 + rcu_read_unlock();
53394 + in_dev_put(idev);
53395 + dev_put(dev);
53396 + goto denied;
53397 + }
53398 + }
53399 + } endfor_ifa(idev);
53400 + rcu_read_unlock();
53401 + in_dev_put(idev);
53402 + dev_put(dev);
53403 + } else {
53404 + our_addr = ip->addr;
53405 + our_netmask = ip->netmask;
53406 + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
53407 + if (ret == 1)
53408 + return 0;
53409 + else if (ret == 2)
53410 + goto denied;
53411 + }
53412 + }
53413 +
53414 +denied:
53415 + if (mode == GR_BIND)
53416 + gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
53417 + else if (mode == GR_CONNECT)
53418 + gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
53419 +
53420 + return -EACCES;
53421 +}
53422 +
53423 +int
53424 +gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
53425 +{
53426 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
53427 +}
53428 +
53429 +int
53430 +gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
53431 +{
53432 + return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
53433 +}
53434 +
53435 +int gr_search_listen(struct socket *sock)
53436 +{
53437 + struct sock *sk = sock->sk;
53438 + struct sockaddr_in addr;
53439 +
53440 + addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
53441 + addr.sin_port = inet_sk(sk)->inet_sport;
53442 +
53443 + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
53444 +}
53445 +
53446 +int gr_search_accept(struct socket *sock)
53447 +{
53448 + struct sock *sk = sock->sk;
53449 + struct sockaddr_in addr;
53450 +
53451 + addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
53452 + addr.sin_port = inet_sk(sk)->inet_sport;
53453 +
53454 + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
53455 +}
53456 +
53457 +int
53458 +gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
53459 +{
53460 + if (addr)
53461 + return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
53462 + else {
53463 + struct sockaddr_in sin;
53464 + const struct inet_sock *inet = inet_sk(sk);
53465 +
53466 + sin.sin_addr.s_addr = inet->inet_daddr;
53467 + sin.sin_port = inet->inet_dport;
53468 +
53469 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
53470 + }
53471 +}
53472 +
53473 +int
53474 +gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
53475 +{
53476 + struct sockaddr_in sin;
53477 +
53478 + if (unlikely(skb->len < sizeof (struct udphdr)))
53479 + return 0; // skip this packet
53480 +
53481 + sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
53482 + sin.sin_port = udp_hdr(skb)->source;
53483 +
53484 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
53485 +}
53486 diff --git a/grsecurity/gracl_learn.c b/grsecurity/gracl_learn.c
53487 new file mode 100644
53488 index 0000000..25f54ef
53489 --- /dev/null
53490 +++ b/grsecurity/gracl_learn.c
53491 @@ -0,0 +1,207 @@
53492 +#include <linux/kernel.h>
53493 +#include <linux/mm.h>
53494 +#include <linux/sched.h>
53495 +#include <linux/poll.h>
53496 +#include <linux/string.h>
53497 +#include <linux/file.h>
53498 +#include <linux/types.h>
53499 +#include <linux/vmalloc.h>
53500 +#include <linux/grinternal.h>
53501 +
53502 +extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
53503 + size_t count, loff_t *ppos);
53504 +extern int gr_acl_is_enabled(void);
53505 +
53506 +static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
53507 +static int gr_learn_attached;
53508 +
53509 +/* use a 512k buffer */
53510 +#define LEARN_BUFFER_SIZE (512 * 1024)
53511 +
53512 +static DEFINE_SPINLOCK(gr_learn_lock);
53513 +static DEFINE_MUTEX(gr_learn_user_mutex);
53514 +
53515 +/* we need to maintain two buffers, so that the kernel context of grlearn
53516 + uses a semaphore around the userspace copying, and the other kernel contexts
53517 + use a spinlock when copying into the buffer, since they cannot sleep
53518 +*/
53519 +static char *learn_buffer;
53520 +static char *learn_buffer_user;
53521 +static int learn_buffer_len;
53522 +static int learn_buffer_user_len;
53523 +
53524 +static ssize_t
53525 +read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
53526 +{
53527 + DECLARE_WAITQUEUE(wait, current);
53528 + ssize_t retval = 0;
53529 +
53530 + add_wait_queue(&learn_wait, &wait);
53531 + set_current_state(TASK_INTERRUPTIBLE);
53532 + do {
53533 + mutex_lock(&gr_learn_user_mutex);
53534 + spin_lock(&gr_learn_lock);
53535 + if (learn_buffer_len)
53536 + break;
53537 + spin_unlock(&gr_learn_lock);
53538 + mutex_unlock(&gr_learn_user_mutex);
53539 + if (file->f_flags & O_NONBLOCK) {
53540 + retval = -EAGAIN;
53541 + goto out;
53542 + }
53543 + if (signal_pending(current)) {
53544 + retval = -ERESTARTSYS;
53545 + goto out;
53546 + }
53547 +
53548 + schedule();
53549 + } while (1);
53550 +
53551 + memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
53552 + learn_buffer_user_len = learn_buffer_len;
53553 + retval = learn_buffer_len;
53554 + learn_buffer_len = 0;
53555 +
53556 + spin_unlock(&gr_learn_lock);
53557 +
53558 + if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
53559 + retval = -EFAULT;
53560 +
53561 + mutex_unlock(&gr_learn_user_mutex);
53562 +out:
53563 + set_current_state(TASK_RUNNING);
53564 + remove_wait_queue(&learn_wait, &wait);
53565 + return retval;
53566 +}
53567 +
53568 +static unsigned int
53569 +poll_learn(struct file * file, poll_table * wait)
53570 +{
53571 + poll_wait(file, &learn_wait, wait);
53572 +
53573 + if (learn_buffer_len)
53574 + return (POLLIN | POLLRDNORM);
53575 +
53576 + return 0;
53577 +}
53578 +
53579 +void
53580 +gr_clear_learn_entries(void)
53581 +{
53582 + char *tmp;
53583 +
53584 + mutex_lock(&gr_learn_user_mutex);
53585 + spin_lock(&gr_learn_lock);
53586 + tmp = learn_buffer;
53587 + learn_buffer = NULL;
53588 + spin_unlock(&gr_learn_lock);
53589 + if (tmp)
53590 + vfree(tmp);
53591 + if (learn_buffer_user != NULL) {
53592 + vfree(learn_buffer_user);
53593 + learn_buffer_user = NULL;
53594 + }
53595 + learn_buffer_len = 0;
53596 + mutex_unlock(&gr_learn_user_mutex);
53597 +
53598 + return;
53599 +}
53600 +
53601 +void
53602 +gr_add_learn_entry(const char *fmt, ...)
53603 +{
53604 + va_list args;
53605 + unsigned int len;
53606 +
53607 + if (!gr_learn_attached)
53608 + return;
53609 +
53610 + spin_lock(&gr_learn_lock);
53611 +
53612 + /* leave a gap at the end so we know when it's "full" but don't have to
53613 + compute the exact length of the string we're trying to append
53614 + */
53615 + if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
53616 + spin_unlock(&gr_learn_lock);
53617 + wake_up_interruptible(&learn_wait);
53618 + return;
53619 + }
53620 + if (learn_buffer == NULL) {
53621 + spin_unlock(&gr_learn_lock);
53622 + return;
53623 + }
53624 +
53625 + va_start(args, fmt);
53626 + len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
53627 + va_end(args);
53628 +
53629 + learn_buffer_len += len + 1;
53630 +
53631 + spin_unlock(&gr_learn_lock);
53632 + wake_up_interruptible(&learn_wait);
53633 +
53634 + return;
53635 +}
53636 +
53637 +static int
53638 +open_learn(struct inode *inode, struct file *file)
53639 +{
53640 + if (file->f_mode & FMODE_READ && gr_learn_attached)
53641 + return -EBUSY;
53642 + if (file->f_mode & FMODE_READ) {
53643 + int retval = 0;
53644 + mutex_lock(&gr_learn_user_mutex);
53645 + if (learn_buffer == NULL)
53646 + learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
53647 + if (learn_buffer_user == NULL)
53648 + learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
53649 + if (learn_buffer == NULL) {
53650 + retval = -ENOMEM;
53651 + goto out_error;
53652 + }
53653 + if (learn_buffer_user == NULL) {
53654 + retval = -ENOMEM;
53655 + goto out_error;
53656 + }
53657 + learn_buffer_len = 0;
53658 + learn_buffer_user_len = 0;
53659 + gr_learn_attached = 1;
53660 +out_error:
53661 + mutex_unlock(&gr_learn_user_mutex);
53662 + return retval;
53663 + }
53664 + return 0;
53665 +}
53666 +
53667 +static int
53668 +close_learn(struct inode *inode, struct file *file)
53669 +{
53670 + if (file->f_mode & FMODE_READ) {
53671 + char *tmp = NULL;
53672 + mutex_lock(&gr_learn_user_mutex);
53673 + spin_lock(&gr_learn_lock);
53674 + tmp = learn_buffer;
53675 + learn_buffer = NULL;
53676 + spin_unlock(&gr_learn_lock);
53677 + if (tmp)
53678 + vfree(tmp);
53679 + if (learn_buffer_user != NULL) {
53680 + vfree(learn_buffer_user);
53681 + learn_buffer_user = NULL;
53682 + }
53683 + learn_buffer_len = 0;
53684 + learn_buffer_user_len = 0;
53685 + gr_learn_attached = 0;
53686 + mutex_unlock(&gr_learn_user_mutex);
53687 + }
53688 +
53689 + return 0;
53690 +}
53691 +
53692 +const struct file_operations grsec_fops = {
53693 + .read = read_learn,
53694 + .write = write_grsec_handler,
53695 + .open = open_learn,
53696 + .release = close_learn,
53697 + .poll = poll_learn,
53698 +};
53699 diff --git a/grsecurity/gracl_res.c b/grsecurity/gracl_res.c
53700 new file mode 100644
53701 index 0000000..39645c9
53702 --- /dev/null
53703 +++ b/grsecurity/gracl_res.c
53704 @@ -0,0 +1,68 @@
53705 +#include <linux/kernel.h>
53706 +#include <linux/sched.h>
53707 +#include <linux/gracl.h>
53708 +#include <linux/grinternal.h>
53709 +
53710 +static const char *restab_log[] = {
53711 + [RLIMIT_CPU] = "RLIMIT_CPU",
53712 + [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
53713 + [RLIMIT_DATA] = "RLIMIT_DATA",
53714 + [RLIMIT_STACK] = "RLIMIT_STACK",
53715 + [RLIMIT_CORE] = "RLIMIT_CORE",
53716 + [RLIMIT_RSS] = "RLIMIT_RSS",
53717 + [RLIMIT_NPROC] = "RLIMIT_NPROC",
53718 + [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
53719 + [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
53720 + [RLIMIT_AS] = "RLIMIT_AS",
53721 + [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
53722 + [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
53723 + [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
53724 + [RLIMIT_NICE] = "RLIMIT_NICE",
53725 + [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
53726 + [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
53727 + [GR_CRASH_RES] = "RLIMIT_CRASH"
53728 +};
53729 +
53730 +void
53731 +gr_log_resource(const struct task_struct *task,
53732 + const int res, const unsigned long wanted, const int gt)
53733 +{
53734 + const struct cred *cred;
53735 + unsigned long rlim;
53736 +
53737 + if (!gr_acl_is_enabled() && !grsec_resource_logging)
53738 + return;
53739 +
53740 + // not yet supported resource
53741 + if (unlikely(!restab_log[res]))
53742 + return;
53743 +
53744 + if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
53745 + rlim = task_rlimit_max(task, res);
53746 + else
53747 + rlim = task_rlimit(task, res);
53748 +
53749 + if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
53750 + return;
53751 +
53752 + rcu_read_lock();
53753 + cred = __task_cred(task);
53754 +
53755 + if (res == RLIMIT_NPROC &&
53756 + (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
53757 + cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
53758 + goto out_rcu_unlock;
53759 + else if (res == RLIMIT_MEMLOCK &&
53760 + cap_raised(cred->cap_effective, CAP_IPC_LOCK))
53761 + goto out_rcu_unlock;
53762 + else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
53763 + goto out_rcu_unlock;
53764 + rcu_read_unlock();
53765 +
53766 + gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
53767 +
53768 + return;
53769 +out_rcu_unlock:
53770 + rcu_read_unlock();
53771 + return;
53772 +}
53773 diff --git a/grsecurity/gracl_segv.c b/grsecurity/gracl_segv.c
53774 new file mode 100644
53775 index 0000000..5556be3
53776 --- /dev/null
53777 +++ b/grsecurity/gracl_segv.c
53778 @@ -0,0 +1,299 @@
53779 +#include <linux/kernel.h>
53780 +#include <linux/mm.h>
53781 +#include <asm/uaccess.h>
53782 +#include <asm/errno.h>
53783 +#include <asm/mman.h>
53784 +#include <net/sock.h>
53785 +#include <linux/file.h>
53786 +#include <linux/fs.h>
53787 +#include <linux/net.h>
53788 +#include <linux/in.h>
53789 +#include <linux/slab.h>
53790 +#include <linux/types.h>
53791 +#include <linux/sched.h>
53792 +#include <linux/timer.h>
53793 +#include <linux/gracl.h>
53794 +#include <linux/grsecurity.h>
53795 +#include <linux/grinternal.h>
53796 +
53797 +static struct crash_uid *uid_set;
53798 +static unsigned short uid_used;
53799 +static DEFINE_SPINLOCK(gr_uid_lock);
53800 +extern rwlock_t gr_inode_lock;
53801 +extern struct acl_subject_label *
53802 + lookup_acl_subj_label(const ino_t inode, const dev_t dev,
53803 + struct acl_role_label *role);
53804 +
53805 +#ifdef CONFIG_BTRFS_FS
53806 +extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
53807 +extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
53808 +#endif
53809 +
53810 +static inline dev_t __get_dev(const struct dentry *dentry)
53811 +{
53812 +#ifdef CONFIG_BTRFS_FS
53813 + if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
53814 + return get_btrfs_dev_from_inode(dentry->d_inode);
53815 + else
53816 +#endif
53817 + return dentry->d_inode->i_sb->s_dev;
53818 +}
53819 +
53820 +int
53821 +gr_init_uidset(void)
53822 +{
53823 + uid_set =
53824 + kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
53825 + uid_used = 0;
53826 +
53827 + return uid_set ? 1 : 0;
53828 +}
53829 +
53830 +void
53831 +gr_free_uidset(void)
53832 +{
53833 + if (uid_set)
53834 + kfree(uid_set);
53835 +
53836 + return;
53837 +}
53838 +
53839 +int
53840 +gr_find_uid(const uid_t uid)
53841 +{
53842 + struct crash_uid *tmp = uid_set;
53843 + uid_t buid;
53844 + int low = 0, high = uid_used - 1, mid;
53845 +
53846 + while (high >= low) {
53847 + mid = (low + high) >> 1;
53848 + buid = tmp[mid].uid;
53849 + if (buid == uid)
53850 + return mid;
53851 + if (buid > uid)
53852 + high = mid - 1;
53853 + if (buid < uid)
53854 + low = mid + 1;
53855 + }
53856 +
53857 + return -1;
53858 +}
53859 +
53860 +static __inline__ void
53861 +gr_insertsort(void)
53862 +{
53863 + unsigned short i, j;
53864 + struct crash_uid index;
53865 +
53866 + for (i = 1; i < uid_used; i++) {
53867 + index = uid_set[i];
53868 + j = i;
53869 + while ((j > 0) && uid_set[j - 1].uid > index.uid) {
53870 + uid_set[j] = uid_set[j - 1];
53871 + j--;
53872 + }
53873 + uid_set[j] = index;
53874 + }
53875 +
53876 + return;
53877 +}
53878 +
53879 +static __inline__ void
53880 +gr_insert_uid(const uid_t uid, const unsigned long expires)
53881 +{
53882 + int loc;
53883 +
53884 + if (uid_used == GR_UIDTABLE_MAX)
53885 + return;
53886 +
53887 + loc = gr_find_uid(uid);
53888 +
53889 + if (loc >= 0) {
53890 + uid_set[loc].expires = expires;
53891 + return;
53892 + }
53893 +
53894 + uid_set[uid_used].uid = uid;
53895 + uid_set[uid_used].expires = expires;
53896 + uid_used++;
53897 +
53898 + gr_insertsort();
53899 +
53900 + return;
53901 +}
53902 +
53903 +void
53904 +gr_remove_uid(const unsigned short loc)
53905 +{
53906 + unsigned short i;
53907 +
53908 + for (i = loc + 1; i < uid_used; i++)
53909 + uid_set[i - 1] = uid_set[i];
53910 +
53911 + uid_used--;
53912 +
53913 + return;
53914 +}
53915 +
53916 +int
53917 +gr_check_crash_uid(const uid_t uid)
53918 +{
53919 + int loc;
53920 + int ret = 0;
53921 +
53922 + if (unlikely(!gr_acl_is_enabled()))
53923 + return 0;
53924 +
53925 + spin_lock(&gr_uid_lock);
53926 + loc = gr_find_uid(uid);
53927 +
53928 + if (loc < 0)
53929 + goto out_unlock;
53930 +
53931 + if (time_before_eq(uid_set[loc].expires, get_seconds()))
53932 + gr_remove_uid(loc);
53933 + else
53934 + ret = 1;
53935 +
53936 +out_unlock:
53937 + spin_unlock(&gr_uid_lock);
53938 + return ret;
53939 +}
53940 +
53941 +static __inline__ int
53942 +proc_is_setxid(const struct cred *cred)
53943 +{
53944 + if (cred->uid != cred->euid || cred->uid != cred->suid ||
53945 + cred->uid != cred->fsuid)
53946 + return 1;
53947 + if (cred->gid != cred->egid || cred->gid != cred->sgid ||
53948 + cred->gid != cred->fsgid)
53949 + return 1;
53950 +
53951 + return 0;
53952 +}
53953 +
53954 +extern int gr_fake_force_sig(int sig, struct task_struct *t);
53955 +
53956 +void
53957 +gr_handle_crash(struct task_struct *task, const int sig)
53958 +{
53959 + struct acl_subject_label *curr;
53960 + struct task_struct *tsk, *tsk2;
53961 + const struct cred *cred;
53962 + const struct cred *cred2;
53963 +
53964 + if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
53965 + return;
53966 +
53967 + if (unlikely(!gr_acl_is_enabled()))
53968 + return;
53969 +
53970 + curr = task->acl;
53971 +
53972 + if (!(curr->resmask & (1 << GR_CRASH_RES)))
53973 + return;
53974 +
53975 + if (time_before_eq(curr->expires, get_seconds())) {
53976 + curr->expires = 0;
53977 + curr->crashes = 0;
53978 + }
53979 +
53980 + curr->crashes++;
53981 +
53982 + if (!curr->expires)
53983 + curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
53984 +
53985 + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
53986 + time_after(curr->expires, get_seconds())) {
53987 + rcu_read_lock();
53988 + cred = __task_cred(task);
53989 + if (cred->uid && proc_is_setxid(cred)) {
53990 + gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
53991 + spin_lock(&gr_uid_lock);
53992 + gr_insert_uid(cred->uid, curr->expires);
53993 + spin_unlock(&gr_uid_lock);
53994 + curr->expires = 0;
53995 + curr->crashes = 0;
53996 + read_lock(&tasklist_lock);
53997 + do_each_thread(tsk2, tsk) {
53998 + cred2 = __task_cred(tsk);
53999 + if (tsk != task && cred2->uid == cred->uid)
54000 + gr_fake_force_sig(SIGKILL, tsk);
54001 + } while_each_thread(tsk2, tsk);
54002 + read_unlock(&tasklist_lock);
54003 + } else {
54004 + gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
54005 + read_lock(&tasklist_lock);
54006 + read_lock(&grsec_exec_file_lock);
54007 + do_each_thread(tsk2, tsk) {
54008 + if (likely(tsk != task)) {
54009 + // if this thread has the same subject as the one that triggered
54010 + // RES_CRASH and it's the same binary, kill it
54011 + if (tsk->acl == task->acl && tsk->exec_file == task->exec_file)
54012 + gr_fake_force_sig(SIGKILL, tsk);
54013 + }
54014 + } while_each_thread(tsk2, tsk);
54015 + read_unlock(&grsec_exec_file_lock);
54016 + read_unlock(&tasklist_lock);
54017 + }
54018 + rcu_read_unlock();
54019 + }
54020 +
54021 + return;
54022 +}
54023 +
54024 +int
54025 +gr_check_crash_exec(const struct file *filp)
54026 +{
54027 + struct acl_subject_label *curr;
54028 +
54029 + if (unlikely(!gr_acl_is_enabled()))
54030 + return 0;
54031 +
54032 + read_lock(&gr_inode_lock);
54033 + curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
54034 + __get_dev(filp->f_path.dentry),
54035 + current->role);
54036 + read_unlock(&gr_inode_lock);
54037 +
54038 + if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) ||
54039 + (!curr->crashes && !curr->expires))
54040 + return 0;
54041 +
54042 + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
54043 + time_after(curr->expires, get_seconds()))
54044 + return 1;
54045 + else if (time_before_eq(curr->expires, get_seconds())) {
54046 + curr->crashes = 0;
54047 + curr->expires = 0;
54048 + }
54049 +
54050 + return 0;
54051 +}
54052 +
54053 +void
54054 +gr_handle_alertkill(struct task_struct *task)
54055 +{
54056 + struct acl_subject_label *curracl;
54057 + __u32 curr_ip;
54058 + struct task_struct *p, *p2;
54059 +
54060 + if (unlikely(!gr_acl_is_enabled()))
54061 + return;
54062 +
54063 + curracl = task->acl;
54064 + curr_ip = task->signal->curr_ip;
54065 +
54066 + if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
54067 + read_lock(&tasklist_lock);
54068 + do_each_thread(p2, p) {
54069 + if (p->signal->curr_ip == curr_ip)
54070 + gr_fake_force_sig(SIGKILL, p);
54071 + } while_each_thread(p2, p);
54072 + read_unlock(&tasklist_lock);
54073 + } else if (curracl->mode & GR_KILLPROC)
54074 + gr_fake_force_sig(SIGKILL, task);
54075 +
54076 + return;
54077 +}
54078 diff --git a/grsecurity/gracl_shm.c b/grsecurity/gracl_shm.c
54079 new file mode 100644
54080 index 0000000..9d83a69
54081 --- /dev/null
54082 +++ b/grsecurity/gracl_shm.c
54083 @@ -0,0 +1,40 @@
54084 +#include <linux/kernel.h>
54085 +#include <linux/mm.h>
54086 +#include <linux/sched.h>
54087 +#include <linux/file.h>
54088 +#include <linux/ipc.h>
54089 +#include <linux/gracl.h>
54090 +#include <linux/grsecurity.h>
54091 +#include <linux/grinternal.h>
54092 +
54093 +int
54094 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
54095 + const time_t shm_createtime, const uid_t cuid, const int shmid)
54096 +{
54097 + struct task_struct *task;
54098 +
54099 + if (!gr_acl_is_enabled())
54100 + return 1;
54101 +
54102 + rcu_read_lock();
54103 + read_lock(&tasklist_lock);
54104 +
54105 + task = find_task_by_vpid(shm_cprid);
54106 +
54107 + if (unlikely(!task))
54108 + task = find_task_by_vpid(shm_lapid);
54109 +
54110 + if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
54111 + (task->pid == shm_lapid)) &&
54112 + (task->acl->mode & GR_PROTSHM) &&
54113 + (task->acl != current->acl))) {
54114 + read_unlock(&tasklist_lock);
54115 + rcu_read_unlock();
54116 + gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid);
54117 + return 0;
54118 + }
54119 + read_unlock(&tasklist_lock);
54120 + rcu_read_unlock();
54121 +
54122 + return 1;
54123 +}
54124 diff --git a/grsecurity/grsec_chdir.c b/grsecurity/grsec_chdir.c
54125 new file mode 100644
54126 index 0000000..bc0be01
54127 --- /dev/null
54128 +++ b/grsecurity/grsec_chdir.c
54129 @@ -0,0 +1,19 @@
54130 +#include <linux/kernel.h>
54131 +#include <linux/sched.h>
54132 +#include <linux/fs.h>
54133 +#include <linux/file.h>
54134 +#include <linux/grsecurity.h>
54135 +#include <linux/grinternal.h>
54136 +
54137 +void
54138 +gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
54139 +{
54140 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
54141 + if ((grsec_enable_chdir && grsec_enable_group &&
54142 + in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
54143 + !grsec_enable_group)) {
54144 + gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
54145 + }
54146 +#endif
54147 + return;
54148 +}
54149 diff --git a/grsecurity/grsec_chroot.c b/grsecurity/grsec_chroot.c
54150 new file mode 100644
54151 index 0000000..a2dc675
54152 --- /dev/null
54153 +++ b/grsecurity/grsec_chroot.c
54154 @@ -0,0 +1,351 @@
54155 +#include <linux/kernel.h>
54156 +#include <linux/module.h>
54157 +#include <linux/sched.h>
54158 +#include <linux/file.h>
54159 +#include <linux/fs.h>
54160 +#include <linux/mount.h>
54161 +#include <linux/types.h>
54162 +#include <linux/pid_namespace.h>
54163 +#include <linux/grsecurity.h>
54164 +#include <linux/grinternal.h>
54165 +
54166 +void gr_set_chroot_entries(struct task_struct *task, struct path *path)
54167 +{
54168 +#ifdef CONFIG_GRKERNSEC
54169 + if (task->pid > 1 && path->dentry != init_task.fs->root.dentry &&
54170 + path->dentry != task->nsproxy->mnt_ns->root->mnt_root)
54171 + task->gr_is_chrooted = 1;
54172 + else
54173 + task->gr_is_chrooted = 0;
54174 +
54175 + task->gr_chroot_dentry = path->dentry;
54176 +#endif
54177 + return;
54178 +}
54179 +
54180 +void gr_clear_chroot_entries(struct task_struct *task)
54181 +{
54182 +#ifdef CONFIG_GRKERNSEC
54183 + task->gr_is_chrooted = 0;
54184 + task->gr_chroot_dentry = NULL;
54185 +#endif
54186 + return;
54187 +}
54188 +
54189 +int
54190 +gr_handle_chroot_unix(const pid_t pid)
54191 +{
54192 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
54193 + struct task_struct *p;
54194 +
54195 + if (unlikely(!grsec_enable_chroot_unix))
54196 + return 1;
54197 +
54198 + if (likely(!proc_is_chrooted(current)))
54199 + return 1;
54200 +
54201 + rcu_read_lock();
54202 + read_lock(&tasklist_lock);
54203 + p = find_task_by_vpid_unrestricted(pid);
54204 + if (unlikely(p && !have_same_root(current, p))) {
54205 + read_unlock(&tasklist_lock);
54206 + rcu_read_unlock();
54207 + gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
54208 + return 0;
54209 + }
54210 + read_unlock(&tasklist_lock);
54211 + rcu_read_unlock();
54212 +#endif
54213 + return 1;
54214 +}
54215 +
54216 +int
54217 +gr_handle_chroot_nice(void)
54218 +{
54219 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
54220 + if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
54221 + gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
54222 + return -EPERM;
54223 + }
54224 +#endif
54225 + return 0;
54226 +}
54227 +
54228 +int
54229 +gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
54230 +{
54231 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
54232 + if (grsec_enable_chroot_nice && (niceval < task_nice(p))
54233 + && proc_is_chrooted(current)) {
54234 + gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, p->pid);
54235 + return -EACCES;
54236 + }
54237 +#endif
54238 + return 0;
54239 +}
54240 +
54241 +int
54242 +gr_handle_chroot_rawio(const struct inode *inode)
54243 +{
54244 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
54245 + if (grsec_enable_chroot_caps && proc_is_chrooted(current) &&
54246 + inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO))
54247 + return 1;
54248 +#endif
54249 + return 0;
54250 +}
54251 +
54252 +int
54253 +gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
54254 +{
54255 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
54256 + struct task_struct *p;
54257 + int ret = 0;
54258 + if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
54259 + return ret;
54260 +
54261 + read_lock(&tasklist_lock);
54262 + do_each_pid_task(pid, type, p) {
54263 + if (!have_same_root(current, p)) {
54264 + ret = 1;
54265 + goto out;
54266 + }
54267 + } while_each_pid_task(pid, type, p);
54268 +out:
54269 + read_unlock(&tasklist_lock);
54270 + return ret;
54271 +#endif
54272 + return 0;
54273 +}
54274 +
54275 +int
54276 +gr_pid_is_chrooted(struct task_struct *p)
54277 +{
54278 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
54279 + if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
54280 + return 0;
54281 +
54282 + if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
54283 + !have_same_root(current, p)) {
54284 + return 1;
54285 + }
54286 +#endif
54287 + return 0;
54288 +}
54289 +
54290 +EXPORT_SYMBOL(gr_pid_is_chrooted);
54291 +
54292 +#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
54293 +int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
54294 +{
54295 + struct path path, currentroot;
54296 + int ret = 0;
54297 +
54298 + path.dentry = (struct dentry *)u_dentry;
54299 + path.mnt = (struct vfsmount *)u_mnt;
54300 + get_fs_root(current->fs, &currentroot);
54301 + if (path_is_under(&path, &currentroot))
54302 + ret = 1;
54303 + path_put(&currentroot);
54304 +
54305 + return ret;
54306 +}
54307 +#endif
54308 +
54309 +int
54310 +gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
54311 +{
54312 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
54313 + if (!grsec_enable_chroot_fchdir)
54314 + return 1;
54315 +
54316 + if (!proc_is_chrooted(current))
54317 + return 1;
54318 + else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
54319 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
54320 + return 0;
54321 + }
54322 +#endif
54323 + return 1;
54324 +}
54325 +
54326 +int
54327 +gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
54328 + const time_t shm_createtime)
54329 +{
54330 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
54331 + struct task_struct *p;
54332 + time_t starttime;
54333 +
54334 + if (unlikely(!grsec_enable_chroot_shmat))
54335 + return 1;
54336 +
54337 + if (likely(!proc_is_chrooted(current)))
54338 + return 1;
54339 +
54340 + rcu_read_lock();
54341 + read_lock(&tasklist_lock);
54342 +
54343 + if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
54344 + starttime = p->start_time.tv_sec;
54345 + if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
54346 + if (have_same_root(current, p)) {
54347 + goto allow;
54348 + } else {
54349 + read_unlock(&tasklist_lock);
54350 + rcu_read_unlock();
54351 + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
54352 + return 0;
54353 + }
54354 + }
54355 + /* creator exited, pid reuse, fall through to next check */
54356 + }
54357 + if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
54358 + if (unlikely(!have_same_root(current, p))) {
54359 + read_unlock(&tasklist_lock);
54360 + rcu_read_unlock();
54361 + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
54362 + return 0;
54363 + }
54364 + }
54365 +
54366 +allow:
54367 + read_unlock(&tasklist_lock);
54368 + rcu_read_unlock();
54369 +#endif
54370 + return 1;
54371 +}
54372 +
54373 +void
54374 +gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
54375 +{
54376 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
54377 + if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
54378 + gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
54379 +#endif
54380 + return;
54381 +}
54382 +
54383 +int
54384 +gr_handle_chroot_mknod(const struct dentry *dentry,
54385 + const struct vfsmount *mnt, const int mode)
54386 +{
54387 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
54388 + if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
54389 + proc_is_chrooted(current)) {
54390 + gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
54391 + return -EPERM;
54392 + }
54393 +#endif
54394 + return 0;
54395 +}
54396 +
54397 +int
54398 +gr_handle_chroot_mount(const struct dentry *dentry,
54399 + const struct vfsmount *mnt, const char *dev_name)
54400 +{
54401 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
54402 + if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
54403 + gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none", dentry, mnt);
54404 + return -EPERM;
54405 + }
54406 +#endif
54407 + return 0;
54408 +}
54409 +
54410 +int
54411 +gr_handle_chroot_pivot(void)
54412 +{
54413 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
54414 + if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
54415 + gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
54416 + return -EPERM;
54417 + }
54418 +#endif
54419 + return 0;
54420 +}
54421 +
54422 +int
54423 +gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
54424 +{
54425 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
54426 + if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
54427 + !gr_is_outside_chroot(dentry, mnt)) {
54428 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
54429 + return -EPERM;
54430 + }
54431 +#endif
54432 + return 0;
54433 +}
54434 +
54435 +extern const char *captab_log[];
54436 +extern int captab_log_entries;
54437 +
54438 +int
54439 +gr_chroot_is_capable(const int cap)
54440 +{
54441 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
54442 + if (grsec_enable_chroot_caps && proc_is_chrooted(current)) {
54443 + kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
54444 + if (cap_raised(chroot_caps, cap)) {
54445 + const struct cred *creds = current_cred();
54446 + if (cap_raised(creds->cap_effective, cap) && cap < captab_log_entries) {
54447 + gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, current, captab_log[cap]);
54448 + }
54449 + return 0;
54450 + }
54451 + }
54452 +#endif
54453 + return 1;
54454 +}
54455 +
54456 +int
54457 +gr_chroot_is_capable_nolog(const int cap)
54458 +{
54459 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
54460 + if (grsec_enable_chroot_caps && proc_is_chrooted(current)) {
54461 + kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
54462 + if (cap_raised(chroot_caps, cap)) {
54463 + return 0;
54464 + }
54465 + }
54466 +#endif
54467 + return 1;
54468 +}
54469 +
54470 +int
54471 +gr_handle_chroot_sysctl(const int op)
54472 +{
54473 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
54474 + if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) &&
54475 + proc_is_chrooted(current))
54476 + return -EACCES;
54477 +#endif
54478 + return 0;
54479 +}
54480 +
54481 +void
54482 +gr_handle_chroot_chdir(struct path *path)
54483 +{
54484 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
54485 + if (grsec_enable_chroot_chdir)
54486 + set_fs_pwd(current->fs, path);
54487 +#endif
54488 + return;
54489 +}
54490 +
54491 +int
54492 +gr_handle_chroot_chmod(const struct dentry *dentry,
54493 + const struct vfsmount *mnt, const int mode)
54494 +{
54495 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
54496 + /* allow chmod +s on directories, but not files */
54497 + if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
54498 + ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
54499 + proc_is_chrooted(current)) {
54500 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
54501 + return -EPERM;
54502 + }
54503 +#endif
54504 + return 0;
54505 +}
54506 diff --git a/grsecurity/grsec_disabled.c b/grsecurity/grsec_disabled.c
54507 new file mode 100644
54508 index 0000000..d81a586
54509 --- /dev/null
54510 +++ b/grsecurity/grsec_disabled.c
54511 @@ -0,0 +1,439 @@
54512 +#include <linux/kernel.h>
54513 +#include <linux/module.h>
54514 +#include <linux/sched.h>
54515 +#include <linux/file.h>
54516 +#include <linux/fs.h>
54517 +#include <linux/kdev_t.h>
54518 +#include <linux/net.h>
54519 +#include <linux/in.h>
54520 +#include <linux/ip.h>
54521 +#include <linux/skbuff.h>
54522 +#include <linux/sysctl.h>
54523 +
54524 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
54525 +void
54526 +pax_set_initial_flags(struct linux_binprm *bprm)
54527 +{
54528 + return;
54529 +}
54530 +#endif
54531 +
54532 +#ifdef CONFIG_SYSCTL
54533 +__u32
54534 +gr_handle_sysctl(const struct ctl_table * table, const int op)
54535 +{
54536 + return 0;
54537 +}
54538 +#endif
54539 +
54540 +#ifdef CONFIG_TASKSTATS
54541 +int gr_is_taskstats_denied(int pid)
54542 +{
54543 + return 0;
54544 +}
54545 +#endif
54546 +
54547 +int
54548 +gr_acl_is_enabled(void)
54549 +{
54550 + return 0;
54551 +}
54552 +
54553 +void
54554 +gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
54555 +{
54556 + return;
54557 +}
54558 +
54559 +int
54560 +gr_handle_rawio(const struct inode *inode)
54561 +{
54562 + return 0;
54563 +}
54564 +
54565 +void
54566 +gr_acl_handle_psacct(struct task_struct *task, const long code)
54567 +{
54568 + return;
54569 +}
54570 +
54571 +int
54572 +gr_handle_ptrace(struct task_struct *task, const long request)
54573 +{
54574 + return 0;
54575 +}
54576 +
54577 +int
54578 +gr_handle_proc_ptrace(struct task_struct *task)
54579 +{
54580 + return 0;
54581 +}
54582 +
54583 +void
54584 +gr_learn_resource(const struct task_struct *task,
54585 + const int res, const unsigned long wanted, const int gt)
54586 +{
54587 + return;
54588 +}
54589 +
54590 +int
54591 +gr_set_acls(const int type)
54592 +{
54593 + return 0;
54594 +}
54595 +
54596 +int
54597 +gr_check_hidden_task(const struct task_struct *tsk)
54598 +{
54599 + return 0;
54600 +}
54601 +
54602 +int
54603 +gr_check_protected_task(const struct task_struct *task)
54604 +{
54605 + return 0;
54606 +}
54607 +
54608 +int
54609 +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
54610 +{
54611 + return 0;
54612 +}
54613 +
54614 +void
54615 +gr_copy_label(struct task_struct *tsk)
54616 +{
54617 + return;
54618 +}
54619 +
54620 +void
54621 +gr_set_pax_flags(struct task_struct *task)
54622 +{
54623 + return;
54624 +}
54625 +
54626 +int
54627 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
54628 + const int unsafe_share)
54629 +{
54630 + return 0;
54631 +}
54632 +
54633 +void
54634 +gr_handle_delete(const ino_t ino, const dev_t dev)
54635 +{
54636 + return;
54637 +}
54638 +
54639 +void
54640 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
54641 +{
54642 + return;
54643 +}
54644 +
54645 +void
54646 +gr_handle_crash(struct task_struct *task, const int sig)
54647 +{
54648 + return;
54649 +}
54650 +
54651 +int
54652 +gr_check_crash_exec(const struct file *filp)
54653 +{
54654 + return 0;
54655 +}
54656 +
54657 +int
54658 +gr_check_crash_uid(const uid_t uid)
54659 +{
54660 + return 0;
54661 +}
54662 +
54663 +void
54664 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
54665 + struct dentry *old_dentry,
54666 + struct dentry *new_dentry,
54667 + struct vfsmount *mnt, const __u8 replace)
54668 +{
54669 + return;
54670 +}
54671 +
54672 +int
54673 +gr_search_socket(const int family, const int type, const int protocol)
54674 +{
54675 + return 1;
54676 +}
54677 +
54678 +int
54679 +gr_search_connectbind(const int mode, const struct socket *sock,
54680 + const struct sockaddr_in *addr)
54681 +{
54682 + return 0;
54683 +}
54684 +
54685 +void
54686 +gr_handle_alertkill(struct task_struct *task)
54687 +{
54688 + return;
54689 +}
54690 +
54691 +__u32
54692 +gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
54693 +{
54694 + return 1;
54695 +}
54696 +
54697 +__u32
54698 +gr_acl_handle_hidden_file(const struct dentry * dentry,
54699 + const struct vfsmount * mnt)
54700 +{
54701 + return 1;
54702 +}
54703 +
54704 +__u32
54705 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
54706 + int acc_mode)
54707 +{
54708 + return 1;
54709 +}
54710 +
54711 +__u32
54712 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
54713 +{
54714 + return 1;
54715 +}
54716 +
54717 +__u32
54718 +gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
54719 +{
54720 + return 1;
54721 +}
54722 +
54723 +int
54724 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
54725 + unsigned int *vm_flags)
54726 +{
54727 + return 1;
54728 +}
54729 +
54730 +__u32
54731 +gr_acl_handle_truncate(const struct dentry * dentry,
54732 + const struct vfsmount * mnt)
54733 +{
54734 + return 1;
54735 +}
54736 +
54737 +__u32
54738 +gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
54739 +{
54740 + return 1;
54741 +}
54742 +
54743 +__u32
54744 +gr_acl_handle_access(const struct dentry * dentry,
54745 + const struct vfsmount * mnt, const int fmode)
54746 +{
54747 + return 1;
54748 +}
54749 +
54750 +__u32
54751 +gr_acl_handle_fchmod(const struct dentry * dentry, const struct vfsmount * mnt,
54752 + mode_t mode)
54753 +{
54754 + return 1;
54755 +}
54756 +
54757 +__u32
54758 +gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
54759 + mode_t mode)
54760 +{
54761 + return 1;
54762 +}
54763 +
54764 +__u32
54765 +gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
54766 +{
54767 + return 1;
54768 +}
54769 +
54770 +__u32
54771 +gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
54772 +{
54773 + return 1;
54774 +}
54775 +
54776 +void
54777 +grsecurity_init(void)
54778 +{
54779 + return;
54780 +}
54781 +
54782 +__u32
54783 +gr_acl_handle_mknod(const struct dentry * new_dentry,
54784 + const struct dentry * parent_dentry,
54785 + const struct vfsmount * parent_mnt,
54786 + const int mode)
54787 +{
54788 + return 1;
54789 +}
54790 +
54791 +__u32
54792 +gr_acl_handle_mkdir(const struct dentry * new_dentry,
54793 + const struct dentry * parent_dentry,
54794 + const struct vfsmount * parent_mnt)
54795 +{
54796 + return 1;
54797 +}
54798 +
54799 +__u32
54800 +gr_acl_handle_symlink(const struct dentry * new_dentry,
54801 + const struct dentry * parent_dentry,
54802 + const struct vfsmount * parent_mnt, const char *from)
54803 +{
54804 + return 1;
54805 +}
54806 +
54807 +__u32
54808 +gr_acl_handle_link(const struct dentry * new_dentry,
54809 + const struct dentry * parent_dentry,
54810 + const struct vfsmount * parent_mnt,
54811 + const struct dentry * old_dentry,
54812 + const struct vfsmount * old_mnt, const char *to)
54813 +{
54814 + return 1;
54815 +}
54816 +
54817 +int
54818 +gr_acl_handle_rename(const struct dentry *new_dentry,
54819 + const struct dentry *parent_dentry,
54820 + const struct vfsmount *parent_mnt,
54821 + const struct dentry *old_dentry,
54822 + const struct inode *old_parent_inode,
54823 + const struct vfsmount *old_mnt, const char *newname)
54824 +{
54825 + return 0;
54826 +}
54827 +
54828 +int
54829 +gr_acl_handle_filldir(const struct file *file, const char *name,
54830 + const int namelen, const ino_t ino)
54831 +{
54832 + return 1;
54833 +}
54834 +
54835 +int
54836 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
54837 + const time_t shm_createtime, const uid_t cuid, const int shmid)
54838 +{
54839 + return 1;
54840 +}
54841 +
54842 +int
54843 +gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
54844 +{
54845 + return 0;
54846 +}
54847 +
54848 +int
54849 +gr_search_accept(const struct socket *sock)
54850 +{
54851 + return 0;
54852 +}
54853 +
54854 +int
54855 +gr_search_listen(const struct socket *sock)
54856 +{
54857 + return 0;
54858 +}
54859 +
54860 +int
54861 +gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
54862 +{
54863 + return 0;
54864 +}
54865 +
54866 +__u32
54867 +gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
54868 +{
54869 + return 1;
54870 +}
54871 +
54872 +__u32
54873 +gr_acl_handle_creat(const struct dentry * dentry,
54874 + const struct dentry * p_dentry,
54875 + const struct vfsmount * p_mnt, int open_flags, int acc_mode,
54876 + const int imode)
54877 +{
54878 + return 1;
54879 +}
54880 +
54881 +void
54882 +gr_acl_handle_exit(void)
54883 +{
54884 + return;
54885 +}
54886 +
54887 +int
54888 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
54889 +{
54890 + return 1;
54891 +}
54892 +
54893 +void
54894 +gr_set_role_label(const uid_t uid, const gid_t gid)
54895 +{
54896 + return;
54897 +}
54898 +
54899 +int
54900 +gr_acl_handle_procpidmem(const struct task_struct *task)
54901 +{
54902 + return 0;
54903 +}
54904 +
54905 +int
54906 +gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
54907 +{
54908 + return 0;
54909 +}
54910 +
54911 +int
54912 +gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
54913 +{
54914 + return 0;
54915 +}
54916 +
54917 +void
54918 +gr_set_kernel_label(struct task_struct *task)
54919 +{
54920 + return;
54921 +}
54922 +
54923 +int
54924 +gr_check_user_change(int real, int effective, int fs)
54925 +{
54926 + return 0;
54927 +}
54928 +
54929 +int
54930 +gr_check_group_change(int real, int effective, int fs)
54931 +{
54932 + return 0;
54933 +}
54934 +
54935 +int gr_acl_enable_at_secure(void)
54936 +{
54937 + return 0;
54938 +}
54939 +
54940 +dev_t gr_get_dev_from_dentry(struct dentry *dentry)
54941 +{
54942 + return dentry->d_inode->i_sb->s_dev;
54943 +}
54944 +
54945 +EXPORT_SYMBOL(gr_learn_resource);
54946 +EXPORT_SYMBOL(gr_set_kernel_label);
54947 +#ifdef CONFIG_SECURITY
54948 +EXPORT_SYMBOL(gr_check_user_change);
54949 +EXPORT_SYMBOL(gr_check_group_change);
54950 +#endif
54951 diff --git a/grsecurity/grsec_exec.c b/grsecurity/grsec_exec.c
54952 new file mode 100644
54953 index 0000000..2b05ada
54954 --- /dev/null
54955 +++ b/grsecurity/grsec_exec.c
54956 @@ -0,0 +1,146 @@
54957 +#include <linux/kernel.h>
54958 +#include <linux/sched.h>
54959 +#include <linux/file.h>
54960 +#include <linux/binfmts.h>
54961 +#include <linux/fs.h>
54962 +#include <linux/types.h>
54963 +#include <linux/grdefs.h>
54964 +#include <linux/grsecurity.h>
54965 +#include <linux/grinternal.h>
54966 +#include <linux/capability.h>
54967 +#include <linux/module.h>
54968 +
54969 +#include <asm/uaccess.h>
54970 +
54971 +#ifdef CONFIG_GRKERNSEC_EXECLOG
54972 +static char gr_exec_arg_buf[132];
54973 +static DEFINE_MUTEX(gr_exec_arg_mutex);
54974 +#endif
54975 +
54976 +extern const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr);
54977 +
54978 +void
54979 +gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv)
54980 +{
54981 +#ifdef CONFIG_GRKERNSEC_EXECLOG
54982 + char *grarg = gr_exec_arg_buf;
54983 + unsigned int i, x, execlen = 0;
54984 + char c;
54985 +
54986 + if (!((grsec_enable_execlog && grsec_enable_group &&
54987 + in_group_p(grsec_audit_gid))
54988 + || (grsec_enable_execlog && !grsec_enable_group)))
54989 + return;
54990 +
54991 + mutex_lock(&gr_exec_arg_mutex);
54992 + memset(grarg, 0, sizeof(gr_exec_arg_buf));
54993 +
54994 + for (i = 0; i < bprm->argc && execlen < 128; i++) {
54995 + const char __user *p;
54996 + unsigned int len;
54997 +
54998 + p = get_user_arg_ptr(argv, i);
54999 + if (IS_ERR(p))
55000 + goto log;
55001 +
55002 + len = strnlen_user(p, 128 - execlen);
55003 + if (len > 128 - execlen)
55004 + len = 128 - execlen;
55005 + else if (len > 0)
55006 + len--;
55007 + if (copy_from_user(grarg + execlen, p, len))
55008 + goto log;
55009 +
55010 + /* rewrite unprintable characters */
55011 + for (x = 0; x < len; x++) {
55012 + c = *(grarg + execlen + x);
55013 + if (c < 32 || c > 126)
55014 + *(grarg + execlen + x) = ' ';
55015 + }
55016 +
55017 + execlen += len;
55018 + *(grarg + execlen) = ' ';
55019 + *(grarg + execlen + 1) = '\0';
55020 + execlen++;
55021 + }
55022 +
55023 + log:
55024 + gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
55025 + bprm->file->f_path.mnt, grarg);
55026 + mutex_unlock(&gr_exec_arg_mutex);
55027 +#endif
55028 + return;
55029 +}
55030 +
55031 +#ifdef CONFIG_GRKERNSEC
55032 +extern int gr_acl_is_capable(const int cap);
55033 +extern int gr_acl_is_capable_nolog(const int cap);
55034 +extern int gr_chroot_is_capable(const int cap);
55035 +extern int gr_chroot_is_capable_nolog(const int cap);
55036 +#endif
55037 +
55038 +const char *captab_log[] = {
55039 + "CAP_CHOWN",
55040 + "CAP_DAC_OVERRIDE",
55041 + "CAP_DAC_READ_SEARCH",
55042 + "CAP_FOWNER",
55043 + "CAP_FSETID",
55044 + "CAP_KILL",
55045 + "CAP_SETGID",
55046 + "CAP_SETUID",
55047 + "CAP_SETPCAP",
55048 + "CAP_LINUX_IMMUTABLE",
55049 + "CAP_NET_BIND_SERVICE",
55050 + "CAP_NET_BROADCAST",
55051 + "CAP_NET_ADMIN",
55052 + "CAP_NET_RAW",
55053 + "CAP_IPC_LOCK",
55054 + "CAP_IPC_OWNER",
55055 + "CAP_SYS_MODULE",
55056 + "CAP_SYS_RAWIO",
55057 + "CAP_SYS_CHROOT",
55058 + "CAP_SYS_PTRACE",
55059 + "CAP_SYS_PACCT",
55060 + "CAP_SYS_ADMIN",
55061 + "CAP_SYS_BOOT",
55062 + "CAP_SYS_NICE",
55063 + "CAP_SYS_RESOURCE",
55064 + "CAP_SYS_TIME",
55065 + "CAP_SYS_TTY_CONFIG",
55066 + "CAP_MKNOD",
55067 + "CAP_LEASE",
55068 + "CAP_AUDIT_WRITE",
55069 + "CAP_AUDIT_CONTROL",
55070 + "CAP_SETFCAP",
55071 + "CAP_MAC_OVERRIDE",
55072 + "CAP_MAC_ADMIN",
55073 + "CAP_SYSLOG",
55074 + "CAP_WAKE_ALARM"
55075 +};
55076 +
55077 +int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]);
55078 +
55079 +int gr_is_capable(const int cap)
55080 +{
55081 +#ifdef CONFIG_GRKERNSEC
55082 + if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap))
55083 + return 1;
55084 + return 0;
55085 +#else
55086 + return 1;
55087 +#endif
55088 +}
55089 +
55090 +int gr_is_capable_nolog(const int cap)
55091 +{
55092 +#ifdef CONFIG_GRKERNSEC
55093 + if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap))
55094 + return 1;
55095 + return 0;
55096 +#else
55097 + return 1;
55098 +#endif
55099 +}
55100 +
55101 +EXPORT_SYMBOL(gr_is_capable);
55102 +EXPORT_SYMBOL(gr_is_capable_nolog);
55103 diff --git a/grsecurity/grsec_fifo.c b/grsecurity/grsec_fifo.c
55104 new file mode 100644
55105 index 0000000..d3ee748
55106 --- /dev/null
55107 +++ b/grsecurity/grsec_fifo.c
55108 @@ -0,0 +1,24 @@
55109 +#include <linux/kernel.h>
55110 +#include <linux/sched.h>
55111 +#include <linux/fs.h>
55112 +#include <linux/file.h>
55113 +#include <linux/grinternal.h>
55114 +
55115 +int
55116 +gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
55117 + const struct dentry *dir, const int flag, const int acc_mode)
55118 +{
55119 +#ifdef CONFIG_GRKERNSEC_FIFO
55120 + const struct cred *cred = current_cred();
55121 +
55122 + if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
55123 + !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
55124 + (dentry->d_inode->i_uid != dir->d_inode->i_uid) &&
55125 + (cred->fsuid != dentry->d_inode->i_uid)) {
55126 + if (!inode_permission(dentry->d_inode, acc_mode))
55127 + gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, dentry->d_inode->i_uid, dentry->d_inode->i_gid);
55128 + return -EACCES;
55129 + }
55130 +#endif
55131 + return 0;
55132 +}
55133 diff --git a/grsecurity/grsec_fork.c b/grsecurity/grsec_fork.c
55134 new file mode 100644
55135 index 0000000..8ca18bf
55136 --- /dev/null
55137 +++ b/grsecurity/grsec_fork.c
55138 @@ -0,0 +1,23 @@
55139 +#include <linux/kernel.h>
55140 +#include <linux/sched.h>
55141 +#include <linux/grsecurity.h>
55142 +#include <linux/grinternal.h>
55143 +#include <linux/errno.h>
55144 +
55145 +void
55146 +gr_log_forkfail(const int retval)
55147 +{
55148 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
55149 + if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
55150 + switch (retval) {
55151 + case -EAGAIN:
55152 + gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
55153 + break;
55154 + case -ENOMEM:
55155 + gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
55156 + break;
55157 + }
55158 + }
55159 +#endif
55160 + return;
55161 +}
55162 diff --git a/grsecurity/grsec_init.c b/grsecurity/grsec_init.c
55163 new file mode 100644
55164 index 0000000..01ddde4
55165 --- /dev/null
55166 +++ b/grsecurity/grsec_init.c
55167 @@ -0,0 +1,277 @@
55168 +#include <linux/kernel.h>
55169 +#include <linux/sched.h>
55170 +#include <linux/mm.h>
55171 +#include <linux/gracl.h>
55172 +#include <linux/slab.h>
55173 +#include <linux/vmalloc.h>
55174 +#include <linux/percpu.h>
55175 +#include <linux/module.h>
55176 +
55177 +int grsec_enable_ptrace_readexec;
55178 +int grsec_enable_setxid;
55179 +int grsec_enable_brute;
55180 +int grsec_enable_link;
55181 +int grsec_enable_dmesg;
55182 +int grsec_enable_harden_ptrace;
55183 +int grsec_enable_fifo;
55184 +int grsec_enable_execlog;
55185 +int grsec_enable_signal;
55186 +int grsec_enable_forkfail;
55187 +int grsec_enable_audit_ptrace;
55188 +int grsec_enable_time;
55189 +int grsec_enable_audit_textrel;
55190 +int grsec_enable_group;
55191 +int grsec_audit_gid;
55192 +int grsec_enable_chdir;
55193 +int grsec_enable_mount;
55194 +int grsec_enable_rofs;
55195 +int grsec_enable_chroot_findtask;
55196 +int grsec_enable_chroot_mount;
55197 +int grsec_enable_chroot_shmat;
55198 +int grsec_enable_chroot_fchdir;
55199 +int grsec_enable_chroot_double;
55200 +int grsec_enable_chroot_pivot;
55201 +int grsec_enable_chroot_chdir;
55202 +int grsec_enable_chroot_chmod;
55203 +int grsec_enable_chroot_mknod;
55204 +int grsec_enable_chroot_nice;
55205 +int grsec_enable_chroot_execlog;
55206 +int grsec_enable_chroot_caps;
55207 +int grsec_enable_chroot_sysctl;
55208 +int grsec_enable_chroot_unix;
55209 +int grsec_enable_tpe;
55210 +int grsec_tpe_gid;
55211 +int grsec_enable_blackhole;
55212 +#ifdef CONFIG_IPV6_MODULE
55213 +EXPORT_SYMBOL(grsec_enable_blackhole);
55214 +#endif
55215 +int grsec_lastack_retries;
55216 +int grsec_enable_tpe_all;
55217 +int grsec_enable_tpe_invert;
55218 +int grsec_enable_socket_all;
55219 +int grsec_socket_all_gid;
55220 +int grsec_enable_socket_client;
55221 +int grsec_socket_client_gid;
55222 +int grsec_enable_socket_server;
55223 +int grsec_socket_server_gid;
55224 +int grsec_resource_logging;
55225 +int grsec_disable_privio;
55226 +int grsec_enable_log_rwxmaps;
55227 +int grsec_lock;
55228 +
55229 +DEFINE_SPINLOCK(grsec_alert_lock);
55230 +unsigned long grsec_alert_wtime = 0;
55231 +unsigned long grsec_alert_fyet = 0;
55232 +
55233 +DEFINE_SPINLOCK(grsec_audit_lock);
55234 +
55235 +DEFINE_RWLOCK(grsec_exec_file_lock);
55236 +
55237 +char *gr_shared_page[4];
55238 +
55239 +char *gr_alert_log_fmt;
55240 +char *gr_audit_log_fmt;
55241 +char *gr_alert_log_buf;
55242 +char *gr_audit_log_buf;
55243 +
55244 +extern struct gr_arg *gr_usermode;
55245 +extern unsigned char *gr_system_salt;
55246 +extern unsigned char *gr_system_sum;
55247 +
55248 +void __init
55249 +grsecurity_init(void)
55250 +{
55251 + int j;
55252 + /* create the per-cpu shared pages */
55253 +
55254 +#ifdef CONFIG_X86
55255 + memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
55256 +#endif
55257 +
55258 + for (j = 0; j < 4; j++) {
55259 + gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
55260 + if (gr_shared_page[j] == NULL) {
55261 + panic("Unable to allocate grsecurity shared page");
55262 + return;
55263 + }
55264 + }
55265 +
55266 + /* allocate log buffers */
55267 + gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
55268 + if (!gr_alert_log_fmt) {
55269 + panic("Unable to allocate grsecurity alert log format buffer");
55270 + return;
55271 + }
55272 + gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
55273 + if (!gr_audit_log_fmt) {
55274 + panic("Unable to allocate grsecurity audit log format buffer");
55275 + return;
55276 + }
55277 + gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
55278 + if (!gr_alert_log_buf) {
55279 + panic("Unable to allocate grsecurity alert log buffer");
55280 + return;
55281 + }
55282 + gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
55283 + if (!gr_audit_log_buf) {
55284 + panic("Unable to allocate grsecurity audit log buffer");
55285 + return;
55286 + }
55287 +
55288 + /* allocate memory for authentication structure */
55289 + gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
55290 + gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
55291 + gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
55292 +
55293 + if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
55294 + panic("Unable to allocate grsecurity authentication structure");
55295 + return;
55296 + }
55297 +
55298 +
55299 +#ifdef CONFIG_GRKERNSEC_IO
55300 +#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
55301 + grsec_disable_privio = 1;
55302 +#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
55303 + grsec_disable_privio = 1;
55304 +#else
55305 + grsec_disable_privio = 0;
55306 +#endif
55307 +#endif
55308 +
55309 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
55310 + /* for backward compatibility, tpe_invert always defaults to on if
55311 + enabled in the kernel
55312 + */
55313 + grsec_enable_tpe_invert = 1;
55314 +#endif
55315 +
55316 +#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
55317 +#ifndef CONFIG_GRKERNSEC_SYSCTL
55318 + grsec_lock = 1;
55319 +#endif
55320 +
55321 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
55322 + grsec_enable_audit_textrel = 1;
55323 +#endif
55324 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
55325 + grsec_enable_log_rwxmaps = 1;
55326 +#endif
55327 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
55328 + grsec_enable_group = 1;
55329 + grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID;
55330 +#endif
55331 +#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
55332 + grsec_enable_ptrace_readexec = 1;
55333 +#endif
55334 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
55335 + grsec_enable_chdir = 1;
55336 +#endif
55337 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
55338 + grsec_enable_harden_ptrace = 1;
55339 +#endif
55340 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
55341 + grsec_enable_mount = 1;
55342 +#endif
55343 +#ifdef CONFIG_GRKERNSEC_LINK
55344 + grsec_enable_link = 1;
55345 +#endif
55346 +#ifdef CONFIG_GRKERNSEC_BRUTE
55347 + grsec_enable_brute = 1;
55348 +#endif
55349 +#ifdef CONFIG_GRKERNSEC_DMESG
55350 + grsec_enable_dmesg = 1;
55351 +#endif
55352 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
55353 + grsec_enable_blackhole = 1;
55354 + grsec_lastack_retries = 4;
55355 +#endif
55356 +#ifdef CONFIG_GRKERNSEC_FIFO
55357 + grsec_enable_fifo = 1;
55358 +#endif
55359 +#ifdef CONFIG_GRKERNSEC_EXECLOG
55360 + grsec_enable_execlog = 1;
55361 +#endif
55362 +#ifdef CONFIG_GRKERNSEC_SETXID
55363 + grsec_enable_setxid = 1;
55364 +#endif
55365 +#ifdef CONFIG_GRKERNSEC_SIGNAL
55366 + grsec_enable_signal = 1;
55367 +#endif
55368 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
55369 + grsec_enable_forkfail = 1;
55370 +#endif
55371 +#ifdef CONFIG_GRKERNSEC_TIME
55372 + grsec_enable_time = 1;
55373 +#endif
55374 +#ifdef CONFIG_GRKERNSEC_RESLOG
55375 + grsec_resource_logging = 1;
55376 +#endif
55377 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
55378 + grsec_enable_chroot_findtask = 1;
55379 +#endif
55380 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
55381 + grsec_enable_chroot_unix = 1;
55382 +#endif
55383 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
55384 + grsec_enable_chroot_mount = 1;
55385 +#endif
55386 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
55387 + grsec_enable_chroot_fchdir = 1;
55388 +#endif
55389 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
55390 + grsec_enable_chroot_shmat = 1;
55391 +#endif
55392 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
55393 + grsec_enable_audit_ptrace = 1;
55394 +#endif
55395 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
55396 + grsec_enable_chroot_double = 1;
55397 +#endif
55398 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
55399 + grsec_enable_chroot_pivot = 1;
55400 +#endif
55401 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
55402 + grsec_enable_chroot_chdir = 1;
55403 +#endif
55404 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
55405 + grsec_enable_chroot_chmod = 1;
55406 +#endif
55407 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
55408 + grsec_enable_chroot_mknod = 1;
55409 +#endif
55410 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
55411 + grsec_enable_chroot_nice = 1;
55412 +#endif
55413 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
55414 + grsec_enable_chroot_execlog = 1;
55415 +#endif
55416 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
55417 + grsec_enable_chroot_caps = 1;
55418 +#endif
55419 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
55420 + grsec_enable_chroot_sysctl = 1;
55421 +#endif
55422 +#ifdef CONFIG_GRKERNSEC_TPE
55423 + grsec_enable_tpe = 1;
55424 + grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID;
55425 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
55426 + grsec_enable_tpe_all = 1;
55427 +#endif
55428 +#endif
55429 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
55430 + grsec_enable_socket_all = 1;
55431 + grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID;
55432 +#endif
55433 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
55434 + grsec_enable_socket_client = 1;
55435 + grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID;
55436 +#endif
55437 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
55438 + grsec_enable_socket_server = 1;
55439 + grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID;
55440 +#endif
55441 +#endif
55442 +
55443 + return;
55444 +}
55445 diff --git a/grsecurity/grsec_link.c b/grsecurity/grsec_link.c
55446 new file mode 100644
55447 index 0000000..3efe141
55448 --- /dev/null
55449 +++ b/grsecurity/grsec_link.c
55450 @@ -0,0 +1,43 @@
55451 +#include <linux/kernel.h>
55452 +#include <linux/sched.h>
55453 +#include <linux/fs.h>
55454 +#include <linux/file.h>
55455 +#include <linux/grinternal.h>
55456 +
55457 +int
55458 +gr_handle_follow_link(const struct inode *parent,
55459 + const struct inode *inode,
55460 + const struct dentry *dentry, const struct vfsmount *mnt)
55461 +{
55462 +#ifdef CONFIG_GRKERNSEC_LINK
55463 + const struct cred *cred = current_cred();
55464 +
55465 + if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
55466 + (parent->i_mode & S_ISVTX) && (parent->i_uid != inode->i_uid) &&
55467 + (parent->i_mode & S_IWOTH) && (cred->fsuid != inode->i_uid)) {
55468 + gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
55469 + return -EACCES;
55470 + }
55471 +#endif
55472 + return 0;
55473 +}
55474 +
55475 +int
55476 +gr_handle_hardlink(const struct dentry *dentry,
55477 + const struct vfsmount *mnt,
55478 + struct inode *inode, const int mode, const char *to)
55479 +{
55480 +#ifdef CONFIG_GRKERNSEC_LINK
55481 + const struct cred *cred = current_cred();
55482 +
55483 + if (grsec_enable_link && cred->fsuid != inode->i_uid &&
55484 + (!S_ISREG(mode) || (mode & S_ISUID) ||
55485 + ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) ||
55486 + (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
55487 + !capable(CAP_FOWNER) && cred->uid) {
55488 + gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to);
55489 + return -EPERM;
55490 + }
55491 +#endif
55492 + return 0;
55493 +}
55494 diff --git a/grsecurity/grsec_log.c b/grsecurity/grsec_log.c
55495 new file mode 100644
55496 index 0000000..a45d2e9
55497 --- /dev/null
55498 +++ b/grsecurity/grsec_log.c
55499 @@ -0,0 +1,322 @@
55500 +#include <linux/kernel.h>
55501 +#include <linux/sched.h>
55502 +#include <linux/file.h>
55503 +#include <linux/tty.h>
55504 +#include <linux/fs.h>
55505 +#include <linux/grinternal.h>
55506 +
55507 +#ifdef CONFIG_TREE_PREEMPT_RCU
55508 +#define DISABLE_PREEMPT() preempt_disable()
55509 +#define ENABLE_PREEMPT() preempt_enable()
55510 +#else
55511 +#define DISABLE_PREEMPT()
55512 +#define ENABLE_PREEMPT()
55513 +#endif
55514 +
55515 +#define BEGIN_LOCKS(x) \
55516 + DISABLE_PREEMPT(); \
55517 + rcu_read_lock(); \
55518 + read_lock(&tasklist_lock); \
55519 + read_lock(&grsec_exec_file_lock); \
55520 + if (x != GR_DO_AUDIT) \
55521 + spin_lock(&grsec_alert_lock); \
55522 + else \
55523 + spin_lock(&grsec_audit_lock)
55524 +
55525 +#define END_LOCKS(x) \
55526 + if (x != GR_DO_AUDIT) \
55527 + spin_unlock(&grsec_alert_lock); \
55528 + else \
55529 + spin_unlock(&grsec_audit_lock); \
55530 + read_unlock(&grsec_exec_file_lock); \
55531 + read_unlock(&tasklist_lock); \
55532 + rcu_read_unlock(); \
55533 + ENABLE_PREEMPT(); \
55534 + if (x == GR_DONT_AUDIT) \
55535 + gr_handle_alertkill(current)
55536 +
55537 +enum {
55538 + FLOODING,
55539 + NO_FLOODING
55540 +};
55541 +
55542 +extern char *gr_alert_log_fmt;
55543 +extern char *gr_audit_log_fmt;
55544 +extern char *gr_alert_log_buf;
55545 +extern char *gr_audit_log_buf;
55546 +
55547 +static int gr_log_start(int audit)
55548 +{
55549 + char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
55550 + char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
55551 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
55552 +#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0)
55553 + unsigned long curr_secs = get_seconds();
55554 +
55555 + if (audit == GR_DO_AUDIT)
55556 + goto set_fmt;
55557 +
55558 + if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
55559 + grsec_alert_wtime = curr_secs;
55560 + grsec_alert_fyet = 0;
55561 + } else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)
55562 + && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
55563 + grsec_alert_fyet++;
55564 + } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
55565 + grsec_alert_wtime = curr_secs;
55566 + grsec_alert_fyet++;
55567 + printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
55568 + return FLOODING;
55569 + }
55570 + else return FLOODING;
55571 +
55572 +set_fmt:
55573 +#endif
55574 + memset(buf, 0, PAGE_SIZE);
55575 + if (current->signal->curr_ip && gr_acl_is_enabled()) {
55576 + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
55577 + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
55578 + } else if (current->signal->curr_ip) {
55579 + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
55580 + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
55581 + } else if (gr_acl_is_enabled()) {
55582 + sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
55583 + snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
55584 + } else {
55585 + sprintf(fmt, "%s%s", loglevel, "grsec: ");
55586 + strcpy(buf, fmt);
55587 + }
55588 +
55589 + return NO_FLOODING;
55590 +}
55591 +
55592 +static void gr_log_middle(int audit, const char *msg, va_list ap)
55593 + __attribute__ ((format (printf, 2, 0)));
55594 +
55595 +static void gr_log_middle(int audit, const char *msg, va_list ap)
55596 +{
55597 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
55598 + unsigned int len = strlen(buf);
55599 +
55600 + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
55601 +
55602 + return;
55603 +}
55604 +
55605 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
55606 + __attribute__ ((format (printf, 2, 3)));
55607 +
55608 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
55609 +{
55610 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
55611 + unsigned int len = strlen(buf);
55612 + va_list ap;
55613 +
55614 + va_start(ap, msg);
55615 + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
55616 + va_end(ap);
55617 +
55618 + return;
55619 +}
55620 +
55621 +static void gr_log_end(int audit, int append_default)
55622 +{
55623 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
55624 +
55625 + if (append_default) {
55626 + unsigned int len = strlen(buf);
55627 + snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, DEFAULTSECARGS(current, current_cred(), __task_cred(current->real_parent)));
55628 + }
55629 +
55630 + printk("%s\n", buf);
55631 +
55632 + return;
55633 +}
55634 +
55635 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
55636 +{
55637 + int logtype;
55638 + char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
55639 + char *str1 = NULL, *str2 = NULL, *str3 = NULL;
55640 + void *voidptr = NULL;
55641 + int num1 = 0, num2 = 0;
55642 + unsigned long ulong1 = 0, ulong2 = 0;
55643 + struct dentry *dentry = NULL;
55644 + struct vfsmount *mnt = NULL;
55645 + struct file *file = NULL;
55646 + struct task_struct *task = NULL;
55647 + const struct cred *cred, *pcred;
55648 + va_list ap;
55649 +
55650 + BEGIN_LOCKS(audit);
55651 + logtype = gr_log_start(audit);
55652 + if (logtype == FLOODING) {
55653 + END_LOCKS(audit);
55654 + return;
55655 + }
55656 + va_start(ap, argtypes);
55657 + switch (argtypes) {
55658 + case GR_TTYSNIFF:
55659 + task = va_arg(ap, struct task_struct *);
55660 + gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task->pid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid);
55661 + break;
55662 + case GR_SYSCTL_HIDDEN:
55663 + str1 = va_arg(ap, char *);
55664 + gr_log_middle_varargs(audit, msg, result, str1);
55665 + break;
55666 + case GR_RBAC:
55667 + dentry = va_arg(ap, struct dentry *);
55668 + mnt = va_arg(ap, struct vfsmount *);
55669 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
55670 + break;
55671 + case GR_RBAC_STR:
55672 + dentry = va_arg(ap, struct dentry *);
55673 + mnt = va_arg(ap, struct vfsmount *);
55674 + str1 = va_arg(ap, char *);
55675 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
55676 + break;
55677 + case GR_STR_RBAC:
55678 + str1 = va_arg(ap, char *);
55679 + dentry = va_arg(ap, struct dentry *);
55680 + mnt = va_arg(ap, struct vfsmount *);
55681 + gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
55682 + break;
55683 + case GR_RBAC_MODE2:
55684 + dentry = va_arg(ap, struct dentry *);
55685 + mnt = va_arg(ap, struct vfsmount *);
55686 + str1 = va_arg(ap, char *);
55687 + str2 = va_arg(ap, char *);
55688 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
55689 + break;
55690 + case GR_RBAC_MODE3:
55691 + dentry = va_arg(ap, struct dentry *);
55692 + mnt = va_arg(ap, struct vfsmount *);
55693 + str1 = va_arg(ap, char *);
55694 + str2 = va_arg(ap, char *);
55695 + str3 = va_arg(ap, char *);
55696 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
55697 + break;
55698 + case GR_FILENAME:
55699 + dentry = va_arg(ap, struct dentry *);
55700 + mnt = va_arg(ap, struct vfsmount *);
55701 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
55702 + break;
55703 + case GR_STR_FILENAME:
55704 + str1 = va_arg(ap, char *);
55705 + dentry = va_arg(ap, struct dentry *);
55706 + mnt = va_arg(ap, struct vfsmount *);
55707 + gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
55708 + break;
55709 + case GR_FILENAME_STR:
55710 + dentry = va_arg(ap, struct dentry *);
55711 + mnt = va_arg(ap, struct vfsmount *);
55712 + str1 = va_arg(ap, char *);
55713 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
55714 + break;
55715 + case GR_FILENAME_TWO_INT:
55716 + dentry = va_arg(ap, struct dentry *);
55717 + mnt = va_arg(ap, struct vfsmount *);
55718 + num1 = va_arg(ap, int);
55719 + num2 = va_arg(ap, int);
55720 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
55721 + break;
55722 + case GR_FILENAME_TWO_INT_STR:
55723 + dentry = va_arg(ap, struct dentry *);
55724 + mnt = va_arg(ap, struct vfsmount *);
55725 + num1 = va_arg(ap, int);
55726 + num2 = va_arg(ap, int);
55727 + str1 = va_arg(ap, char *);
55728 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
55729 + break;
55730 + case GR_TEXTREL:
55731 + file = va_arg(ap, struct file *);
55732 + ulong1 = va_arg(ap, unsigned long);
55733 + ulong2 = va_arg(ap, unsigned long);
55734 + gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
55735 + break;
55736 + case GR_PTRACE:
55737 + task = va_arg(ap, struct task_struct *);
55738 + gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task->pid);
55739 + break;
55740 + case GR_RESOURCE:
55741 + task = va_arg(ap, struct task_struct *);
55742 + cred = __task_cred(task);
55743 + pcred = __task_cred(task->real_parent);
55744 + ulong1 = va_arg(ap, unsigned long);
55745 + str1 = va_arg(ap, char *);
55746 + ulong2 = va_arg(ap, unsigned long);
55747 + gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
55748 + break;
55749 + case GR_CAP:
55750 + task = va_arg(ap, struct task_struct *);
55751 + cred = __task_cred(task);
55752 + pcred = __task_cred(task->real_parent);
55753 + str1 = va_arg(ap, char *);
55754 + gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
55755 + break;
55756 + case GR_SIG:
55757 + str1 = va_arg(ap, char *);
55758 + voidptr = va_arg(ap, void *);
55759 + gr_log_middle_varargs(audit, msg, str1, voidptr);
55760 + break;
55761 + case GR_SIG2:
55762 + task = va_arg(ap, struct task_struct *);
55763 + cred = __task_cred(task);
55764 + pcred = __task_cred(task->real_parent);
55765 + num1 = va_arg(ap, int);
55766 + gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
55767 + break;
55768 + case GR_CRASH1:
55769 + task = va_arg(ap, struct task_struct *);
55770 + cred = __task_cred(task);
55771 + pcred = __task_cred(task->real_parent);
55772 + ulong1 = va_arg(ap, unsigned long);
55773 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, cred->uid, ulong1);
55774 + break;
55775 + case GR_CRASH2:
55776 + task = va_arg(ap, struct task_struct *);
55777 + cred = __task_cred(task);
55778 + pcred = __task_cred(task->real_parent);
55779 + ulong1 = va_arg(ap, unsigned long);
55780 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, ulong1);
55781 + break;
55782 + case GR_RWXMAP:
55783 + file = va_arg(ap, struct file *);
55784 + gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
55785 + break;
55786 + case GR_PSACCT:
55787 + {
55788 + unsigned int wday, cday;
55789 + __u8 whr, chr;
55790 + __u8 wmin, cmin;
55791 + __u8 wsec, csec;
55792 + char cur_tty[64] = { 0 };
55793 + char parent_tty[64] = { 0 };
55794 +
55795 + task = va_arg(ap, struct task_struct *);
55796 + wday = va_arg(ap, unsigned int);
55797 + cday = va_arg(ap, unsigned int);
55798 + whr = va_arg(ap, int);
55799 + chr = va_arg(ap, int);
55800 + wmin = va_arg(ap, int);
55801 + cmin = va_arg(ap, int);
55802 + wsec = va_arg(ap, int);
55803 + csec = va_arg(ap, int);
55804 + ulong1 = va_arg(ap, unsigned long);
55805 + cred = __task_cred(task);
55806 + pcred = __task_cred(task->real_parent);
55807 +
55808 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), cred->uid, cred->euid, cred->gid, cred->egid, wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), pcred->uid, pcred->euid, pcred->gid, pcred->egid);
55809 + }
55810 + break;
55811 + default:
55812 + gr_log_middle(audit, msg, ap);
55813 + }
55814 + va_end(ap);
55815 + // these don't need DEFAULTSECARGS printed on the end
55816 + if (argtypes == GR_CRASH1 || argtypes == GR_CRASH2)
55817 + gr_log_end(audit, 0);
55818 + else
55819 + gr_log_end(audit, 1);
55820 + END_LOCKS(audit);
55821 +}
55822 diff --git a/grsecurity/grsec_mem.c b/grsecurity/grsec_mem.c
55823 new file mode 100644
55824 index 0000000..6c0416b
55825 --- /dev/null
55826 +++ b/grsecurity/grsec_mem.c
55827 @@ -0,0 +1,33 @@
55828 +#include <linux/kernel.h>
55829 +#include <linux/sched.h>
55830 +#include <linux/mm.h>
55831 +#include <linux/mman.h>
55832 +#include <linux/grinternal.h>
55833 +
55834 +void
55835 +gr_handle_ioperm(void)
55836 +{
55837 + gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
55838 + return;
55839 +}
55840 +
55841 +void
55842 +gr_handle_iopl(void)
55843 +{
55844 + gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
55845 + return;
55846 +}
55847 +
55848 +void
55849 +gr_handle_mem_readwrite(u64 from, u64 to)
55850 +{
55851 + gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
55852 + return;
55853 +}
55854 +
55855 +void
55856 +gr_handle_vm86(void)
55857 +{
55858 + gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
55859 + return;
55860 +}
55861 diff --git a/grsecurity/grsec_mount.c b/grsecurity/grsec_mount.c
55862 new file mode 100644
55863 index 0000000..2131422
55864 --- /dev/null
55865 +++ b/grsecurity/grsec_mount.c
55866 @@ -0,0 +1,62 @@
55867 +#include <linux/kernel.h>
55868 +#include <linux/sched.h>
55869 +#include <linux/mount.h>
55870 +#include <linux/grsecurity.h>
55871 +#include <linux/grinternal.h>
55872 +
55873 +void
55874 +gr_log_remount(const char *devname, const int retval)
55875 +{
55876 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
55877 + if (grsec_enable_mount && (retval >= 0))
55878 + gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
55879 +#endif
55880 + return;
55881 +}
55882 +
55883 +void
55884 +gr_log_unmount(const char *devname, const int retval)
55885 +{
55886 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
55887 + if (grsec_enable_mount && (retval >= 0))
55888 + gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
55889 +#endif
55890 + return;
55891 +}
55892 +
55893 +void
55894 +gr_log_mount(const char *from, const char *to, const int retval)
55895 +{
55896 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
55897 + if (grsec_enable_mount && (retval >= 0))
55898 + gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
55899 +#endif
55900 + return;
55901 +}
55902 +
55903 +int
55904 +gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
55905 +{
55906 +#ifdef CONFIG_GRKERNSEC_ROFS
55907 + if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
55908 + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
55909 + return -EPERM;
55910 + } else
55911 + return 0;
55912 +#endif
55913 + return 0;
55914 +}
55915 +
55916 +int
55917 +gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
55918 +{
55919 +#ifdef CONFIG_GRKERNSEC_ROFS
55920 + if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
55921 + dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
55922 + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
55923 + return -EPERM;
55924 + } else
55925 + return 0;
55926 +#endif
55927 + return 0;
55928 +}
55929 diff --git a/grsecurity/grsec_pax.c b/grsecurity/grsec_pax.c
55930 new file mode 100644
55931 index 0000000..a3b12a0
55932 --- /dev/null
55933 +++ b/grsecurity/grsec_pax.c
55934 @@ -0,0 +1,36 @@
55935 +#include <linux/kernel.h>
55936 +#include <linux/sched.h>
55937 +#include <linux/mm.h>
55938 +#include <linux/file.h>
55939 +#include <linux/grinternal.h>
55940 +#include <linux/grsecurity.h>
55941 +
55942 +void
55943 +gr_log_textrel(struct vm_area_struct * vma)
55944 +{
55945 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
55946 + if (grsec_enable_audit_textrel)
55947 + gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
55948 +#endif
55949 + return;
55950 +}
55951 +
55952 +void
55953 +gr_log_rwxmmap(struct file *file)
55954 +{
55955 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
55956 + if (grsec_enable_log_rwxmaps)
55957 + gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
55958 +#endif
55959 + return;
55960 +}
55961 +
55962 +void
55963 +gr_log_rwxmprotect(struct file *file)
55964 +{
55965 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
55966 + if (grsec_enable_log_rwxmaps)
55967 + gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
55968 +#endif
55969 + return;
55970 +}
55971 diff --git a/grsecurity/grsec_ptrace.c b/grsecurity/grsec_ptrace.c
55972 new file mode 100644
55973 index 0000000..f7f29aa
55974 --- /dev/null
55975 +++ b/grsecurity/grsec_ptrace.c
55976 @@ -0,0 +1,30 @@
55977 +#include <linux/kernel.h>
55978 +#include <linux/sched.h>
55979 +#include <linux/grinternal.h>
55980 +#include <linux/security.h>
55981 +
55982 +void
55983 +gr_audit_ptrace(struct task_struct *task)
55984 +{
55985 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
55986 + if (grsec_enable_audit_ptrace)
55987 + gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
55988 +#endif
55989 + return;
55990 +}
55991 +
55992 +int
55993 +gr_ptrace_readexec(struct file *file, int unsafe_flags)
55994 +{
55995 +#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
55996 + const struct dentry *dentry = file->f_path.dentry;
55997 + const struct vfsmount *mnt = file->f_path.mnt;
55998 +
55999 + if (grsec_enable_ptrace_readexec && (unsafe_flags & LSM_UNSAFE_PTRACE) &&
56000 + (inode_permission(dentry->d_inode, MAY_READ) || !gr_acl_handle_open(dentry, mnt, MAY_READ))) {
56001 + gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_READEXEC_MSG, dentry, mnt);
56002 + return -EACCES;
56003 + }
56004 +#endif
56005 + return 0;
56006 +}
56007 diff --git a/grsecurity/grsec_sig.c b/grsecurity/grsec_sig.c
56008 new file mode 100644
56009 index 0000000..7a5b2de
56010 --- /dev/null
56011 +++ b/grsecurity/grsec_sig.c
56012 @@ -0,0 +1,207 @@
56013 +#include <linux/kernel.h>
56014 +#include <linux/sched.h>
56015 +#include <linux/delay.h>
56016 +#include <linux/grsecurity.h>
56017 +#include <linux/grinternal.h>
56018 +#include <linux/hardirq.h>
56019 +
56020 +char *signames[] = {
56021 + [SIGSEGV] = "Segmentation fault",
56022 + [SIGILL] = "Illegal instruction",
56023 + [SIGABRT] = "Abort",
56024 + [SIGBUS] = "Invalid alignment/Bus error"
56025 +};
56026 +
56027 +void
56028 +gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
56029 +{
56030 +#ifdef CONFIG_GRKERNSEC_SIGNAL
56031 + if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
56032 + (sig == SIGABRT) || (sig == SIGBUS))) {
56033 + if (t->pid == current->pid) {
56034 + gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
56035 + } else {
56036 + gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
56037 + }
56038 + }
56039 +#endif
56040 + return;
56041 +}
56042 +
56043 +int
56044 +gr_handle_signal(const struct task_struct *p, const int sig)
56045 +{
56046 +#ifdef CONFIG_GRKERNSEC
56047 + /* ignore the 0 signal for protected task checks */
56048 + if (current->pid > 1 && sig && gr_check_protected_task(p)) {
56049 + gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
56050 + return -EPERM;
56051 + } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
56052 + return -EPERM;
56053 + }
56054 +#endif
56055 + return 0;
56056 +}
56057 +
56058 +#ifdef CONFIG_GRKERNSEC
56059 +extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
56060 +
56061 +int gr_fake_force_sig(int sig, struct task_struct *t)
56062 +{
56063 + unsigned long int flags;
56064 + int ret, blocked, ignored;
56065 + struct k_sigaction *action;
56066 +
56067 + spin_lock_irqsave(&t->sighand->siglock, flags);
56068 + action = &t->sighand->action[sig-1];
56069 + ignored = action->sa.sa_handler == SIG_IGN;
56070 + blocked = sigismember(&t->blocked, sig);
56071 + if (blocked || ignored) {
56072 + action->sa.sa_handler = SIG_DFL;
56073 + if (blocked) {
56074 + sigdelset(&t->blocked, sig);
56075 + recalc_sigpending_and_wake(t);
56076 + }
56077 + }
56078 + if (action->sa.sa_handler == SIG_DFL)
56079 + t->signal->flags &= ~SIGNAL_UNKILLABLE;
56080 + ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
56081 +
56082 + spin_unlock_irqrestore(&t->sighand->siglock, flags);
56083 +
56084 + return ret;
56085 +}
56086 +#endif
56087 +
56088 +#ifdef CONFIG_GRKERNSEC_BRUTE
56089 +#define GR_USER_BAN_TIME (15 * 60)
56090 +
56091 +static int __get_dumpable(unsigned long mm_flags)
56092 +{
56093 + int ret;
56094 +
56095 + ret = mm_flags & MMF_DUMPABLE_MASK;
56096 + return (ret >= 2) ? 2 : ret;
56097 +}
56098 +#endif
56099 +
56100 +void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags)
56101 +{
56102 +#ifdef CONFIG_GRKERNSEC_BRUTE
56103 + uid_t uid = 0;
56104 +
56105 + if (!grsec_enable_brute)
56106 + return;
56107 +
56108 + rcu_read_lock();
56109 + read_lock(&tasklist_lock);
56110 + read_lock(&grsec_exec_file_lock);
56111 + if (p->real_parent && p->real_parent->exec_file == p->exec_file)
56112 + p->real_parent->brute = 1;
56113 + else {
56114 + const struct cred *cred = __task_cred(p), *cred2;
56115 + struct task_struct *tsk, *tsk2;
56116 +
56117 + if (!__get_dumpable(mm_flags) && cred->uid) {
56118 + struct user_struct *user;
56119 +
56120 + uid = cred->uid;
56121 +
56122 + /* this is put upon execution past expiration */
56123 + user = find_user(uid);
56124 + if (user == NULL)
56125 + goto unlock;
56126 + user->banned = 1;
56127 + user->ban_expires = get_seconds() + GR_USER_BAN_TIME;
56128 + if (user->ban_expires == ~0UL)
56129 + user->ban_expires--;
56130 +
56131 + do_each_thread(tsk2, tsk) {
56132 + cred2 = __task_cred(tsk);
56133 + if (tsk != p && cred2->uid == uid)
56134 + gr_fake_force_sig(SIGKILL, tsk);
56135 + } while_each_thread(tsk2, tsk);
56136 + }
56137 + }
56138 +unlock:
56139 + read_unlock(&grsec_exec_file_lock);
56140 + read_unlock(&tasklist_lock);
56141 + rcu_read_unlock();
56142 +
56143 + if (uid)
56144 + printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n", uid, GR_USER_BAN_TIME / 60);
56145 +
56146 +#endif
56147 + return;
56148 +}
56149 +
56150 +void gr_handle_brute_check(void)
56151 +{
56152 +#ifdef CONFIG_GRKERNSEC_BRUTE
56153 + if (current->brute)
56154 + msleep(30 * 1000);
56155 +#endif
56156 + return;
56157 +}
56158 +
56159 +void gr_handle_kernel_exploit(void)
56160 +{
56161 +#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
56162 + const struct cred *cred;
56163 + struct task_struct *tsk, *tsk2;
56164 + struct user_struct *user;
56165 + uid_t uid;
56166 +
56167 + if (in_irq() || in_serving_softirq() || in_nmi())
56168 + panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
56169 +
56170 + uid = current_uid();
56171 +
56172 + if (uid == 0)
56173 + panic("grsec: halting the system due to suspicious kernel crash caused by root");
56174 + else {
56175 + /* kill all the processes of this user, hold a reference
56176 + to their creds struct, and prevent them from creating
56177 + another process until system reset
56178 + */
56179 + printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n", uid);
56180 + /* we intentionally leak this ref */
56181 + user = get_uid(current->cred->user);
56182 + if (user) {
56183 + user->banned = 1;
56184 + user->ban_expires = ~0UL;
56185 + }
56186 +
56187 + read_lock(&tasklist_lock);
56188 + do_each_thread(tsk2, tsk) {
56189 + cred = __task_cred(tsk);
56190 + if (cred->uid == uid)
56191 + gr_fake_force_sig(SIGKILL, tsk);
56192 + } while_each_thread(tsk2, tsk);
56193 + read_unlock(&tasklist_lock);
56194 + }
56195 +#endif
56196 +}
56197 +
56198 +int __gr_process_user_ban(struct user_struct *user)
56199 +{
56200 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
56201 + if (unlikely(user->banned)) {
56202 + if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) {
56203 + user->banned = 0;
56204 + user->ban_expires = 0;
56205 + free_uid(user);
56206 + } else
56207 + return -EPERM;
56208 + }
56209 +#endif
56210 + return 0;
56211 +}
56212 +
56213 +int gr_process_user_ban(void)
56214 +{
56215 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
56216 + return __gr_process_user_ban(current->cred->user);
56217 +#endif
56218 + return 0;
56219 +}
56220 diff --git a/grsecurity/grsec_sock.c b/grsecurity/grsec_sock.c
56221 new file mode 100644
56222 index 0000000..4030d57
56223 --- /dev/null
56224 +++ b/grsecurity/grsec_sock.c
56225 @@ -0,0 +1,244 @@
56226 +#include <linux/kernel.h>
56227 +#include <linux/module.h>
56228 +#include <linux/sched.h>
56229 +#include <linux/file.h>
56230 +#include <linux/net.h>
56231 +#include <linux/in.h>
56232 +#include <linux/ip.h>
56233 +#include <net/sock.h>
56234 +#include <net/inet_sock.h>
56235 +#include <linux/grsecurity.h>
56236 +#include <linux/grinternal.h>
56237 +#include <linux/gracl.h>
56238 +
56239 +extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
56240 +extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
56241 +
56242 +EXPORT_SYMBOL(gr_search_udp_recvmsg);
56243 +EXPORT_SYMBOL(gr_search_udp_sendmsg);
56244 +
56245 +#ifdef CONFIG_UNIX_MODULE
56246 +EXPORT_SYMBOL(gr_acl_handle_unix);
56247 +EXPORT_SYMBOL(gr_acl_handle_mknod);
56248 +EXPORT_SYMBOL(gr_handle_chroot_unix);
56249 +EXPORT_SYMBOL(gr_handle_create);
56250 +#endif
56251 +
56252 +#ifdef CONFIG_GRKERNSEC
56253 +#define gr_conn_table_size 32749
56254 +struct conn_table_entry {
56255 + struct conn_table_entry *next;
56256 + struct signal_struct *sig;
56257 +};
56258 +
56259 +struct conn_table_entry *gr_conn_table[gr_conn_table_size];
56260 +DEFINE_SPINLOCK(gr_conn_table_lock);
56261 +
56262 +extern const char * gr_socktype_to_name(unsigned char type);
56263 +extern const char * gr_proto_to_name(unsigned char proto);
56264 +extern const char * gr_sockfamily_to_name(unsigned char family);
56265 +
56266 +static __inline__ int
56267 +conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
56268 +{
56269 + return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
56270 +}
56271 +
56272 +static __inline__ int
56273 +conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
56274 + __u16 sport, __u16 dport)
56275 +{
56276 + if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
56277 + sig->gr_sport == sport && sig->gr_dport == dport))
56278 + return 1;
56279 + else
56280 + return 0;
56281 +}
56282 +
56283 +static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
56284 +{
56285 + struct conn_table_entry **match;
56286 + unsigned int index;
56287 +
56288 + index = conn_hash(sig->gr_saddr, sig->gr_daddr,
56289 + sig->gr_sport, sig->gr_dport,
56290 + gr_conn_table_size);
56291 +
56292 + newent->sig = sig;
56293 +
56294 + match = &gr_conn_table[index];
56295 + newent->next = *match;
56296 + *match = newent;
56297 +
56298 + return;
56299 +}
56300 +
56301 +static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
56302 +{
56303 + struct conn_table_entry *match, *last = NULL;
56304 + unsigned int index;
56305 +
56306 + index = conn_hash(sig->gr_saddr, sig->gr_daddr,
56307 + sig->gr_sport, sig->gr_dport,
56308 + gr_conn_table_size);
56309 +
56310 + match = gr_conn_table[index];
56311 + while (match && !conn_match(match->sig,
56312 + sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
56313 + sig->gr_dport)) {
56314 + last = match;
56315 + match = match->next;
56316 + }
56317 +
56318 + if (match) {
56319 + if (last)
56320 + last->next = match->next;
56321 + else
56322 + gr_conn_table[index] = NULL;
56323 + kfree(match);
56324 + }
56325 +
56326 + return;
56327 +}
56328 +
56329 +static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
56330 + __u16 sport, __u16 dport)
56331 +{
56332 + struct conn_table_entry *match;
56333 + unsigned int index;
56334 +
56335 + index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
56336 +
56337 + match = gr_conn_table[index];
56338 + while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
56339 + match = match->next;
56340 +
56341 + if (match)
56342 + return match->sig;
56343 + else
56344 + return NULL;
56345 +}
56346 +
56347 +#endif
56348 +
56349 +void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
56350 +{
56351 +#ifdef CONFIG_GRKERNSEC
56352 + struct signal_struct *sig = task->signal;
56353 + struct conn_table_entry *newent;
56354 +
56355 + newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
56356 + if (newent == NULL)
56357 + return;
56358 + /* no bh lock needed since we are called with bh disabled */
56359 + spin_lock(&gr_conn_table_lock);
56360 + gr_del_task_from_ip_table_nolock(sig);
56361 + sig->gr_saddr = inet->inet_rcv_saddr;
56362 + sig->gr_daddr = inet->inet_daddr;
56363 + sig->gr_sport = inet->inet_sport;
56364 + sig->gr_dport = inet->inet_dport;
56365 + gr_add_to_task_ip_table_nolock(sig, newent);
56366 + spin_unlock(&gr_conn_table_lock);
56367 +#endif
56368 + return;
56369 +}
56370 +
56371 +void gr_del_task_from_ip_table(struct task_struct *task)
56372 +{
56373 +#ifdef CONFIG_GRKERNSEC
56374 + spin_lock_bh(&gr_conn_table_lock);
56375 + gr_del_task_from_ip_table_nolock(task->signal);
56376 + spin_unlock_bh(&gr_conn_table_lock);
56377 +#endif
56378 + return;
56379 +}
56380 +
56381 +void
56382 +gr_attach_curr_ip(const struct sock *sk)
56383 +{
56384 +#ifdef CONFIG_GRKERNSEC
56385 + struct signal_struct *p, *set;
56386 + const struct inet_sock *inet = inet_sk(sk);
56387 +
56388 + if (unlikely(sk->sk_protocol != IPPROTO_TCP))
56389 + return;
56390 +
56391 + set = current->signal;
56392 +
56393 + spin_lock_bh(&gr_conn_table_lock);
56394 + p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr,
56395 + inet->inet_dport, inet->inet_sport);
56396 + if (unlikely(p != NULL)) {
56397 + set->curr_ip = p->curr_ip;
56398 + set->used_accept = 1;
56399 + gr_del_task_from_ip_table_nolock(p);
56400 + spin_unlock_bh(&gr_conn_table_lock);
56401 + return;
56402 + }
56403 + spin_unlock_bh(&gr_conn_table_lock);
56404 +
56405 + set->curr_ip = inet->inet_daddr;
56406 + set->used_accept = 1;
56407 +#endif
56408 + return;
56409 +}
56410 +
56411 +int
56412 +gr_handle_sock_all(const int family, const int type, const int protocol)
56413 +{
56414 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
56415 + if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
56416 + (family != AF_UNIX)) {
56417 + if (family == AF_INET)
56418 + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
56419 + else
56420 + gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
56421 + return -EACCES;
56422 + }
56423 +#endif
56424 + return 0;
56425 +}
56426 +
56427 +int
56428 +gr_handle_sock_server(const struct sockaddr *sck)
56429 +{
56430 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
56431 + if (grsec_enable_socket_server &&
56432 + in_group_p(grsec_socket_server_gid) &&
56433 + sck && (sck->sa_family != AF_UNIX) &&
56434 + (sck->sa_family != AF_LOCAL)) {
56435 + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
56436 + return -EACCES;
56437 + }
56438 +#endif
56439 + return 0;
56440 +}
56441 +
56442 +int
56443 +gr_handle_sock_server_other(const struct sock *sck)
56444 +{
56445 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
56446 + if (grsec_enable_socket_server &&
56447 + in_group_p(grsec_socket_server_gid) &&
56448 + sck && (sck->sk_family != AF_UNIX) &&
56449 + (sck->sk_family != AF_LOCAL)) {
56450 + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
56451 + return -EACCES;
56452 + }
56453 +#endif
56454 + return 0;
56455 +}
56456 +
56457 +int
56458 +gr_handle_sock_client(const struct sockaddr *sck)
56459 +{
56460 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
56461 + if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
56462 + sck && (sck->sa_family != AF_UNIX) &&
56463 + (sck->sa_family != AF_LOCAL)) {
56464 + gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
56465 + return -EACCES;
56466 + }
56467 +#endif
56468 + return 0;
56469 +}
56470 diff --git a/grsecurity/grsec_sysctl.c b/grsecurity/grsec_sysctl.c
56471 new file mode 100644
56472 index 0000000..a1aedd7
56473 --- /dev/null
56474 +++ b/grsecurity/grsec_sysctl.c
56475 @@ -0,0 +1,451 @@
56476 +#include <linux/kernel.h>
56477 +#include <linux/sched.h>
56478 +#include <linux/sysctl.h>
56479 +#include <linux/grsecurity.h>
56480 +#include <linux/grinternal.h>
56481 +
56482 +int
56483 +gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
56484 +{
56485 +#ifdef CONFIG_GRKERNSEC_SYSCTL
56486 + if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
56487 + gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
56488 + return -EACCES;
56489 + }
56490 +#endif
56491 + return 0;
56492 +}
56493 +
56494 +#ifdef CONFIG_GRKERNSEC_ROFS
56495 +static int __maybe_unused one = 1;
56496 +#endif
56497 +
56498 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
56499 +struct ctl_table grsecurity_table[] = {
56500 +#ifdef CONFIG_GRKERNSEC_SYSCTL
56501 +#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
56502 +#ifdef CONFIG_GRKERNSEC_IO
56503 + {
56504 + .procname = "disable_priv_io",
56505 + .data = &grsec_disable_privio,
56506 + .maxlen = sizeof(int),
56507 + .mode = 0600,
56508 + .proc_handler = &proc_dointvec,
56509 + },
56510 +#endif
56511 +#endif
56512 +#ifdef CONFIG_GRKERNSEC_LINK
56513 + {
56514 + .procname = "linking_restrictions",
56515 + .data = &grsec_enable_link,
56516 + .maxlen = sizeof(int),
56517 + .mode = 0600,
56518 + .proc_handler = &proc_dointvec,
56519 + },
56520 +#endif
56521 +#ifdef CONFIG_GRKERNSEC_BRUTE
56522 + {
56523 + .procname = "deter_bruteforce",
56524 + .data = &grsec_enable_brute,
56525 + .maxlen = sizeof(int),
56526 + .mode = 0600,
56527 + .proc_handler = &proc_dointvec,
56528 + },
56529 +#endif
56530 +#ifdef CONFIG_GRKERNSEC_FIFO
56531 + {
56532 + .procname = "fifo_restrictions",
56533 + .data = &grsec_enable_fifo,
56534 + .maxlen = sizeof(int),
56535 + .mode = 0600,
56536 + .proc_handler = &proc_dointvec,
56537 + },
56538 +#endif
56539 +#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
56540 + {
56541 + .procname = "ptrace_readexec",
56542 + .data = &grsec_enable_ptrace_readexec,
56543 + .maxlen = sizeof(int),
56544 + .mode = 0600,
56545 + .proc_handler = &proc_dointvec,
56546 + },
56547 +#endif
56548 +#ifdef CONFIG_GRKERNSEC_SETXID
56549 + {
56550 + .procname = "consistent_setxid",
56551 + .data = &grsec_enable_setxid,
56552 + .maxlen = sizeof(int),
56553 + .mode = 0600,
56554 + .proc_handler = &proc_dointvec,
56555 + },
56556 +#endif
56557 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
56558 + {
56559 + .procname = "ip_blackhole",
56560 + .data = &grsec_enable_blackhole,
56561 + .maxlen = sizeof(int),
56562 + .mode = 0600,
56563 + .proc_handler = &proc_dointvec,
56564 + },
56565 + {
56566 + .procname = "lastack_retries",
56567 + .data = &grsec_lastack_retries,
56568 + .maxlen = sizeof(int),
56569 + .mode = 0600,
56570 + .proc_handler = &proc_dointvec,
56571 + },
56572 +#endif
56573 +#ifdef CONFIG_GRKERNSEC_EXECLOG
56574 + {
56575 + .procname = "exec_logging",
56576 + .data = &grsec_enable_execlog,
56577 + .maxlen = sizeof(int),
56578 + .mode = 0600,
56579 + .proc_handler = &proc_dointvec,
56580 + },
56581 +#endif
56582 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
56583 + {
56584 + .procname = "rwxmap_logging",
56585 + .data = &grsec_enable_log_rwxmaps,
56586 + .maxlen = sizeof(int),
56587 + .mode = 0600,
56588 + .proc_handler = &proc_dointvec,
56589 + },
56590 +#endif
56591 +#ifdef CONFIG_GRKERNSEC_SIGNAL
56592 + {
56593 + .procname = "signal_logging",
56594 + .data = &grsec_enable_signal,
56595 + .maxlen = sizeof(int),
56596 + .mode = 0600,
56597 + .proc_handler = &proc_dointvec,
56598 + },
56599 +#endif
56600 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
56601 + {
56602 + .procname = "forkfail_logging",
56603 + .data = &grsec_enable_forkfail,
56604 + .maxlen = sizeof(int),
56605 + .mode = 0600,
56606 + .proc_handler = &proc_dointvec,
56607 + },
56608 +#endif
56609 +#ifdef CONFIG_GRKERNSEC_TIME
56610 + {
56611 + .procname = "timechange_logging",
56612 + .data = &grsec_enable_time,
56613 + .maxlen = sizeof(int),
56614 + .mode = 0600,
56615 + .proc_handler = &proc_dointvec,
56616 + },
56617 +#endif
56618 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
56619 + {
56620 + .procname = "chroot_deny_shmat",
56621 + .data = &grsec_enable_chroot_shmat,
56622 + .maxlen = sizeof(int),
56623 + .mode = 0600,
56624 + .proc_handler = &proc_dointvec,
56625 + },
56626 +#endif
56627 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
56628 + {
56629 + .procname = "chroot_deny_unix",
56630 + .data = &grsec_enable_chroot_unix,
56631 + .maxlen = sizeof(int),
56632 + .mode = 0600,
56633 + .proc_handler = &proc_dointvec,
56634 + },
56635 +#endif
56636 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
56637 + {
56638 + .procname = "chroot_deny_mount",
56639 + .data = &grsec_enable_chroot_mount,
56640 + .maxlen = sizeof(int),
56641 + .mode = 0600,
56642 + .proc_handler = &proc_dointvec,
56643 + },
56644 +#endif
56645 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
56646 + {
56647 + .procname = "chroot_deny_fchdir",
56648 + .data = &grsec_enable_chroot_fchdir,
56649 + .maxlen = sizeof(int),
56650 + .mode = 0600,
56651 + .proc_handler = &proc_dointvec,
56652 + },
56653 +#endif
56654 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
56655 + {
56656 + .procname = "chroot_deny_chroot",
56657 + .data = &grsec_enable_chroot_double,
56658 + .maxlen = sizeof(int),
56659 + .mode = 0600,
56660 + .proc_handler = &proc_dointvec,
56661 + },
56662 +#endif
56663 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
56664 + {
56665 + .procname = "chroot_deny_pivot",
56666 + .data = &grsec_enable_chroot_pivot,
56667 + .maxlen = sizeof(int),
56668 + .mode = 0600,
56669 + .proc_handler = &proc_dointvec,
56670 + },
56671 +#endif
56672 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
56673 + {
56674 + .procname = "chroot_enforce_chdir",
56675 + .data = &grsec_enable_chroot_chdir,
56676 + .maxlen = sizeof(int),
56677 + .mode = 0600,
56678 + .proc_handler = &proc_dointvec,
56679 + },
56680 +#endif
56681 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
56682 + {
56683 + .procname = "chroot_deny_chmod",
56684 + .data = &grsec_enable_chroot_chmod,
56685 + .maxlen = sizeof(int),
56686 + .mode = 0600,
56687 + .proc_handler = &proc_dointvec,
56688 + },
56689 +#endif
56690 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
56691 + {
56692 + .procname = "chroot_deny_mknod",
56693 + .data = &grsec_enable_chroot_mknod,
56694 + .maxlen = sizeof(int),
56695 + .mode = 0600,
56696 + .proc_handler = &proc_dointvec,
56697 + },
56698 +#endif
56699 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
56700 + {
56701 + .procname = "chroot_restrict_nice",
56702 + .data = &grsec_enable_chroot_nice,
56703 + .maxlen = sizeof(int),
56704 + .mode = 0600,
56705 + .proc_handler = &proc_dointvec,
56706 + },
56707 +#endif
56708 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
56709 + {
56710 + .procname = "chroot_execlog",
56711 + .data = &grsec_enable_chroot_execlog,
56712 + .maxlen = sizeof(int),
56713 + .mode = 0600,
56714 + .proc_handler = &proc_dointvec,
56715 + },
56716 +#endif
56717 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
56718 + {
56719 + .procname = "chroot_caps",
56720 + .data = &grsec_enable_chroot_caps,
56721 + .maxlen = sizeof(int),
56722 + .mode = 0600,
56723 + .proc_handler = &proc_dointvec,
56724 + },
56725 +#endif
56726 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
56727 + {
56728 + .procname = "chroot_deny_sysctl",
56729 + .data = &grsec_enable_chroot_sysctl,
56730 + .maxlen = sizeof(int),
56731 + .mode = 0600,
56732 + .proc_handler = &proc_dointvec,
56733 + },
56734 +#endif
56735 +#ifdef CONFIG_GRKERNSEC_TPE
56736 + {
56737 + .procname = "tpe",
56738 + .data = &grsec_enable_tpe,
56739 + .maxlen = sizeof(int),
56740 + .mode = 0600,
56741 + .proc_handler = &proc_dointvec,
56742 + },
56743 + {
56744 + .procname = "tpe_gid",
56745 + .data = &grsec_tpe_gid,
56746 + .maxlen = sizeof(int),
56747 + .mode = 0600,
56748 + .proc_handler = &proc_dointvec,
56749 + },
56750 +#endif
56751 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
56752 + {
56753 + .procname = "tpe_invert",
56754 + .data = &grsec_enable_tpe_invert,
56755 + .maxlen = sizeof(int),
56756 + .mode = 0600,
56757 + .proc_handler = &proc_dointvec,
56758 + },
56759 +#endif
56760 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
56761 + {
56762 + .procname = "tpe_restrict_all",
56763 + .data = &grsec_enable_tpe_all,
56764 + .maxlen = sizeof(int),
56765 + .mode = 0600,
56766 + .proc_handler = &proc_dointvec,
56767 + },
56768 +#endif
56769 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
56770 + {
56771 + .procname = "socket_all",
56772 + .data = &grsec_enable_socket_all,
56773 + .maxlen = sizeof(int),
56774 + .mode = 0600,
56775 + .proc_handler = &proc_dointvec,
56776 + },
56777 + {
56778 + .procname = "socket_all_gid",
56779 + .data = &grsec_socket_all_gid,
56780 + .maxlen = sizeof(int),
56781 + .mode = 0600,
56782 + .proc_handler = &proc_dointvec,
56783 + },
56784 +#endif
56785 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
56786 + {
56787 + .procname = "socket_client",
56788 + .data = &grsec_enable_socket_client,
56789 + .maxlen = sizeof(int),
56790 + .mode = 0600,
56791 + .proc_handler = &proc_dointvec,
56792 + },
56793 + {
56794 + .procname = "socket_client_gid",
56795 + .data = &grsec_socket_client_gid,
56796 + .maxlen = sizeof(int),
56797 + .mode = 0600,
56798 + .proc_handler = &proc_dointvec,
56799 + },
56800 +#endif
56801 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
56802 + {
56803 + .procname = "socket_server",
56804 + .data = &grsec_enable_socket_server,
56805 + .maxlen = sizeof(int),
56806 + .mode = 0600,
56807 + .proc_handler = &proc_dointvec,
56808 + },
56809 + {
56810 + .procname = "socket_server_gid",
56811 + .data = &grsec_socket_server_gid,
56812 + .maxlen = sizeof(int),
56813 + .mode = 0600,
56814 + .proc_handler = &proc_dointvec,
56815 + },
56816 +#endif
56817 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
56818 + {
56819 + .procname = "audit_group",
56820 + .data = &grsec_enable_group,
56821 + .maxlen = sizeof(int),
56822 + .mode = 0600,
56823 + .proc_handler = &proc_dointvec,
56824 + },
56825 + {
56826 + .procname = "audit_gid",
56827 + .data = &grsec_audit_gid,
56828 + .maxlen = sizeof(int),
56829 + .mode = 0600,
56830 + .proc_handler = &proc_dointvec,
56831 + },
56832 +#endif
56833 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
56834 + {
56835 + .procname = "audit_chdir",
56836 + .data = &grsec_enable_chdir,
56837 + .maxlen = sizeof(int),
56838 + .mode = 0600,
56839 + .proc_handler = &proc_dointvec,
56840 + },
56841 +#endif
56842 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
56843 + {
56844 + .procname = "audit_mount",
56845 + .data = &grsec_enable_mount,
56846 + .maxlen = sizeof(int),
56847 + .mode = 0600,
56848 + .proc_handler = &proc_dointvec,
56849 + },
56850 +#endif
56851 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
56852 + {
56853 + .procname = "audit_textrel",
56854 + .data = &grsec_enable_audit_textrel,
56855 + .maxlen = sizeof(int),
56856 + .mode = 0600,
56857 + .proc_handler = &proc_dointvec,
56858 + },
56859 +#endif
56860 +#ifdef CONFIG_GRKERNSEC_DMESG
56861 + {
56862 + .procname = "dmesg",
56863 + .data = &grsec_enable_dmesg,
56864 + .maxlen = sizeof(int),
56865 + .mode = 0600,
56866 + .proc_handler = &proc_dointvec,
56867 + },
56868 +#endif
56869 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
56870 + {
56871 + .procname = "chroot_findtask",
56872 + .data = &grsec_enable_chroot_findtask,
56873 + .maxlen = sizeof(int),
56874 + .mode = 0600,
56875 + .proc_handler = &proc_dointvec,
56876 + },
56877 +#endif
56878 +#ifdef CONFIG_GRKERNSEC_RESLOG
56879 + {
56880 + .procname = "resource_logging",
56881 + .data = &grsec_resource_logging,
56882 + .maxlen = sizeof(int),
56883 + .mode = 0600,
56884 + .proc_handler = &proc_dointvec,
56885 + },
56886 +#endif
56887 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
56888 + {
56889 + .procname = "audit_ptrace",
56890 + .data = &grsec_enable_audit_ptrace,
56891 + .maxlen = sizeof(int),
56892 + .mode = 0600,
56893 + .proc_handler = &proc_dointvec,
56894 + },
56895 +#endif
56896 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
56897 + {
56898 + .procname = "harden_ptrace",
56899 + .data = &grsec_enable_harden_ptrace,
56900 + .maxlen = sizeof(int),
56901 + .mode = 0600,
56902 + .proc_handler = &proc_dointvec,
56903 + },
56904 +#endif
56905 + {
56906 + .procname = "grsec_lock",
56907 + .data = &grsec_lock,
56908 + .maxlen = sizeof(int),
56909 + .mode = 0600,
56910 + .proc_handler = &proc_dointvec,
56911 + },
56912 +#endif
56913 +#ifdef CONFIG_GRKERNSEC_ROFS
56914 + {
56915 + .procname = "romount_protect",
56916 + .data = &grsec_enable_rofs,
56917 + .maxlen = sizeof(int),
56918 + .mode = 0600,
56919 + .proc_handler = &proc_dointvec_minmax,
56920 + .extra1 = &one,
56921 + .extra2 = &one,
56922 + },
56923 +#endif
56924 + { }
56925 +};
56926 +#endif
56927 diff --git a/grsecurity/grsec_time.c b/grsecurity/grsec_time.c
56928 new file mode 100644
56929 index 0000000..0dc13c3
56930 --- /dev/null
56931 +++ b/grsecurity/grsec_time.c
56932 @@ -0,0 +1,16 @@
56933 +#include <linux/kernel.h>
56934 +#include <linux/sched.h>
56935 +#include <linux/grinternal.h>
56936 +#include <linux/module.h>
56937 +
56938 +void
56939 +gr_log_timechange(void)
56940 +{
56941 +#ifdef CONFIG_GRKERNSEC_TIME
56942 + if (grsec_enable_time)
56943 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
56944 +#endif
56945 + return;
56946 +}
56947 +
56948 +EXPORT_SYMBOL(gr_log_timechange);
56949 diff --git a/grsecurity/grsec_tpe.c b/grsecurity/grsec_tpe.c
56950 new file mode 100644
56951 index 0000000..4a78774
56952 --- /dev/null
56953 +++ b/grsecurity/grsec_tpe.c
56954 @@ -0,0 +1,39 @@
56955 +#include <linux/kernel.h>
56956 +#include <linux/sched.h>
56957 +#include <linux/file.h>
56958 +#include <linux/fs.h>
56959 +#include <linux/grinternal.h>
56960 +
56961 +extern int gr_acl_tpe_check(void);
56962 +
56963 +int
56964 +gr_tpe_allow(const struct file *file)
56965 +{
56966 +#ifdef CONFIG_GRKERNSEC
56967 + struct inode *inode = file->f_path.dentry->d_parent->d_inode;
56968 + const struct cred *cred = current_cred();
56969 +
56970 + if (cred->uid && ((grsec_enable_tpe &&
56971 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
56972 + ((grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid)) ||
56973 + (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid)))
56974 +#else
56975 + in_group_p(grsec_tpe_gid)
56976 +#endif
56977 + ) || gr_acl_tpe_check()) &&
56978 + (inode->i_uid || (!inode->i_uid && ((inode->i_mode & S_IWGRP) ||
56979 + (inode->i_mode & S_IWOTH))))) {
56980 + gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
56981 + return 0;
56982 + }
56983 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
56984 + if (cred->uid && grsec_enable_tpe && grsec_enable_tpe_all &&
56985 + ((inode->i_uid && (inode->i_uid != cred->uid)) ||
56986 + (inode->i_mode & S_IWGRP) || (inode->i_mode & S_IWOTH))) {
56987 + gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
56988 + return 0;
56989 + }
56990 +#endif
56991 +#endif
56992 + return 1;
56993 +}
56994 diff --git a/grsecurity/grsum.c b/grsecurity/grsum.c
56995 new file mode 100644
56996 index 0000000..9f7b1ac
56997 --- /dev/null
56998 +++ b/grsecurity/grsum.c
56999 @@ -0,0 +1,61 @@
57000 +#include <linux/err.h>
57001 +#include <linux/kernel.h>
57002 +#include <linux/sched.h>
57003 +#include <linux/mm.h>
57004 +#include <linux/scatterlist.h>
57005 +#include <linux/crypto.h>
57006 +#include <linux/gracl.h>
57007 +
57008 +
57009 +#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
57010 +#error "crypto and sha256 must be built into the kernel"
57011 +#endif
57012 +
57013 +int
57014 +chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
57015 +{
57016 + char *p;
57017 + struct crypto_hash *tfm;
57018 + struct hash_desc desc;
57019 + struct scatterlist sg;
57020 + unsigned char temp_sum[GR_SHA_LEN];
57021 + volatile int retval = 0;
57022 + volatile int dummy = 0;
57023 + unsigned int i;
57024 +
57025 + sg_init_table(&sg, 1);
57026 +
57027 + tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
57028 + if (IS_ERR(tfm)) {
57029 + /* should never happen, since sha256 should be built in */
57030 + return 1;
57031 + }
57032 +
57033 + desc.tfm = tfm;
57034 + desc.flags = 0;
57035 +
57036 + crypto_hash_init(&desc);
57037 +
57038 + p = salt;
57039 + sg_set_buf(&sg, p, GR_SALT_LEN);
57040 + crypto_hash_update(&desc, &sg, sg.length);
57041 +
57042 + p = entry->pw;
57043 + sg_set_buf(&sg, p, strlen(p));
57044 +
57045 + crypto_hash_update(&desc, &sg, sg.length);
57046 +
57047 + crypto_hash_final(&desc, temp_sum);
57048 +
57049 + memset(entry->pw, 0, GR_PW_LEN);
57050 +
57051 + for (i = 0; i < GR_SHA_LEN; i++)
57052 + if (sum[i] != temp_sum[i])
57053 + retval = 1;
57054 + else
57055 + dummy = 1; // waste a cycle
57056 +
57057 + crypto_free_hash(tfm);
57058 +
57059 + return retval;
57060 +}
57061 diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
57062 index 6cd5b64..f620d2d 100644
57063 --- a/include/acpi/acpi_bus.h
57064 +++ b/include/acpi/acpi_bus.h
57065 @@ -107,7 +107,7 @@ struct acpi_device_ops {
57066 acpi_op_bind bind;
57067 acpi_op_unbind unbind;
57068 acpi_op_notify notify;
57069 -};
57070 +} __no_const;
57071
57072 #define ACPI_DRIVER_ALL_NOTIFY_EVENTS 0x1 /* system AND device events */
57073
57074 diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
57075 index b7babf0..71e4e74 100644
57076 --- a/include/asm-generic/atomic-long.h
57077 +++ b/include/asm-generic/atomic-long.h
57078 @@ -22,6 +22,12 @@
57079
57080 typedef atomic64_t atomic_long_t;
57081
57082 +#ifdef CONFIG_PAX_REFCOUNT
57083 +typedef atomic64_unchecked_t atomic_long_unchecked_t;
57084 +#else
57085 +typedef atomic64_t atomic_long_unchecked_t;
57086 +#endif
57087 +
57088 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
57089
57090 static inline long atomic_long_read(atomic_long_t *l)
57091 @@ -31,6 +37,15 @@ static inline long atomic_long_read(atomic_long_t *l)
57092 return (long)atomic64_read(v);
57093 }
57094
57095 +#ifdef CONFIG_PAX_REFCOUNT
57096 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
57097 +{
57098 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
57099 +
57100 + return (long)atomic64_read_unchecked(v);
57101 +}
57102 +#endif
57103 +
57104 static inline void atomic_long_set(atomic_long_t *l, long i)
57105 {
57106 atomic64_t *v = (atomic64_t *)l;
57107 @@ -38,6 +53,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
57108 atomic64_set(v, i);
57109 }
57110
57111 +#ifdef CONFIG_PAX_REFCOUNT
57112 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
57113 +{
57114 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
57115 +
57116 + atomic64_set_unchecked(v, i);
57117 +}
57118 +#endif
57119 +
57120 static inline void atomic_long_inc(atomic_long_t *l)
57121 {
57122 atomic64_t *v = (atomic64_t *)l;
57123 @@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
57124 atomic64_inc(v);
57125 }
57126
57127 +#ifdef CONFIG_PAX_REFCOUNT
57128 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
57129 +{
57130 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
57131 +
57132 + atomic64_inc_unchecked(v);
57133 +}
57134 +#endif
57135 +
57136 static inline void atomic_long_dec(atomic_long_t *l)
57137 {
57138 atomic64_t *v = (atomic64_t *)l;
57139 @@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
57140 atomic64_dec(v);
57141 }
57142
57143 +#ifdef CONFIG_PAX_REFCOUNT
57144 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
57145 +{
57146 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
57147 +
57148 + atomic64_dec_unchecked(v);
57149 +}
57150 +#endif
57151 +
57152 static inline void atomic_long_add(long i, atomic_long_t *l)
57153 {
57154 atomic64_t *v = (atomic64_t *)l;
57155 @@ -59,6 +101,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
57156 atomic64_add(i, v);
57157 }
57158
57159 +#ifdef CONFIG_PAX_REFCOUNT
57160 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
57161 +{
57162 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
57163 +
57164 + atomic64_add_unchecked(i, v);
57165 +}
57166 +#endif
57167 +
57168 static inline void atomic_long_sub(long i, atomic_long_t *l)
57169 {
57170 atomic64_t *v = (atomic64_t *)l;
57171 @@ -66,6 +117,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
57172 atomic64_sub(i, v);
57173 }
57174
57175 +#ifdef CONFIG_PAX_REFCOUNT
57176 +static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
57177 +{
57178 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
57179 +
57180 + atomic64_sub_unchecked(i, v);
57181 +}
57182 +#endif
57183 +
57184 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
57185 {
57186 atomic64_t *v = (atomic64_t *)l;
57187 @@ -115,6 +175,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
57188 return (long)atomic64_inc_return(v);
57189 }
57190
57191 +#ifdef CONFIG_PAX_REFCOUNT
57192 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
57193 +{
57194 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
57195 +
57196 + return (long)atomic64_inc_return_unchecked(v);
57197 +}
57198 +#endif
57199 +
57200 static inline long atomic_long_dec_return(atomic_long_t *l)
57201 {
57202 atomic64_t *v = (atomic64_t *)l;
57203 @@ -140,6 +209,12 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
57204
57205 typedef atomic_t atomic_long_t;
57206
57207 +#ifdef CONFIG_PAX_REFCOUNT
57208 +typedef atomic_unchecked_t atomic_long_unchecked_t;
57209 +#else
57210 +typedef atomic_t atomic_long_unchecked_t;
57211 +#endif
57212 +
57213 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
57214 static inline long atomic_long_read(atomic_long_t *l)
57215 {
57216 @@ -148,6 +223,15 @@ static inline long atomic_long_read(atomic_long_t *l)
57217 return (long)atomic_read(v);
57218 }
57219
57220 +#ifdef CONFIG_PAX_REFCOUNT
57221 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
57222 +{
57223 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57224 +
57225 + return (long)atomic_read_unchecked(v);
57226 +}
57227 +#endif
57228 +
57229 static inline void atomic_long_set(atomic_long_t *l, long i)
57230 {
57231 atomic_t *v = (atomic_t *)l;
57232 @@ -155,6 +239,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
57233 atomic_set(v, i);
57234 }
57235
57236 +#ifdef CONFIG_PAX_REFCOUNT
57237 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
57238 +{
57239 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57240 +
57241 + atomic_set_unchecked(v, i);
57242 +}
57243 +#endif
57244 +
57245 static inline void atomic_long_inc(atomic_long_t *l)
57246 {
57247 atomic_t *v = (atomic_t *)l;
57248 @@ -162,6 +255,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
57249 atomic_inc(v);
57250 }
57251
57252 +#ifdef CONFIG_PAX_REFCOUNT
57253 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
57254 +{
57255 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57256 +
57257 + atomic_inc_unchecked(v);
57258 +}
57259 +#endif
57260 +
57261 static inline void atomic_long_dec(atomic_long_t *l)
57262 {
57263 atomic_t *v = (atomic_t *)l;
57264 @@ -169,6 +271,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
57265 atomic_dec(v);
57266 }
57267
57268 +#ifdef CONFIG_PAX_REFCOUNT
57269 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
57270 +{
57271 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57272 +
57273 + atomic_dec_unchecked(v);
57274 +}
57275 +#endif
57276 +
57277 static inline void atomic_long_add(long i, atomic_long_t *l)
57278 {
57279 atomic_t *v = (atomic_t *)l;
57280 @@ -176,6 +287,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
57281 atomic_add(i, v);
57282 }
57283
57284 +#ifdef CONFIG_PAX_REFCOUNT
57285 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
57286 +{
57287 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57288 +
57289 + atomic_add_unchecked(i, v);
57290 +}
57291 +#endif
57292 +
57293 static inline void atomic_long_sub(long i, atomic_long_t *l)
57294 {
57295 atomic_t *v = (atomic_t *)l;
57296 @@ -183,6 +303,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
57297 atomic_sub(i, v);
57298 }
57299
57300 +#ifdef CONFIG_PAX_REFCOUNT
57301 +static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
57302 +{
57303 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57304 +
57305 + atomic_sub_unchecked(i, v);
57306 +}
57307 +#endif
57308 +
57309 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
57310 {
57311 atomic_t *v = (atomic_t *)l;
57312 @@ -232,6 +361,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
57313 return (long)atomic_inc_return(v);
57314 }
57315
57316 +#ifdef CONFIG_PAX_REFCOUNT
57317 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
57318 +{
57319 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57320 +
57321 + return (long)atomic_inc_return_unchecked(v);
57322 +}
57323 +#endif
57324 +
57325 static inline long atomic_long_dec_return(atomic_long_t *l)
57326 {
57327 atomic_t *v = (atomic_t *)l;
57328 @@ -255,4 +393,49 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
57329
57330 #endif /* BITS_PER_LONG == 64 */
57331
57332 +#ifdef CONFIG_PAX_REFCOUNT
57333 +static inline void pax_refcount_needs_these_functions(void)
57334 +{
57335 + atomic_read_unchecked((atomic_unchecked_t *)NULL);
57336 + atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
57337 + atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
57338 + atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
57339 + atomic_inc_unchecked((atomic_unchecked_t *)NULL);
57340 + (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
57341 + atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
57342 + atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
57343 + atomic_dec_unchecked((atomic_unchecked_t *)NULL);
57344 + atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
57345 + (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
57346 +
57347 + atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
57348 + atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
57349 + atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
57350 + atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL);
57351 + atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
57352 + atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
57353 + atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
57354 +}
57355 +#else
57356 +#define atomic_read_unchecked(v) atomic_read(v)
57357 +#define atomic_set_unchecked(v, i) atomic_set((v), (i))
57358 +#define atomic_add_unchecked(i, v) atomic_add((i), (v))
57359 +#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
57360 +#define atomic_inc_unchecked(v) atomic_inc(v)
57361 +#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
57362 +#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
57363 +#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
57364 +#define atomic_dec_unchecked(v) atomic_dec(v)
57365 +#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
57366 +#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
57367 +
57368 +#define atomic_long_read_unchecked(v) atomic_long_read(v)
57369 +#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
57370 +#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
57371 +#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v))
57372 +#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
57373 +#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
57374 +#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
57375 +#endif
57376 +
57377 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
57378 diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
57379 index b18ce4f..2ee2843 100644
57380 --- a/include/asm-generic/atomic64.h
57381 +++ b/include/asm-generic/atomic64.h
57382 @@ -16,6 +16,8 @@ typedef struct {
57383 long long counter;
57384 } atomic64_t;
57385
57386 +typedef atomic64_t atomic64_unchecked_t;
57387 +
57388 #define ATOMIC64_INIT(i) { (i) }
57389
57390 extern long long atomic64_read(const atomic64_t *v);
57391 @@ -39,4 +41,14 @@ extern int atomic64_add_unless(atomic64_t *v, long long a, long long u);
57392 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
57393 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
57394
57395 +#define atomic64_read_unchecked(v) atomic64_read(v)
57396 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
57397 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
57398 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
57399 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
57400 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
57401 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
57402 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
57403 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
57404 +
57405 #endif /* _ASM_GENERIC_ATOMIC64_H */
57406 diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h
57407 index 1bfcfe5..e04c5c9 100644
57408 --- a/include/asm-generic/cache.h
57409 +++ b/include/asm-generic/cache.h
57410 @@ -6,7 +6,7 @@
57411 * cache lines need to provide their own cache.h.
57412 */
57413
57414 -#define L1_CACHE_SHIFT 5
57415 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
57416 +#define L1_CACHE_SHIFT 5UL
57417 +#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
57418
57419 #endif /* __ASM_GENERIC_CACHE_H */
57420 diff --git a/include/asm-generic/int-l64.h b/include/asm-generic/int-l64.h
57421 index 1ca3efc..e3dc852 100644
57422 --- a/include/asm-generic/int-l64.h
57423 +++ b/include/asm-generic/int-l64.h
57424 @@ -46,6 +46,8 @@ typedef unsigned int u32;
57425 typedef signed long s64;
57426 typedef unsigned long u64;
57427
57428 +typedef unsigned int intoverflow_t __attribute__ ((mode(TI)));
57429 +
57430 #define S8_C(x) x
57431 #define U8_C(x) x ## U
57432 #define S16_C(x) x
57433 diff --git a/include/asm-generic/int-ll64.h b/include/asm-generic/int-ll64.h
57434 index f394147..b6152b9 100644
57435 --- a/include/asm-generic/int-ll64.h
57436 +++ b/include/asm-generic/int-ll64.h
57437 @@ -51,6 +51,8 @@ typedef unsigned int u32;
57438 typedef signed long long s64;
57439 typedef unsigned long long u64;
57440
57441 +typedef unsigned long long intoverflow_t;
57442 +
57443 #define S8_C(x) x
57444 #define U8_C(x) x ## U
57445 #define S16_C(x) x
57446 diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
57447 index 0232ccb..13d9165 100644
57448 --- a/include/asm-generic/kmap_types.h
57449 +++ b/include/asm-generic/kmap_types.h
57450 @@ -29,10 +29,11 @@ KMAP_D(16) KM_IRQ_PTE,
57451 KMAP_D(17) KM_NMI,
57452 KMAP_D(18) KM_NMI_PTE,
57453 KMAP_D(19) KM_KDB,
57454 +KMAP_D(20) KM_CLEARPAGE,
57455 /*
57456 * Remember to update debug_kmap_atomic() when adding new kmap types!
57457 */
57458 -KMAP_D(20) KM_TYPE_NR
57459 +KMAP_D(21) KM_TYPE_NR
57460 };
57461
57462 #undef KMAP_D
57463 diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
57464 index 725612b..9cc513a 100644
57465 --- a/include/asm-generic/pgtable-nopmd.h
57466 +++ b/include/asm-generic/pgtable-nopmd.h
57467 @@ -1,14 +1,19 @@
57468 #ifndef _PGTABLE_NOPMD_H
57469 #define _PGTABLE_NOPMD_H
57470
57471 -#ifndef __ASSEMBLY__
57472 -
57473 #include <asm-generic/pgtable-nopud.h>
57474
57475 -struct mm_struct;
57476 -
57477 #define __PAGETABLE_PMD_FOLDED
57478
57479 +#define PMD_SHIFT PUD_SHIFT
57480 +#define PTRS_PER_PMD 1
57481 +#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
57482 +#define PMD_MASK (~(PMD_SIZE-1))
57483 +
57484 +#ifndef __ASSEMBLY__
57485 +
57486 +struct mm_struct;
57487 +
57488 /*
57489 * Having the pmd type consist of a pud gets the size right, and allows
57490 * us to conceptually access the pud entry that this pmd is folded into
57491 @@ -16,11 +21,6 @@ struct mm_struct;
57492 */
57493 typedef struct { pud_t pud; } pmd_t;
57494
57495 -#define PMD_SHIFT PUD_SHIFT
57496 -#define PTRS_PER_PMD 1
57497 -#define PMD_SIZE (1UL << PMD_SHIFT)
57498 -#define PMD_MASK (~(PMD_SIZE-1))
57499 -
57500 /*
57501 * The "pud_xxx()" functions here are trivial for a folded two-level
57502 * setup: the pmd is never bad, and a pmd always exists (as it's folded
57503 diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
57504 index 810431d..ccc3638 100644
57505 --- a/include/asm-generic/pgtable-nopud.h
57506 +++ b/include/asm-generic/pgtable-nopud.h
57507 @@ -1,10 +1,15 @@
57508 #ifndef _PGTABLE_NOPUD_H
57509 #define _PGTABLE_NOPUD_H
57510
57511 -#ifndef __ASSEMBLY__
57512 -
57513 #define __PAGETABLE_PUD_FOLDED
57514
57515 +#define PUD_SHIFT PGDIR_SHIFT
57516 +#define PTRS_PER_PUD 1
57517 +#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
57518 +#define PUD_MASK (~(PUD_SIZE-1))
57519 +
57520 +#ifndef __ASSEMBLY__
57521 +
57522 /*
57523 * Having the pud type consist of a pgd gets the size right, and allows
57524 * us to conceptually access the pgd entry that this pud is folded into
57525 @@ -12,11 +17,6 @@
57526 */
57527 typedef struct { pgd_t pgd; } pud_t;
57528
57529 -#define PUD_SHIFT PGDIR_SHIFT
57530 -#define PTRS_PER_PUD 1
57531 -#define PUD_SIZE (1UL << PUD_SHIFT)
57532 -#define PUD_MASK (~(PUD_SIZE-1))
57533 -
57534 /*
57535 * The "pgd_xxx()" functions here are trivial for a folded two-level
57536 * setup: the pud is never bad, and a pud always exists (as it's folded
57537 diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
57538 index 76bff2b..c7a14e2 100644
57539 --- a/include/asm-generic/pgtable.h
57540 +++ b/include/asm-generic/pgtable.h
57541 @@ -443,6 +443,14 @@ static inline int pmd_write(pmd_t pmd)
57542 #endif /* __HAVE_ARCH_PMD_WRITE */
57543 #endif
57544
57545 +#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
57546 +static inline unsigned long pax_open_kernel(void) { return 0; }
57547 +#endif
57548 +
57549 +#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
57550 +static inline unsigned long pax_close_kernel(void) { return 0; }
57551 +#endif
57552 +
57553 #endif /* !__ASSEMBLY__ */
57554
57555 #endif /* _ASM_GENERIC_PGTABLE_H */
57556 diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
57557 index b5e2e4c..6a5373e 100644
57558 --- a/include/asm-generic/vmlinux.lds.h
57559 +++ b/include/asm-generic/vmlinux.lds.h
57560 @@ -217,6 +217,7 @@
57561 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
57562 VMLINUX_SYMBOL(__start_rodata) = .; \
57563 *(.rodata) *(.rodata.*) \
57564 + *(.data..read_only) \
57565 *(__vermagic) /* Kernel version magic */ \
57566 . = ALIGN(8); \
57567 VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
57568 @@ -722,17 +723,18 @@
57569 * section in the linker script will go there too. @phdr should have
57570 * a leading colon.
57571 *
57572 - * Note that this macros defines __per_cpu_load as an absolute symbol.
57573 + * Note that this macros defines per_cpu_load as an absolute symbol.
57574 * If there is no need to put the percpu section at a predetermined
57575 * address, use PERCPU_SECTION.
57576 */
57577 #define PERCPU_VADDR(cacheline, vaddr, phdr) \
57578 - VMLINUX_SYMBOL(__per_cpu_load) = .; \
57579 - .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
57580 + per_cpu_load = .; \
57581 + .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
57582 - LOAD_OFFSET) { \
57583 + VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
57584 PERCPU_INPUT(cacheline) \
57585 } phdr \
57586 - . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
57587 + . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu);
57588
57589 /**
57590 * PERCPU_SECTION - define output section for percpu area, simple version
57591 diff --git a/include/drm/drmP.h b/include/drm/drmP.h
57592 index 1f9e951..14ef517 100644
57593 --- a/include/drm/drmP.h
57594 +++ b/include/drm/drmP.h
57595 @@ -72,6 +72,7 @@
57596 #include <linux/workqueue.h>
57597 #include <linux/poll.h>
57598 #include <asm/pgalloc.h>
57599 +#include <asm/local.h>
57600 #include "drm.h"
57601
57602 #include <linux/idr.h>
57603 @@ -1038,7 +1039,7 @@ struct drm_device {
57604
57605 /** \name Usage Counters */
57606 /*@{ */
57607 - int open_count; /**< Outstanding files open */
57608 + local_t open_count; /**< Outstanding files open */
57609 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
57610 atomic_t vma_count; /**< Outstanding vma areas open */
57611 int buf_use; /**< Buffers in use -- cannot alloc */
57612 @@ -1049,7 +1050,7 @@ struct drm_device {
57613 /*@{ */
57614 unsigned long counters;
57615 enum drm_stat_type types[15];
57616 - atomic_t counts[15];
57617 + atomic_unchecked_t counts[15];
57618 /*@} */
57619
57620 struct list_head filelist;
57621 diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
57622 index 73b0712..0b7ef2f 100644
57623 --- a/include/drm/drm_crtc_helper.h
57624 +++ b/include/drm/drm_crtc_helper.h
57625 @@ -74,7 +74,7 @@ struct drm_crtc_helper_funcs {
57626
57627 /* disable crtc when not in use - more explicit than dpms off */
57628 void (*disable)(struct drm_crtc *crtc);
57629 -};
57630 +} __no_const;
57631
57632 struct drm_encoder_helper_funcs {
57633 void (*dpms)(struct drm_encoder *encoder, int mode);
57634 @@ -95,7 +95,7 @@ struct drm_encoder_helper_funcs {
57635 struct drm_connector *connector);
57636 /* disable encoder when not in use - more explicit than dpms off */
57637 void (*disable)(struct drm_encoder *encoder);
57638 -};
57639 +} __no_const;
57640
57641 struct drm_connector_helper_funcs {
57642 int (*get_modes)(struct drm_connector *connector);
57643 diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
57644 index 26c1f78..6722682 100644
57645 --- a/include/drm/ttm/ttm_memory.h
57646 +++ b/include/drm/ttm/ttm_memory.h
57647 @@ -47,7 +47,7 @@
57648
57649 struct ttm_mem_shrink {
57650 int (*do_shrink) (struct ttm_mem_shrink *);
57651 -};
57652 +} __no_const;
57653
57654 /**
57655 * struct ttm_mem_global - Global memory accounting structure.
57656 diff --git a/include/linux/a.out.h b/include/linux/a.out.h
57657 index e86dfca..40cc55f 100644
57658 --- a/include/linux/a.out.h
57659 +++ b/include/linux/a.out.h
57660 @@ -39,6 +39,14 @@ enum machine_type {
57661 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
57662 };
57663
57664 +/* Constants for the N_FLAGS field */
57665 +#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
57666 +#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
57667 +#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
57668 +#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
57669 +/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
57670 +#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
57671 +
57672 #if !defined (N_MAGIC)
57673 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
57674 #endif
57675 diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
57676 index 49a83ca..df96b54 100644
57677 --- a/include/linux/atmdev.h
57678 +++ b/include/linux/atmdev.h
57679 @@ -237,7 +237,7 @@ struct compat_atm_iobuf {
57680 #endif
57681
57682 struct k_atm_aal_stats {
57683 -#define __HANDLE_ITEM(i) atomic_t i
57684 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
57685 __AAL_STAT_ITEMS
57686 #undef __HANDLE_ITEM
57687 };
57688 diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
57689 index fd88a39..f4d0bad 100644
57690 --- a/include/linux/binfmts.h
57691 +++ b/include/linux/binfmts.h
57692 @@ -88,6 +88,7 @@ struct linux_binfmt {
57693 int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
57694 int (*load_shlib)(struct file *);
57695 int (*core_dump)(struct coredump_params *cprm);
57696 + void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
57697 unsigned long min_coredump; /* minimal dump size */
57698 };
57699
57700 diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
57701 index 0ed1eb0..3ab569b 100644
57702 --- a/include/linux/blkdev.h
57703 +++ b/include/linux/blkdev.h
57704 @@ -1315,7 +1315,7 @@ struct block_device_operations {
57705 /* this callback is with swap_lock and sometimes page table lock held */
57706 void (*swap_slot_free_notify) (struct block_device *, unsigned long);
57707 struct module *owner;
57708 -};
57709 +} __do_const;
57710
57711 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
57712 unsigned long);
57713 diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
57714 index 4d1a074..88f929a 100644
57715 --- a/include/linux/blktrace_api.h
57716 +++ b/include/linux/blktrace_api.h
57717 @@ -162,7 +162,7 @@ struct blk_trace {
57718 struct dentry *dir;
57719 struct dentry *dropped_file;
57720 struct dentry *msg_file;
57721 - atomic_t dropped;
57722 + atomic_unchecked_t dropped;
57723 };
57724
57725 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
57726 diff --git a/include/linux/byteorder/little_endian.h b/include/linux/byteorder/little_endian.h
57727 index 83195fb..0b0f77d 100644
57728 --- a/include/linux/byteorder/little_endian.h
57729 +++ b/include/linux/byteorder/little_endian.h
57730 @@ -42,51 +42,51 @@
57731
57732 static inline __le64 __cpu_to_le64p(const __u64 *p)
57733 {
57734 - return (__force __le64)*p;
57735 + return (__force const __le64)*p;
57736 }
57737 static inline __u64 __le64_to_cpup(const __le64 *p)
57738 {
57739 - return (__force __u64)*p;
57740 + return (__force const __u64)*p;
57741 }
57742 static inline __le32 __cpu_to_le32p(const __u32 *p)
57743 {
57744 - return (__force __le32)*p;
57745 + return (__force const __le32)*p;
57746 }
57747 static inline __u32 __le32_to_cpup(const __le32 *p)
57748 {
57749 - return (__force __u32)*p;
57750 + return (__force const __u32)*p;
57751 }
57752 static inline __le16 __cpu_to_le16p(const __u16 *p)
57753 {
57754 - return (__force __le16)*p;
57755 + return (__force const __le16)*p;
57756 }
57757 static inline __u16 __le16_to_cpup(const __le16 *p)
57758 {
57759 - return (__force __u16)*p;
57760 + return (__force const __u16)*p;
57761 }
57762 static inline __be64 __cpu_to_be64p(const __u64 *p)
57763 {
57764 - return (__force __be64)__swab64p(p);
57765 + return (__force const __be64)__swab64p(p);
57766 }
57767 static inline __u64 __be64_to_cpup(const __be64 *p)
57768 {
57769 - return __swab64p((__u64 *)p);
57770 + return __swab64p((const __u64 *)p);
57771 }
57772 static inline __be32 __cpu_to_be32p(const __u32 *p)
57773 {
57774 - return (__force __be32)__swab32p(p);
57775 + return (__force const __be32)__swab32p(p);
57776 }
57777 static inline __u32 __be32_to_cpup(const __be32 *p)
57778 {
57779 - return __swab32p((__u32 *)p);
57780 + return __swab32p((const __u32 *)p);
57781 }
57782 static inline __be16 __cpu_to_be16p(const __u16 *p)
57783 {
57784 - return (__force __be16)__swab16p(p);
57785 + return (__force const __be16)__swab16p(p);
57786 }
57787 static inline __u16 __be16_to_cpup(const __be16 *p)
57788 {
57789 - return __swab16p((__u16 *)p);
57790 + return __swab16p((const __u16 *)p);
57791 }
57792 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
57793 #define __le64_to_cpus(x) do { (void)(x); } while (0)
57794 diff --git a/include/linux/cache.h b/include/linux/cache.h
57795 index 4c57065..4307975 100644
57796 --- a/include/linux/cache.h
57797 +++ b/include/linux/cache.h
57798 @@ -16,6 +16,10 @@
57799 #define __read_mostly
57800 #endif
57801
57802 +#ifndef __read_only
57803 +#define __read_only __read_mostly
57804 +#endif
57805 +
57806 #ifndef ____cacheline_aligned
57807 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
57808 #endif
57809 diff --git a/include/linux/capability.h b/include/linux/capability.h
57810 index a63d13d..069bfd5 100644
57811 --- a/include/linux/capability.h
57812 +++ b/include/linux/capability.h
57813 @@ -548,6 +548,9 @@ extern bool capable(int cap);
57814 extern bool ns_capable(struct user_namespace *ns, int cap);
57815 extern bool task_ns_capable(struct task_struct *t, int cap);
57816 extern bool nsown_capable(int cap);
57817 +extern bool task_ns_capable_nolog(struct task_struct *t, int cap);
57818 +extern bool ns_capable_nolog(struct user_namespace *ns, int cap);
57819 +extern bool capable_nolog(int cap);
57820
57821 /* audit system wants to get cap info from files as well */
57822 extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
57823 diff --git a/include/linux/cleancache.h b/include/linux/cleancache.h
57824 index 04ffb2e..6799180 100644
57825 --- a/include/linux/cleancache.h
57826 +++ b/include/linux/cleancache.h
57827 @@ -31,7 +31,7 @@ struct cleancache_ops {
57828 void (*flush_page)(int, struct cleancache_filekey, pgoff_t);
57829 void (*flush_inode)(int, struct cleancache_filekey);
57830 void (*flush_fs)(int);
57831 -};
57832 +} __no_const;
57833
57834 extern struct cleancache_ops
57835 cleancache_register_ops(struct cleancache_ops *ops);
57836 diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
57837 index dfadc96..c0e70c1 100644
57838 --- a/include/linux/compiler-gcc4.h
57839 +++ b/include/linux/compiler-gcc4.h
57840 @@ -31,6 +31,12 @@
57841
57842
57843 #if __GNUC_MINOR__ >= 5
57844 +
57845 +#ifdef CONSTIFY_PLUGIN
57846 +#define __no_const __attribute__((no_const))
57847 +#define __do_const __attribute__((do_const))
57848 +#endif
57849 +
57850 /*
57851 * Mark a position in code as unreachable. This can be used to
57852 * suppress control flow warnings after asm blocks that transfer
57853 @@ -46,6 +52,11 @@
57854 #define __noclone __attribute__((__noclone__))
57855
57856 #endif
57857 +
57858 +#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
57859 +#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
57860 +#define __bos0(ptr) __bos((ptr), 0)
57861 +#define __bos1(ptr) __bos((ptr), 1)
57862 #endif
57863
57864 #if __GNUC_MINOR__ > 0
57865 diff --git a/include/linux/compiler.h b/include/linux/compiler.h
57866 index 320d6c9..8573a1c 100644
57867 --- a/include/linux/compiler.h
57868 +++ b/include/linux/compiler.h
57869 @@ -5,31 +5,62 @@
57870
57871 #ifdef __CHECKER__
57872 # define __user __attribute__((noderef, address_space(1)))
57873 +# define __force_user __force __user
57874 # define __kernel __attribute__((address_space(0)))
57875 +# define __force_kernel __force __kernel
57876 # define __safe __attribute__((safe))
57877 # define __force __attribute__((force))
57878 # define __nocast __attribute__((nocast))
57879 # define __iomem __attribute__((noderef, address_space(2)))
57880 +# define __force_iomem __force __iomem
57881 # define __acquires(x) __attribute__((context(x,0,1)))
57882 # define __releases(x) __attribute__((context(x,1,0)))
57883 # define __acquire(x) __context__(x,1)
57884 # define __release(x) __context__(x,-1)
57885 # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
57886 # define __percpu __attribute__((noderef, address_space(3)))
57887 +# define __force_percpu __force __percpu
57888 #ifdef CONFIG_SPARSE_RCU_POINTER
57889 # define __rcu __attribute__((noderef, address_space(4)))
57890 +# define __force_rcu __force __rcu
57891 #else
57892 # define __rcu
57893 +# define __force_rcu
57894 #endif
57895 extern void __chk_user_ptr(const volatile void __user *);
57896 extern void __chk_io_ptr(const volatile void __iomem *);
57897 +#elif defined(CHECKER_PLUGIN)
57898 +//# define __user
57899 +//# define __force_user
57900 +//# define __kernel
57901 +//# define __force_kernel
57902 +# define __safe
57903 +# define __force
57904 +# define __nocast
57905 +# define __iomem
57906 +# define __force_iomem
57907 +# define __chk_user_ptr(x) (void)0
57908 +# define __chk_io_ptr(x) (void)0
57909 +# define __builtin_warning(x, y...) (1)
57910 +# define __acquires(x)
57911 +# define __releases(x)
57912 +# define __acquire(x) (void)0
57913 +# define __release(x) (void)0
57914 +# define __cond_lock(x,c) (c)
57915 +# define __percpu
57916 +# define __force_percpu
57917 +# define __rcu
57918 +# define __force_rcu
57919 #else
57920 # define __user
57921 +# define __force_user
57922 # define __kernel
57923 +# define __force_kernel
57924 # define __safe
57925 # define __force
57926 # define __nocast
57927 # define __iomem
57928 +# define __force_iomem
57929 # define __chk_user_ptr(x) (void)0
57930 # define __chk_io_ptr(x) (void)0
57931 # define __builtin_warning(x, y...) (1)
57932 @@ -39,7 +70,9 @@ extern void __chk_io_ptr(const volatile void __iomem *);
57933 # define __release(x) (void)0
57934 # define __cond_lock(x,c) (c)
57935 # define __percpu
57936 +# define __force_percpu
57937 # define __rcu
57938 +# define __force_rcu
57939 #endif
57940
57941 #ifdef __KERNEL__
57942 @@ -264,6 +297,14 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
57943 # define __attribute_const__ /* unimplemented */
57944 #endif
57945
57946 +#ifndef __no_const
57947 +# define __no_const
57948 +#endif
57949 +
57950 +#ifndef __do_const
57951 +# define __do_const
57952 +#endif
57953 +
57954 /*
57955 * Tell gcc if a function is cold. The compiler will assume any path
57956 * directly leading to the call is unlikely.
57957 @@ -273,6 +314,22 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
57958 #define __cold
57959 #endif
57960
57961 +#ifndef __alloc_size
57962 +#define __alloc_size(...)
57963 +#endif
57964 +
57965 +#ifndef __bos
57966 +#define __bos(ptr, arg)
57967 +#endif
57968 +
57969 +#ifndef __bos0
57970 +#define __bos0(ptr)
57971 +#endif
57972 +
57973 +#ifndef __bos1
57974 +#define __bos1(ptr)
57975 +#endif
57976 +
57977 /* Simple shorthand for a section definition */
57978 #ifndef __section
57979 # define __section(S) __attribute__ ((__section__(#S)))
57980 @@ -306,6 +363,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
57981 * use is to mediate communication between process-level code and irq/NMI
57982 * handlers, all running on the same CPU.
57983 */
57984 -#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
57985 +#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
57986 +#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
57987
57988 #endif /* __LINUX_COMPILER_H */
57989 diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
57990 index e9eaec5..bfeb9bb 100644
57991 --- a/include/linux/cpuset.h
57992 +++ b/include/linux/cpuset.h
57993 @@ -118,7 +118,7 @@ static inline void put_mems_allowed(void)
57994 * nodemask.
57995 */
57996 smp_mb();
57997 - --ACCESS_ONCE(current->mems_allowed_change_disable);
57998 + --ACCESS_ONCE_RW(current->mems_allowed_change_disable);
57999 }
58000
58001 static inline void set_mems_allowed(nodemask_t nodemask)
58002 diff --git a/include/linux/cred.h b/include/linux/cred.h
58003 index 4030896..8d6f342 100644
58004 --- a/include/linux/cred.h
58005 +++ b/include/linux/cred.h
58006 @@ -207,6 +207,9 @@ static inline void validate_creds_for_do_exit(struct task_struct *tsk)
58007 static inline void validate_process_creds(void)
58008 {
58009 }
58010 +static inline void validate_task_creds(struct task_struct *task)
58011 +{
58012 +}
58013 #endif
58014
58015 /**
58016 diff --git a/include/linux/crypto.h b/include/linux/crypto.h
58017 index 8a94217..15d49e3 100644
58018 --- a/include/linux/crypto.h
58019 +++ b/include/linux/crypto.h
58020 @@ -365,7 +365,7 @@ struct cipher_tfm {
58021 const u8 *key, unsigned int keylen);
58022 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
58023 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
58024 -};
58025 +} __no_const;
58026
58027 struct hash_tfm {
58028 int (*init)(struct hash_desc *desc);
58029 @@ -386,13 +386,13 @@ struct compress_tfm {
58030 int (*cot_decompress)(struct crypto_tfm *tfm,
58031 const u8 *src, unsigned int slen,
58032 u8 *dst, unsigned int *dlen);
58033 -};
58034 +} __no_const;
58035
58036 struct rng_tfm {
58037 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
58038 unsigned int dlen);
58039 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
58040 -};
58041 +} __no_const;
58042
58043 #define crt_ablkcipher crt_u.ablkcipher
58044 #define crt_aead crt_u.aead
58045 diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
58046 index 7925bf0..d5143d2 100644
58047 --- a/include/linux/decompress/mm.h
58048 +++ b/include/linux/decompress/mm.h
58049 @@ -77,7 +77,7 @@ static void free(void *where)
58050 * warnings when not needed (indeed large_malloc / large_free are not
58051 * needed by inflate */
58052
58053 -#define malloc(a) kmalloc(a, GFP_KERNEL)
58054 +#define malloc(a) kmalloc((a), GFP_KERNEL)
58055 #define free(a) kfree(a)
58056
58057 #define large_malloc(a) vmalloc(a)
58058 diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
58059 index e13117c..e9fc938 100644
58060 --- a/include/linux/dma-mapping.h
58061 +++ b/include/linux/dma-mapping.h
58062 @@ -46,7 +46,7 @@ struct dma_map_ops {
58063 u64 (*get_required_mask)(struct device *dev);
58064 #endif
58065 int is_phys;
58066 -};
58067 +} __do_const;
58068
58069 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
58070
58071 diff --git a/include/linux/efi.h b/include/linux/efi.h
58072 index 2362a0b..cfaf8fcc 100644
58073 --- a/include/linux/efi.h
58074 +++ b/include/linux/efi.h
58075 @@ -446,7 +446,7 @@ struct efivar_operations {
58076 efi_get_variable_t *get_variable;
58077 efi_get_next_variable_t *get_next_variable;
58078 efi_set_variable_t *set_variable;
58079 -};
58080 +} __no_const;
58081
58082 struct efivars {
58083 /*
58084 diff --git a/include/linux/elf.h b/include/linux/elf.h
58085 index 31f0508..5421c01 100644
58086 --- a/include/linux/elf.h
58087 +++ b/include/linux/elf.h
58088 @@ -49,6 +49,17 @@ typedef __s64 Elf64_Sxword;
58089 #define PT_GNU_EH_FRAME 0x6474e550
58090
58091 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
58092 +#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
58093 +
58094 +#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
58095 +
58096 +/* Constants for the e_flags field */
58097 +#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
58098 +#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
58099 +#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
58100 +#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
58101 +/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
58102 +#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
58103
58104 /*
58105 * Extended Numbering
58106 @@ -106,6 +117,8 @@ typedef __s64 Elf64_Sxword;
58107 #define DT_DEBUG 21
58108 #define DT_TEXTREL 22
58109 #define DT_JMPREL 23
58110 +#define DT_FLAGS 30
58111 + #define DF_TEXTREL 0x00000004
58112 #define DT_ENCODING 32
58113 #define OLD_DT_LOOS 0x60000000
58114 #define DT_LOOS 0x6000000d
58115 @@ -252,6 +265,19 @@ typedef struct elf64_hdr {
58116 #define PF_W 0x2
58117 #define PF_X 0x1
58118
58119 +#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
58120 +#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
58121 +#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
58122 +#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
58123 +#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
58124 +#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
58125 +/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
58126 +/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
58127 +#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
58128 +#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
58129 +#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
58130 +#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
58131 +
58132 typedef struct elf32_phdr{
58133 Elf32_Word p_type;
58134 Elf32_Off p_offset;
58135 @@ -344,6 +370,8 @@ typedef struct elf64_shdr {
58136 #define EI_OSABI 7
58137 #define EI_PAD 8
58138
58139 +#define EI_PAX 14
58140 +
58141 #define ELFMAG0 0x7f /* EI_MAG */
58142 #define ELFMAG1 'E'
58143 #define ELFMAG2 'L'
58144 @@ -423,6 +451,7 @@ extern Elf32_Dyn _DYNAMIC [];
58145 #define elf_note elf32_note
58146 #define elf_addr_t Elf32_Off
58147 #define Elf_Half Elf32_Half
58148 +#define elf_dyn Elf32_Dyn
58149
58150 #else
58151
58152 @@ -433,6 +462,7 @@ extern Elf64_Dyn _DYNAMIC [];
58153 #define elf_note elf64_note
58154 #define elf_addr_t Elf64_Off
58155 #define Elf_Half Elf64_Half
58156 +#define elf_dyn Elf64_Dyn
58157
58158 #endif
58159
58160 diff --git a/include/linux/filter.h b/include/linux/filter.h
58161 index 8eeb205..d59bfa2 100644
58162 --- a/include/linux/filter.h
58163 +++ b/include/linux/filter.h
58164 @@ -134,6 +134,7 @@ struct sock_fprog { /* Required for SO_ATTACH_FILTER. */
58165
58166 struct sk_buff;
58167 struct sock;
58168 +struct bpf_jit_work;
58169
58170 struct sk_filter
58171 {
58172 @@ -141,6 +142,9 @@ struct sk_filter
58173 unsigned int len; /* Number of filter blocks */
58174 unsigned int (*bpf_func)(const struct sk_buff *skb,
58175 const struct sock_filter *filter);
58176 +#ifdef CONFIG_BPF_JIT
58177 + struct bpf_jit_work *work;
58178 +#endif
58179 struct rcu_head rcu;
58180 struct sock_filter insns[0];
58181 };
58182 diff --git a/include/linux/firewire.h b/include/linux/firewire.h
58183 index 84ccf8e..2e9b14c 100644
58184 --- a/include/linux/firewire.h
58185 +++ b/include/linux/firewire.h
58186 @@ -428,7 +428,7 @@ struct fw_iso_context {
58187 union {
58188 fw_iso_callback_t sc;
58189 fw_iso_mc_callback_t mc;
58190 - } callback;
58191 + } __no_const callback;
58192 void *callback_data;
58193 };
58194
58195 diff --git a/include/linux/fs.h b/include/linux/fs.h
58196 index e0bc4ff..d79c2fa 100644
58197 --- a/include/linux/fs.h
58198 +++ b/include/linux/fs.h
58199 @@ -1608,7 +1608,8 @@ struct file_operations {
58200 int (*setlease)(struct file *, long, struct file_lock **);
58201 long (*fallocate)(struct file *file, int mode, loff_t offset,
58202 loff_t len);
58203 -};
58204 +} __do_const;
58205 +typedef struct file_operations __no_const file_operations_no_const;
58206
58207 struct inode_operations {
58208 struct dentry * (*lookup) (struct inode *,struct dentry *, struct nameidata *);
58209 diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
58210 index 003dc0f..3c4ea97 100644
58211 --- a/include/linux/fs_struct.h
58212 +++ b/include/linux/fs_struct.h
58213 @@ -6,7 +6,7 @@
58214 #include <linux/seqlock.h>
58215
58216 struct fs_struct {
58217 - int users;
58218 + atomic_t users;
58219 spinlock_t lock;
58220 seqcount_t seq;
58221 int umask;
58222 diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
58223 index ce31408..b1ad003 100644
58224 --- a/include/linux/fscache-cache.h
58225 +++ b/include/linux/fscache-cache.h
58226 @@ -102,7 +102,7 @@ struct fscache_operation {
58227 fscache_operation_release_t release;
58228 };
58229
58230 -extern atomic_t fscache_op_debug_id;
58231 +extern atomic_unchecked_t fscache_op_debug_id;
58232 extern void fscache_op_work_func(struct work_struct *work);
58233
58234 extern void fscache_enqueue_operation(struct fscache_operation *);
58235 @@ -122,7 +122,7 @@ static inline void fscache_operation_init(struct fscache_operation *op,
58236 {
58237 INIT_WORK(&op->work, fscache_op_work_func);
58238 atomic_set(&op->usage, 1);
58239 - op->debug_id = atomic_inc_return(&fscache_op_debug_id);
58240 + op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
58241 op->processor = processor;
58242 op->release = release;
58243 INIT_LIST_HEAD(&op->pend_link);
58244 diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
58245 index 2a53f10..0187fdf 100644
58246 --- a/include/linux/fsnotify.h
58247 +++ b/include/linux/fsnotify.h
58248 @@ -314,7 +314,7 @@ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid)
58249 */
58250 static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name)
58251 {
58252 - return kstrdup(name, GFP_KERNEL);
58253 + return (const unsigned char *)kstrdup((const char *)name, GFP_KERNEL);
58254 }
58255
58256 /*
58257 diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
58258 index 91d0e0a3..035666b 100644
58259 --- a/include/linux/fsnotify_backend.h
58260 +++ b/include/linux/fsnotify_backend.h
58261 @@ -105,6 +105,7 @@ struct fsnotify_ops {
58262 void (*freeing_mark)(struct fsnotify_mark *mark, struct fsnotify_group *group);
58263 void (*free_event_priv)(struct fsnotify_event_private_data *priv);
58264 };
58265 +typedef struct fsnotify_ops __no_const fsnotify_ops_no_const;
58266
58267 /*
58268 * A group is a "thing" that wants to receive notification about filesystem
58269 diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
58270 index c3da42d..c70e0df 100644
58271 --- a/include/linux/ftrace_event.h
58272 +++ b/include/linux/ftrace_event.h
58273 @@ -97,7 +97,7 @@ struct trace_event_functions {
58274 trace_print_func raw;
58275 trace_print_func hex;
58276 trace_print_func binary;
58277 -};
58278 +} __no_const;
58279
58280 struct trace_event {
58281 struct hlist_node node;
58282 @@ -254,7 +254,7 @@ extern int trace_define_field(struct ftrace_event_call *call, const char *type,
58283 extern int trace_add_event_call(struct ftrace_event_call *call);
58284 extern void trace_remove_event_call(struct ftrace_event_call *call);
58285
58286 -#define is_signed_type(type) (((type)(-1)) < 0)
58287 +#define is_signed_type(type) (((type)(-1)) < (type)1)
58288
58289 int trace_set_clr_event(const char *system, const char *event, int set);
58290
58291 diff --git a/include/linux/genhd.h b/include/linux/genhd.h
58292 index 6d18f35..ab71e2c 100644
58293 --- a/include/linux/genhd.h
58294 +++ b/include/linux/genhd.h
58295 @@ -185,7 +185,7 @@ struct gendisk {
58296 struct kobject *slave_dir;
58297
58298 struct timer_rand_state *random;
58299 - atomic_t sync_io; /* RAID */
58300 + atomic_unchecked_t sync_io; /* RAID */
58301 struct disk_events *ev;
58302 #ifdef CONFIG_BLK_DEV_INTEGRITY
58303 struct blk_integrity *integrity;
58304 diff --git a/include/linux/gracl.h b/include/linux/gracl.h
58305 new file mode 100644
58306 index 0000000..0dc3943
58307 --- /dev/null
58308 +++ b/include/linux/gracl.h
58309 @@ -0,0 +1,317 @@
58310 +#ifndef GR_ACL_H
58311 +#define GR_ACL_H
58312 +
58313 +#include <linux/grdefs.h>
58314 +#include <linux/resource.h>
58315 +#include <linux/capability.h>
58316 +#include <linux/dcache.h>
58317 +#include <asm/resource.h>
58318 +
58319 +/* Major status information */
58320 +
58321 +#define GR_VERSION "grsecurity 2.2.2"
58322 +#define GRSECURITY_VERSION 0x2202
58323 +
58324 +enum {
58325 + GR_SHUTDOWN = 0,
58326 + GR_ENABLE = 1,
58327 + GR_SPROLE = 2,
58328 + GR_RELOAD = 3,
58329 + GR_SEGVMOD = 4,
58330 + GR_STATUS = 5,
58331 + GR_UNSPROLE = 6,
58332 + GR_PASSSET = 7,
58333 + GR_SPROLEPAM = 8,
58334 +};
58335 +
58336 +/* Password setup definitions
58337 + * kernel/grhash.c */
58338 +enum {
58339 + GR_PW_LEN = 128,
58340 + GR_SALT_LEN = 16,
58341 + GR_SHA_LEN = 32,
58342 +};
58343 +
58344 +enum {
58345 + GR_SPROLE_LEN = 64,
58346 +};
58347 +
58348 +enum {
58349 + GR_NO_GLOB = 0,
58350 + GR_REG_GLOB,
58351 + GR_CREATE_GLOB
58352 +};
58353 +
58354 +#define GR_NLIMITS 32
58355 +
58356 +/* Begin Data Structures */
58357 +
58358 +struct sprole_pw {
58359 + unsigned char *rolename;
58360 + unsigned char salt[GR_SALT_LEN];
58361 + unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
58362 +};
58363 +
58364 +struct name_entry {
58365 + __u32 key;
58366 + ino_t inode;
58367 + dev_t device;
58368 + char *name;
58369 + __u16 len;
58370 + __u8 deleted;
58371 + struct name_entry *prev;
58372 + struct name_entry *next;
58373 +};
58374 +
58375 +struct inodev_entry {
58376 + struct name_entry *nentry;
58377 + struct inodev_entry *prev;
58378 + struct inodev_entry *next;
58379 +};
58380 +
58381 +struct acl_role_db {
58382 + struct acl_role_label **r_hash;
58383 + __u32 r_size;
58384 +};
58385 +
58386 +struct inodev_db {
58387 + struct inodev_entry **i_hash;
58388 + __u32 i_size;
58389 +};
58390 +
58391 +struct name_db {
58392 + struct name_entry **n_hash;
58393 + __u32 n_size;
58394 +};
58395 +
58396 +struct crash_uid {
58397 + uid_t uid;
58398 + unsigned long expires;
58399 +};
58400 +
58401 +struct gr_hash_struct {
58402 + void **table;
58403 + void **nametable;
58404 + void *first;
58405 + __u32 table_size;
58406 + __u32 used_size;
58407 + int type;
58408 +};
58409 +
58410 +/* Userspace Grsecurity ACL data structures */
58411 +
58412 +struct acl_subject_label {
58413 + char *filename;
58414 + ino_t inode;
58415 + dev_t device;
58416 + __u32 mode;
58417 + kernel_cap_t cap_mask;
58418 + kernel_cap_t cap_lower;
58419 + kernel_cap_t cap_invert_audit;
58420 +
58421 + struct rlimit res[GR_NLIMITS];
58422 + __u32 resmask;
58423 +
58424 + __u8 user_trans_type;
58425 + __u8 group_trans_type;
58426 + uid_t *user_transitions;
58427 + gid_t *group_transitions;
58428 + __u16 user_trans_num;
58429 + __u16 group_trans_num;
58430 +
58431 + __u32 sock_families[2];
58432 + __u32 ip_proto[8];
58433 + __u32 ip_type;
58434 + struct acl_ip_label **ips;
58435 + __u32 ip_num;
58436 + __u32 inaddr_any_override;
58437 +
58438 + __u32 crashes;
58439 + unsigned long expires;
58440 +
58441 + struct acl_subject_label *parent_subject;
58442 + struct gr_hash_struct *hash;
58443 + struct acl_subject_label *prev;
58444 + struct acl_subject_label *next;
58445 +
58446 + struct acl_object_label **obj_hash;
58447 + __u32 obj_hash_size;
58448 + __u16 pax_flags;
58449 +};
58450 +
58451 +struct role_allowed_ip {
58452 + __u32 addr;
58453 + __u32 netmask;
58454 +
58455 + struct role_allowed_ip *prev;
58456 + struct role_allowed_ip *next;
58457 +};
58458 +
58459 +struct role_transition {
58460 + char *rolename;
58461 +
58462 + struct role_transition *prev;
58463 + struct role_transition *next;
58464 +};
58465 +
58466 +struct acl_role_label {
58467 + char *rolename;
58468 + uid_t uidgid;
58469 + __u16 roletype;
58470 +
58471 + __u16 auth_attempts;
58472 + unsigned long expires;
58473 +
58474 + struct acl_subject_label *root_label;
58475 + struct gr_hash_struct *hash;
58476 +
58477 + struct acl_role_label *prev;
58478 + struct acl_role_label *next;
58479 +
58480 + struct role_transition *transitions;
58481 + struct role_allowed_ip *allowed_ips;
58482 + uid_t *domain_children;
58483 + __u16 domain_child_num;
58484 +
58485 + struct acl_subject_label **subj_hash;
58486 + __u32 subj_hash_size;
58487 +};
58488 +
58489 +struct user_acl_role_db {
58490 + struct acl_role_label **r_table;
58491 + __u32 num_pointers; /* Number of allocations to track */
58492 + __u32 num_roles; /* Number of roles */
58493 + __u32 num_domain_children; /* Number of domain children */
58494 + __u32 num_subjects; /* Number of subjects */
58495 + __u32 num_objects; /* Number of objects */
58496 +};
58497 +
58498 +struct acl_object_label {
58499 + char *filename;
58500 + ino_t inode;
58501 + dev_t device;
58502 + __u32 mode;
58503 +
58504 + struct acl_subject_label *nested;
58505 + struct acl_object_label *globbed;
58506 +
58507 + /* next two structures not used */
58508 +
58509 + struct acl_object_label *prev;
58510 + struct acl_object_label *next;
58511 +};
58512 +
58513 +struct acl_ip_label {
58514 + char *iface;
58515 + __u32 addr;
58516 + __u32 netmask;
58517 + __u16 low, high;
58518 + __u8 mode;
58519 + __u32 type;
58520 + __u32 proto[8];
58521 +
58522 + /* next two structures not used */
58523 +
58524 + struct acl_ip_label *prev;
58525 + struct acl_ip_label *next;
58526 +};
58527 +
58528 +struct gr_arg {
58529 + struct user_acl_role_db role_db;
58530 + unsigned char pw[GR_PW_LEN];
58531 + unsigned char salt[GR_SALT_LEN];
58532 + unsigned char sum[GR_SHA_LEN];
58533 + unsigned char sp_role[GR_SPROLE_LEN];
58534 + struct sprole_pw *sprole_pws;
58535 + dev_t segv_device;
58536 + ino_t segv_inode;
58537 + uid_t segv_uid;
58538 + __u16 num_sprole_pws;
58539 + __u16 mode;
58540 +};
58541 +
58542 +struct gr_arg_wrapper {
58543 + struct gr_arg *arg;
58544 + __u32 version;
58545 + __u32 size;
58546 +};
58547 +
58548 +struct subject_map {
58549 + struct acl_subject_label *user;
58550 + struct acl_subject_label *kernel;
58551 + struct subject_map *prev;
58552 + struct subject_map *next;
58553 +};
58554 +
58555 +struct acl_subj_map_db {
58556 + struct subject_map **s_hash;
58557 + __u32 s_size;
58558 +};
58559 +
58560 +/* End Data Structures Section */
58561 +
58562 +/* Hash functions generated by empirical testing by Brad Spengler
58563 + Makes good use of the low bits of the inode. Generally 0-1 times
58564 + in loop for successful match. 0-3 for unsuccessful match.
58565 + Shift/add algorithm with modulus of table size and an XOR*/
58566 +
58567 +static __inline__ unsigned int
58568 +rhash(const uid_t uid, const __u16 type, const unsigned int sz)
58569 +{
58570 + return ((((uid + type) << (16 + type)) ^ uid) % sz);
58571 +}
58572 +
58573 + static __inline__ unsigned int
58574 +shash(const struct acl_subject_label *userp, const unsigned int sz)
58575 +{
58576 + return ((const unsigned long)userp % sz);
58577 +}
58578 +
58579 +static __inline__ unsigned int
58580 +fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
58581 +{
58582 + return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
58583 +}
58584 +
58585 +static __inline__ unsigned int
58586 +nhash(const char *name, const __u16 len, const unsigned int sz)
58587 +{
58588 + return full_name_hash((const unsigned char *)name, len) % sz;
58589 +}
58590 +
58591 +#define FOR_EACH_ROLE_START(role) \
58592 + role = role_list; \
58593 + while (role) {
58594 +
58595 +#define FOR_EACH_ROLE_END(role) \
58596 + role = role->prev; \
58597 + }
58598 +
58599 +#define FOR_EACH_SUBJECT_START(role,subj,iter) \
58600 + subj = NULL; \
58601 + iter = 0; \
58602 + while (iter < role->subj_hash_size) { \
58603 + if (subj == NULL) \
58604 + subj = role->subj_hash[iter]; \
58605 + if (subj == NULL) { \
58606 + iter++; \
58607 + continue; \
58608 + }
58609 +
58610 +#define FOR_EACH_SUBJECT_END(subj,iter) \
58611 + subj = subj->next; \
58612 + if (subj == NULL) \
58613 + iter++; \
58614 + }
58615 +
58616 +
58617 +#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
58618 + subj = role->hash->first; \
58619 + while (subj != NULL) {
58620 +
58621 +#define FOR_EACH_NESTED_SUBJECT_END(subj) \
58622 + subj = subj->next; \
58623 + }
58624 +
58625 +#endif
58626 +
58627 diff --git a/include/linux/gralloc.h b/include/linux/gralloc.h
58628 new file mode 100644
58629 index 0000000..323ecf2
58630 --- /dev/null
58631 +++ b/include/linux/gralloc.h
58632 @@ -0,0 +1,9 @@
58633 +#ifndef __GRALLOC_H
58634 +#define __GRALLOC_H
58635 +
58636 +void acl_free_all(void);
58637 +int acl_alloc_stack_init(unsigned long size);
58638 +void *acl_alloc(unsigned long len);
58639 +void *acl_alloc_num(unsigned long num, unsigned long len);
58640 +
58641 +#endif
58642 diff --git a/include/linux/grdefs.h b/include/linux/grdefs.h
58643 new file mode 100644
58644 index 0000000..b30e9bc
58645 --- /dev/null
58646 +++ b/include/linux/grdefs.h
58647 @@ -0,0 +1,140 @@
58648 +#ifndef GRDEFS_H
58649 +#define GRDEFS_H
58650 +
58651 +/* Begin grsecurity status declarations */
58652 +
58653 +enum {
58654 + GR_READY = 0x01,
58655 + GR_STATUS_INIT = 0x00 // disabled state
58656 +};
58657 +
58658 +/* Begin ACL declarations */
58659 +
58660 +/* Role flags */
58661 +
58662 +enum {
58663 + GR_ROLE_USER = 0x0001,
58664 + GR_ROLE_GROUP = 0x0002,
58665 + GR_ROLE_DEFAULT = 0x0004,
58666 + GR_ROLE_SPECIAL = 0x0008,
58667 + GR_ROLE_AUTH = 0x0010,
58668 + GR_ROLE_NOPW = 0x0020,
58669 + GR_ROLE_GOD = 0x0040,
58670 + GR_ROLE_LEARN = 0x0080,
58671 + GR_ROLE_TPE = 0x0100,
58672 + GR_ROLE_DOMAIN = 0x0200,
58673 + GR_ROLE_PAM = 0x0400,
58674 + GR_ROLE_PERSIST = 0x0800
58675 +};
58676 +
58677 +/* ACL Subject and Object mode flags */
58678 +enum {
58679 + GR_DELETED = 0x80000000
58680 +};
58681 +
58682 +/* ACL Object-only mode flags */
58683 +enum {
58684 + GR_READ = 0x00000001,
58685 + GR_APPEND = 0x00000002,
58686 + GR_WRITE = 0x00000004,
58687 + GR_EXEC = 0x00000008,
58688 + GR_FIND = 0x00000010,
58689 + GR_INHERIT = 0x00000020,
58690 + GR_SETID = 0x00000040,
58691 + GR_CREATE = 0x00000080,
58692 + GR_DELETE = 0x00000100,
58693 + GR_LINK = 0x00000200,
58694 + GR_AUDIT_READ = 0x00000400,
58695 + GR_AUDIT_APPEND = 0x00000800,
58696 + GR_AUDIT_WRITE = 0x00001000,
58697 + GR_AUDIT_EXEC = 0x00002000,
58698 + GR_AUDIT_FIND = 0x00004000,
58699 + GR_AUDIT_INHERIT= 0x00008000,
58700 + GR_AUDIT_SETID = 0x00010000,
58701 + GR_AUDIT_CREATE = 0x00020000,
58702 + GR_AUDIT_DELETE = 0x00040000,
58703 + GR_AUDIT_LINK = 0x00080000,
58704 + GR_PTRACERD = 0x00100000,
58705 + GR_NOPTRACE = 0x00200000,
58706 + GR_SUPPRESS = 0x00400000,
58707 + GR_NOLEARN = 0x00800000,
58708 + GR_INIT_TRANSFER= 0x01000000
58709 +};
58710 +
58711 +#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
58712 + GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
58713 + GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
58714 +
58715 +/* ACL subject-only mode flags */
58716 +enum {
58717 + GR_KILL = 0x00000001,
58718 + GR_VIEW = 0x00000002,
58719 + GR_PROTECTED = 0x00000004,
58720 + GR_LEARN = 0x00000008,
58721 + GR_OVERRIDE = 0x00000010,
58722 + /* just a placeholder, this mode is only used in userspace */
58723 + GR_DUMMY = 0x00000020,
58724 + GR_PROTSHM = 0x00000040,
58725 + GR_KILLPROC = 0x00000080,
58726 + GR_KILLIPPROC = 0x00000100,
58727 + /* just a placeholder, this mode is only used in userspace */
58728 + GR_NOTROJAN = 0x00000200,
58729 + GR_PROTPROCFD = 0x00000400,
58730 + GR_PROCACCT = 0x00000800,
58731 + GR_RELAXPTRACE = 0x00001000,
58732 + GR_NESTED = 0x00002000,
58733 + GR_INHERITLEARN = 0x00004000,
58734 + GR_PROCFIND = 0x00008000,
58735 + GR_POVERRIDE = 0x00010000,
58736 + GR_KERNELAUTH = 0x00020000,
58737 + GR_ATSECURE = 0x00040000,
58738 + GR_SHMEXEC = 0x00080000
58739 +};
58740 +
58741 +enum {
58742 + GR_PAX_ENABLE_SEGMEXEC = 0x0001,
58743 + GR_PAX_ENABLE_PAGEEXEC = 0x0002,
58744 + GR_PAX_ENABLE_MPROTECT = 0x0004,
58745 + GR_PAX_ENABLE_RANDMMAP = 0x0008,
58746 + GR_PAX_ENABLE_EMUTRAMP = 0x0010,
58747 + GR_PAX_DISABLE_SEGMEXEC = 0x0100,
58748 + GR_PAX_DISABLE_PAGEEXEC = 0x0200,
58749 + GR_PAX_DISABLE_MPROTECT = 0x0400,
58750 + GR_PAX_DISABLE_RANDMMAP = 0x0800,
58751 + GR_PAX_DISABLE_EMUTRAMP = 0x1000,
58752 +};
58753 +
58754 +enum {
58755 + GR_ID_USER = 0x01,
58756 + GR_ID_GROUP = 0x02,
58757 +};
58758 +
58759 +enum {
58760 + GR_ID_ALLOW = 0x01,
58761 + GR_ID_DENY = 0x02,
58762 +};
58763 +
58764 +#define GR_CRASH_RES 31
58765 +#define GR_UIDTABLE_MAX 500
58766 +
58767 +/* begin resource learning section */
58768 +enum {
58769 + GR_RLIM_CPU_BUMP = 60,
58770 + GR_RLIM_FSIZE_BUMP = 50000,
58771 + GR_RLIM_DATA_BUMP = 10000,
58772 + GR_RLIM_STACK_BUMP = 1000,
58773 + GR_RLIM_CORE_BUMP = 10000,
58774 + GR_RLIM_RSS_BUMP = 500000,
58775 + GR_RLIM_NPROC_BUMP = 1,
58776 + GR_RLIM_NOFILE_BUMP = 5,
58777 + GR_RLIM_MEMLOCK_BUMP = 50000,
58778 + GR_RLIM_AS_BUMP = 500000,
58779 + GR_RLIM_LOCKS_BUMP = 2,
58780 + GR_RLIM_SIGPENDING_BUMP = 5,
58781 + GR_RLIM_MSGQUEUE_BUMP = 10000,
58782 + GR_RLIM_NICE_BUMP = 1,
58783 + GR_RLIM_RTPRIO_BUMP = 1,
58784 + GR_RLIM_RTTIME_BUMP = 1000000
58785 +};
58786 +
58787 +#endif
58788 diff --git a/include/linux/grinternal.h b/include/linux/grinternal.h
58789 new file mode 100644
58790 index 0000000..da390f1
58791 --- /dev/null
58792 +++ b/include/linux/grinternal.h
58793 @@ -0,0 +1,221 @@
58794 +#ifndef __GRINTERNAL_H
58795 +#define __GRINTERNAL_H
58796 +
58797 +#ifdef CONFIG_GRKERNSEC
58798 +
58799 +#include <linux/fs.h>
58800 +#include <linux/mnt_namespace.h>
58801 +#include <linux/nsproxy.h>
58802 +#include <linux/gracl.h>
58803 +#include <linux/grdefs.h>
58804 +#include <linux/grmsg.h>
58805 +
58806 +void gr_add_learn_entry(const char *fmt, ...)
58807 + __attribute__ ((format (printf, 1, 2)));
58808 +__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
58809 + const struct vfsmount *mnt);
58810 +__u32 gr_check_create(const struct dentry *new_dentry,
58811 + const struct dentry *parent,
58812 + const struct vfsmount *mnt, const __u32 mode);
58813 +int gr_check_protected_task(const struct task_struct *task);
58814 +__u32 to_gr_audit(const __u32 reqmode);
58815 +int gr_set_acls(const int type);
58816 +int gr_apply_subject_to_task(struct task_struct *task);
58817 +int gr_acl_is_enabled(void);
58818 +char gr_roletype_to_char(void);
58819 +
58820 +void gr_handle_alertkill(struct task_struct *task);
58821 +char *gr_to_filename(const struct dentry *dentry,
58822 + const struct vfsmount *mnt);
58823 +char *gr_to_filename1(const struct dentry *dentry,
58824 + const struct vfsmount *mnt);
58825 +char *gr_to_filename2(const struct dentry *dentry,
58826 + const struct vfsmount *mnt);
58827 +char *gr_to_filename3(const struct dentry *dentry,
58828 + const struct vfsmount *mnt);
58829 +
58830 +extern int grsec_enable_ptrace_readexec;
58831 +extern int grsec_enable_harden_ptrace;
58832 +extern int grsec_enable_link;
58833 +extern int grsec_enable_fifo;
58834 +extern int grsec_enable_execve;
58835 +extern int grsec_enable_shm;
58836 +extern int grsec_enable_execlog;
58837 +extern int grsec_enable_signal;
58838 +extern int grsec_enable_audit_ptrace;
58839 +extern int grsec_enable_forkfail;
58840 +extern int grsec_enable_time;
58841 +extern int grsec_enable_rofs;
58842 +extern int grsec_enable_chroot_shmat;
58843 +extern int grsec_enable_chroot_mount;
58844 +extern int grsec_enable_chroot_double;
58845 +extern int grsec_enable_chroot_pivot;
58846 +extern int grsec_enable_chroot_chdir;
58847 +extern int grsec_enable_chroot_chmod;
58848 +extern int grsec_enable_chroot_mknod;
58849 +extern int grsec_enable_chroot_fchdir;
58850 +extern int grsec_enable_chroot_nice;
58851 +extern int grsec_enable_chroot_execlog;
58852 +extern int grsec_enable_chroot_caps;
58853 +extern int grsec_enable_chroot_sysctl;
58854 +extern int grsec_enable_chroot_unix;
58855 +extern int grsec_enable_tpe;
58856 +extern int grsec_tpe_gid;
58857 +extern int grsec_enable_tpe_all;
58858 +extern int grsec_enable_tpe_invert;
58859 +extern int grsec_enable_socket_all;
58860 +extern int grsec_socket_all_gid;
58861 +extern int grsec_enable_socket_client;
58862 +extern int grsec_socket_client_gid;
58863 +extern int grsec_enable_socket_server;
58864 +extern int grsec_socket_server_gid;
58865 +extern int grsec_audit_gid;
58866 +extern int grsec_enable_group;
58867 +extern int grsec_enable_audit_textrel;
58868 +extern int grsec_enable_log_rwxmaps;
58869 +extern int grsec_enable_mount;
58870 +extern int grsec_enable_chdir;
58871 +extern int grsec_resource_logging;
58872 +extern int grsec_enable_blackhole;
58873 +extern int grsec_lastack_retries;
58874 +extern int grsec_enable_brute;
58875 +extern int grsec_lock;
58876 +
58877 +extern spinlock_t grsec_alert_lock;
58878 +extern unsigned long grsec_alert_wtime;
58879 +extern unsigned long grsec_alert_fyet;
58880 +
58881 +extern spinlock_t grsec_audit_lock;
58882 +
58883 +extern rwlock_t grsec_exec_file_lock;
58884 +
58885 +#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
58886 + gr_to_filename2((tsk)->exec_file->f_path.dentry, \
58887 + (tsk)->exec_file->f_vfsmnt) : "/")
58888 +
58889 +#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
58890 + gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
58891 + (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
58892 +
58893 +#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
58894 + gr_to_filename((tsk)->exec_file->f_path.dentry, \
58895 + (tsk)->exec_file->f_vfsmnt) : "/")
58896 +
58897 +#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
58898 + gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
58899 + (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
58900 +
58901 +#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
58902 +
58903 +#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
58904 +
58905 +#define DEFAULTSECARGS(task, cred, pcred) gr_task_fullpath(task), (task)->comm, \
58906 + (task)->pid, (cred)->uid, \
58907 + (cred)->euid, (cred)->gid, (cred)->egid, \
58908 + gr_parent_task_fullpath(task), \
58909 + (task)->real_parent->comm, (task)->real_parent->pid, \
58910 + (pcred)->uid, (pcred)->euid, \
58911 + (pcred)->gid, (pcred)->egid
58912 +
58913 +#define GR_CHROOT_CAPS {{ \
58914 + CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
58915 + CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
58916 + CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
58917 + CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
58918 + CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
58919 + CAP_TO_MASK(CAP_IPC_OWNER) | CAP_TO_MASK(CAP_SETFCAP), \
58920 + CAP_TO_MASK(CAP_SYSLOG) | CAP_TO_MASK(CAP_MAC_ADMIN) }}
58921 +
58922 +#define security_learn(normal_msg,args...) \
58923 +({ \
58924 + read_lock(&grsec_exec_file_lock); \
58925 + gr_add_learn_entry(normal_msg "\n", ## args); \
58926 + read_unlock(&grsec_exec_file_lock); \
58927 +})
58928 +
58929 +enum {
58930 + GR_DO_AUDIT,
58931 + GR_DONT_AUDIT,
58932 + /* used for non-audit messages that we shouldn't kill the task on */
58933 + GR_DONT_AUDIT_GOOD
58934 +};
58935 +
58936 +enum {
58937 + GR_TTYSNIFF,
58938 + GR_RBAC,
58939 + GR_RBAC_STR,
58940 + GR_STR_RBAC,
58941 + GR_RBAC_MODE2,
58942 + GR_RBAC_MODE3,
58943 + GR_FILENAME,
58944 + GR_SYSCTL_HIDDEN,
58945 + GR_NOARGS,
58946 + GR_ONE_INT,
58947 + GR_ONE_INT_TWO_STR,
58948 + GR_ONE_STR,
58949 + GR_STR_INT,
58950 + GR_TWO_STR_INT,
58951 + GR_TWO_INT,
58952 + GR_TWO_U64,
58953 + GR_THREE_INT,
58954 + GR_FIVE_INT_TWO_STR,
58955 + GR_TWO_STR,
58956 + GR_THREE_STR,
58957 + GR_FOUR_STR,
58958 + GR_STR_FILENAME,
58959 + GR_FILENAME_STR,
58960 + GR_FILENAME_TWO_INT,
58961 + GR_FILENAME_TWO_INT_STR,
58962 + GR_TEXTREL,
58963 + GR_PTRACE,
58964 + GR_RESOURCE,
58965 + GR_CAP,
58966 + GR_SIG,
58967 + GR_SIG2,
58968 + GR_CRASH1,
58969 + GR_CRASH2,
58970 + GR_PSACCT,
58971 + GR_RWXMAP
58972 +};
58973 +
58974 +#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
58975 +#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
58976 +#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
58977 +#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
58978 +#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
58979 +#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
58980 +#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
58981 +#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
58982 +#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
58983 +#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
58984 +#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
58985 +#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
58986 +#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
58987 +#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
58988 +#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
58989 +#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
58990 +#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
58991 +#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
58992 +#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
58993 +#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
58994 +#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
58995 +#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
58996 +#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
58997 +#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
58998 +#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
58999 +#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
59000 +#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
59001 +#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
59002 +#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
59003 +#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
59004 +#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
59005 +#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
59006 +#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
59007 +#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
59008 +#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
59009 +
59010 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
59011 +
59012 +#endif
59013 +
59014 +#endif
59015 diff --git a/include/linux/grmsg.h b/include/linux/grmsg.h
59016 new file mode 100644
59017 index 0000000..dfb15ef
59018 --- /dev/null
59019 +++ b/include/linux/grmsg.h
59020 @@ -0,0 +1,109 @@
59021 +#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
59022 +#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
59023 +#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
59024 +#define GR_STOPMOD_MSG "denied modification of module state by "
59025 +#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
59026 +#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
59027 +#define GR_IOPERM_MSG "denied use of ioperm() by "
59028 +#define GR_IOPL_MSG "denied use of iopl() by "
59029 +#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
59030 +#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
59031 +#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
59032 +#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
59033 +#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
59034 +#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
59035 +#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
59036 +#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
59037 +#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
59038 +#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
59039 +#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
59040 +#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
59041 +#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
59042 +#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
59043 +#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
59044 +#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
59045 +#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
59046 +#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
59047 +#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
59048 +#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
59049 +#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
59050 +#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
59051 +#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
59052 +#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
59053 +#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
59054 +#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
59055 +#define GR_EXEC_TPE_MSG "denied untrusted exec of %.950s by "
59056 +#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
59057 +#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
59058 +#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
59059 +#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
59060 +#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
59061 +#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
59062 +#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
59063 +#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
59064 +#define GR_FCHMOD_ACL_MSG "%s fchmod of %.950s by "
59065 +#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
59066 +#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
59067 +#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
59068 +#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
59069 +#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
59070 +#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
59071 +#define GR_INITF_ACL_MSG "init_variables() failed %s by "
59072 +#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
59073 +#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbaged by "
59074 +#define GR_SHUTS_ACL_MSG "shutdown auth success for "
59075 +#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
59076 +#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
59077 +#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
59078 +#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
59079 +#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
59080 +#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
59081 +#define GR_ENABLEF_ACL_MSG "unable to load %s for "
59082 +#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
59083 +#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
59084 +#define GR_RELOADF_ACL_MSG "failed reload of %s for "
59085 +#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
59086 +#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
59087 +#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
59088 +#define GR_SPROLEF_ACL_MSG "special role %s failure for "
59089 +#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
59090 +#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
59091 +#define GR_INVMODE_ACL_MSG "invalid mode %d by "
59092 +#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
59093 +#define GR_FAILFORK_MSG "failed fork with errno %s by "
59094 +#define GR_NICE_CHROOT_MSG "denied priority change by "
59095 +#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
59096 +#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
59097 +#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
59098 +#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
59099 +#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
59100 +#define GR_TIME_MSG "time set by "
59101 +#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
59102 +#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
59103 +#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
59104 +#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
59105 +#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
59106 +#define GR_BIND_MSG "denied bind() by "
59107 +#define GR_CONNECT_MSG "denied connect() by "
59108 +#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
59109 +#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
59110 +#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
59111 +#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
59112 +#define GR_CAP_ACL_MSG "use of %s denied for "
59113 +#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for "
59114 +#define GR_CAP_ACL_MSG2 "use of %s permitted for "
59115 +#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
59116 +#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
59117 +#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
59118 +#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
59119 +#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
59120 +#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
59121 +#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
59122 +#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
59123 +#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
59124 +#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
59125 +#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
59126 +#define GR_VM86_MSG "denied use of vm86 by "
59127 +#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
59128 +#define GR_PTRACE_READEXEC_MSG "denied ptrace of unreadable binary %.950s by "
59129 +#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
59130 diff --git a/include/linux/grsecurity.h b/include/linux/grsecurity.h
59131 new file mode 100644
59132 index 0000000..eb4885f
59133 --- /dev/null
59134 +++ b/include/linux/grsecurity.h
59135 @@ -0,0 +1,233 @@
59136 +#ifndef GR_SECURITY_H
59137 +#define GR_SECURITY_H
59138 +#include <linux/fs.h>
59139 +#include <linux/fs_struct.h>
59140 +#include <linux/binfmts.h>
59141 +#include <linux/gracl.h>
59142 +
59143 +/* notify of brain-dead configs */
59144 +#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
59145 +#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
59146 +#endif
59147 +#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
59148 +#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
59149 +#endif
59150 +#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
59151 +#error "CONFIG_PAX_NOEXEC enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
59152 +#endif
59153 +#if defined(CONFIG_PAX_ASLR) && (defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
59154 +#error "CONFIG_PAX_ASLR enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
59155 +#endif
59156 +#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
59157 +#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
59158 +#endif
59159 +#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
59160 +#error "CONFIG_PAX enabled, but no PaX options are enabled."
59161 +#endif
59162 +
59163 +#include <linux/compat.h>
59164 +
59165 +struct user_arg_ptr {
59166 +#ifdef CONFIG_COMPAT
59167 + bool is_compat;
59168 +#endif
59169 + union {
59170 + const char __user *const __user *native;
59171 +#ifdef CONFIG_COMPAT
59172 + compat_uptr_t __user *compat;
59173 +#endif
59174 + } ptr;
59175 +};
59176 +
59177 +void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags);
59178 +void gr_handle_brute_check(void);
59179 +void gr_handle_kernel_exploit(void);
59180 +int gr_process_user_ban(void);
59181 +
59182 +char gr_roletype_to_char(void);
59183 +
59184 +int gr_acl_enable_at_secure(void);
59185 +
59186 +int gr_check_user_change(int real, int effective, int fs);
59187 +int gr_check_group_change(int real, int effective, int fs);
59188 +
59189 +void gr_del_task_from_ip_table(struct task_struct *p);
59190 +
59191 +int gr_pid_is_chrooted(struct task_struct *p);
59192 +int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
59193 +int gr_handle_chroot_nice(void);
59194 +int gr_handle_chroot_sysctl(const int op);
59195 +int gr_handle_chroot_setpriority(struct task_struct *p,
59196 + const int niceval);
59197 +int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
59198 +int gr_handle_chroot_chroot(const struct dentry *dentry,
59199 + const struct vfsmount *mnt);
59200 +void gr_handle_chroot_chdir(struct path *path);
59201 +int gr_handle_chroot_chmod(const struct dentry *dentry,
59202 + const struct vfsmount *mnt, const int mode);
59203 +int gr_handle_chroot_mknod(const struct dentry *dentry,
59204 + const struct vfsmount *mnt, const int mode);
59205 +int gr_handle_chroot_mount(const struct dentry *dentry,
59206 + const struct vfsmount *mnt,
59207 + const char *dev_name);
59208 +int gr_handle_chroot_pivot(void);
59209 +int gr_handle_chroot_unix(const pid_t pid);
59210 +
59211 +int gr_handle_rawio(const struct inode *inode);
59212 +
59213 +void gr_handle_ioperm(void);
59214 +void gr_handle_iopl(void);
59215 +
59216 +int gr_tpe_allow(const struct file *file);
59217 +
59218 +void gr_set_chroot_entries(struct task_struct *task, struct path *path);
59219 +void gr_clear_chroot_entries(struct task_struct *task);
59220 +
59221 +void gr_log_forkfail(const int retval);
59222 +void gr_log_timechange(void);
59223 +void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
59224 +void gr_log_chdir(const struct dentry *dentry,
59225 + const struct vfsmount *mnt);
59226 +void gr_log_chroot_exec(const struct dentry *dentry,
59227 + const struct vfsmount *mnt);
59228 +void gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv);
59229 +void gr_log_remount(const char *devname, const int retval);
59230 +void gr_log_unmount(const char *devname, const int retval);
59231 +void gr_log_mount(const char *from, const char *to, const int retval);
59232 +void gr_log_textrel(struct vm_area_struct *vma);
59233 +void gr_log_rwxmmap(struct file *file);
59234 +void gr_log_rwxmprotect(struct file *file);
59235 +
59236 +int gr_handle_follow_link(const struct inode *parent,
59237 + const struct inode *inode,
59238 + const struct dentry *dentry,
59239 + const struct vfsmount *mnt);
59240 +int gr_handle_fifo(const struct dentry *dentry,
59241 + const struct vfsmount *mnt,
59242 + const struct dentry *dir, const int flag,
59243 + const int acc_mode);
59244 +int gr_handle_hardlink(const struct dentry *dentry,
59245 + const struct vfsmount *mnt,
59246 + struct inode *inode,
59247 + const int mode, const char *to);
59248 +
59249 +int gr_is_capable(const int cap);
59250 +int gr_is_capable_nolog(const int cap);
59251 +void gr_learn_resource(const struct task_struct *task, const int limit,
59252 + const unsigned long wanted, const int gt);
59253 +void gr_copy_label(struct task_struct *tsk);
59254 +void gr_handle_crash(struct task_struct *task, const int sig);
59255 +int gr_handle_signal(const struct task_struct *p, const int sig);
59256 +int gr_check_crash_uid(const uid_t uid);
59257 +int gr_check_protected_task(const struct task_struct *task);
59258 +int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
59259 +int gr_acl_handle_mmap(const struct file *file,
59260 + const unsigned long prot);
59261 +int gr_acl_handle_mprotect(const struct file *file,
59262 + const unsigned long prot);
59263 +int gr_check_hidden_task(const struct task_struct *tsk);
59264 +__u32 gr_acl_handle_truncate(const struct dentry *dentry,
59265 + const struct vfsmount *mnt);
59266 +__u32 gr_acl_handle_utime(const struct dentry *dentry,
59267 + const struct vfsmount *mnt);
59268 +__u32 gr_acl_handle_access(const struct dentry *dentry,
59269 + const struct vfsmount *mnt, const int fmode);
59270 +__u32 gr_acl_handle_fchmod(const struct dentry *dentry,
59271 + const struct vfsmount *mnt, mode_t mode);
59272 +__u32 gr_acl_handle_chmod(const struct dentry *dentry,
59273 + const struct vfsmount *mnt, mode_t mode);
59274 +__u32 gr_acl_handle_chown(const struct dentry *dentry,
59275 + const struct vfsmount *mnt);
59276 +__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
59277 + const struct vfsmount *mnt);
59278 +int gr_handle_ptrace(struct task_struct *task, const long request);
59279 +int gr_handle_proc_ptrace(struct task_struct *task);
59280 +__u32 gr_acl_handle_execve(const struct dentry *dentry,
59281 + const struct vfsmount *mnt);
59282 +int gr_check_crash_exec(const struct file *filp);
59283 +int gr_acl_is_enabled(void);
59284 +void gr_set_kernel_label(struct task_struct *task);
59285 +void gr_set_role_label(struct task_struct *task, const uid_t uid,
59286 + const gid_t gid);
59287 +int gr_set_proc_label(const struct dentry *dentry,
59288 + const struct vfsmount *mnt,
59289 + const int unsafe_flags);
59290 +__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
59291 + const struct vfsmount *mnt);
59292 +__u32 gr_acl_handle_open(const struct dentry *dentry,
59293 + const struct vfsmount *mnt, int acc_mode);
59294 +__u32 gr_acl_handle_creat(const struct dentry *dentry,
59295 + const struct dentry *p_dentry,
59296 + const struct vfsmount *p_mnt,
59297 + int open_flags, int acc_mode, const int imode);
59298 +void gr_handle_create(const struct dentry *dentry,
59299 + const struct vfsmount *mnt);
59300 +void gr_handle_proc_create(const struct dentry *dentry,
59301 + const struct inode *inode);
59302 +__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
59303 + const struct dentry *parent_dentry,
59304 + const struct vfsmount *parent_mnt,
59305 + const int mode);
59306 +__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
59307 + const struct dentry *parent_dentry,
59308 + const struct vfsmount *parent_mnt);
59309 +__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
59310 + const struct vfsmount *mnt);
59311 +void gr_handle_delete(const ino_t ino, const dev_t dev);
59312 +__u32 gr_acl_handle_unlink(const struct dentry *dentry,
59313 + const struct vfsmount *mnt);
59314 +__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
59315 + const struct dentry *parent_dentry,
59316 + const struct vfsmount *parent_mnt,
59317 + const char *from);
59318 +__u32 gr_acl_handle_link(const struct dentry *new_dentry,
59319 + const struct dentry *parent_dentry,
59320 + const struct vfsmount *parent_mnt,
59321 + const struct dentry *old_dentry,
59322 + const struct vfsmount *old_mnt, const char *to);
59323 +int gr_acl_handle_rename(struct dentry *new_dentry,
59324 + struct dentry *parent_dentry,
59325 + const struct vfsmount *parent_mnt,
59326 + struct dentry *old_dentry,
59327 + struct inode *old_parent_inode,
59328 + struct vfsmount *old_mnt, const char *newname);
59329 +void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
59330 + struct dentry *old_dentry,
59331 + struct dentry *new_dentry,
59332 + struct vfsmount *mnt, const __u8 replace);
59333 +__u32 gr_check_link(const struct dentry *new_dentry,
59334 + const struct dentry *parent_dentry,
59335 + const struct vfsmount *parent_mnt,
59336 + const struct dentry *old_dentry,
59337 + const struct vfsmount *old_mnt);
59338 +int gr_acl_handle_filldir(const struct file *file, const char *name,
59339 + const unsigned int namelen, const ino_t ino);
59340 +
59341 +__u32 gr_acl_handle_unix(const struct dentry *dentry,
59342 + const struct vfsmount *mnt);
59343 +void gr_acl_handle_exit(void);
59344 +void gr_acl_handle_psacct(struct task_struct *task, const long code);
59345 +int gr_acl_handle_procpidmem(const struct task_struct *task);
59346 +int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
59347 +int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
59348 +void gr_audit_ptrace(struct task_struct *task);
59349 +dev_t gr_get_dev_from_dentry(struct dentry *dentry);
59350 +
59351 +int gr_ptrace_readexec(struct file *file, int unsafe_flags);
59352 +
59353 +#ifdef CONFIG_GRKERNSEC
59354 +void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
59355 +void gr_handle_vm86(void);
59356 +void gr_handle_mem_readwrite(u64 from, u64 to);
59357 +
59358 +extern int grsec_enable_dmesg;
59359 +extern int grsec_disable_privio;
59360 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
59361 +extern int grsec_enable_chroot_findtask;
59362 +#endif
59363 +#ifdef CONFIG_GRKERNSEC_SETXID
59364 +extern int grsec_enable_setxid;
59365 +#endif
59366 +#endif
59367 +
59368 +#endif
59369 diff --git a/include/linux/grsock.h b/include/linux/grsock.h
59370 new file mode 100644
59371 index 0000000..e7ffaaf
59372 --- /dev/null
59373 +++ b/include/linux/grsock.h
59374 @@ -0,0 +1,19 @@
59375 +#ifndef __GRSOCK_H
59376 +#define __GRSOCK_H
59377 +
59378 +extern void gr_attach_curr_ip(const struct sock *sk);
59379 +extern int gr_handle_sock_all(const int family, const int type,
59380 + const int protocol);
59381 +extern int gr_handle_sock_server(const struct sockaddr *sck);
59382 +extern int gr_handle_sock_server_other(const struct sock *sck);
59383 +extern int gr_handle_sock_client(const struct sockaddr *sck);
59384 +extern int gr_search_connect(struct socket * sock,
59385 + struct sockaddr_in * addr);
59386 +extern int gr_search_bind(struct socket * sock,
59387 + struct sockaddr_in * addr);
59388 +extern int gr_search_listen(struct socket * sock);
59389 +extern int gr_search_accept(struct socket * sock);
59390 +extern int gr_search_socket(const int domain, const int type,
59391 + const int protocol);
59392 +
59393 +#endif
59394 diff --git a/include/linux/hid.h b/include/linux/hid.h
59395 index c235e4e..f0cf7a0 100644
59396 --- a/include/linux/hid.h
59397 +++ b/include/linux/hid.h
59398 @@ -679,7 +679,7 @@ struct hid_ll_driver {
59399 unsigned int code, int value);
59400
59401 int (*parse)(struct hid_device *hdev);
59402 -};
59403 +} __no_const;
59404
59405 #define PM_HINT_FULLON 1<<5
59406 #define PM_HINT_NORMAL 1<<1
59407 diff --git a/include/linux/highmem.h b/include/linux/highmem.h
59408 index 3a93f73..b19d0b3 100644
59409 --- a/include/linux/highmem.h
59410 +++ b/include/linux/highmem.h
59411 @@ -185,6 +185,18 @@ static inline void clear_highpage(struct page *page)
59412 kunmap_atomic(kaddr, KM_USER0);
59413 }
59414
59415 +static inline void sanitize_highpage(struct page *page)
59416 +{
59417 + void *kaddr;
59418 + unsigned long flags;
59419 +
59420 + local_irq_save(flags);
59421 + kaddr = kmap_atomic(page, KM_CLEARPAGE);
59422 + clear_page(kaddr);
59423 + kunmap_atomic(kaddr, KM_CLEARPAGE);
59424 + local_irq_restore(flags);
59425 +}
59426 +
59427 static inline void zero_user_segments(struct page *page,
59428 unsigned start1, unsigned end1,
59429 unsigned start2, unsigned end2)
59430 diff --git a/include/linux/i2c.h b/include/linux/i2c.h
59431 index 07d103a..04ec65b 100644
59432 --- a/include/linux/i2c.h
59433 +++ b/include/linux/i2c.h
59434 @@ -364,6 +364,7 @@ struct i2c_algorithm {
59435 /* To determine what the adapter supports */
59436 u32 (*functionality) (struct i2c_adapter *);
59437 };
59438 +typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
59439
59440 /*
59441 * i2c_adapter is the structure used to identify a physical i2c bus along
59442 diff --git a/include/linux/i2o.h b/include/linux/i2o.h
59443 index a6deef4..c56a7f2 100644
59444 --- a/include/linux/i2o.h
59445 +++ b/include/linux/i2o.h
59446 @@ -564,7 +564,7 @@ struct i2o_controller {
59447 struct i2o_device *exec; /* Executive */
59448 #if BITS_PER_LONG == 64
59449 spinlock_t context_list_lock; /* lock for context_list */
59450 - atomic_t context_list_counter; /* needed for unique contexts */
59451 + atomic_unchecked_t context_list_counter; /* needed for unique contexts */
59452 struct list_head context_list; /* list of context id's
59453 and pointers */
59454 #endif
59455 diff --git a/include/linux/init.h b/include/linux/init.h
59456 index 9146f39..885354d 100644
59457 --- a/include/linux/init.h
59458 +++ b/include/linux/init.h
59459 @@ -293,13 +293,13 @@ void __init parse_early_options(char *cmdline);
59460
59461 /* Each module must use one module_init(). */
59462 #define module_init(initfn) \
59463 - static inline initcall_t __inittest(void) \
59464 + static inline __used initcall_t __inittest(void) \
59465 { return initfn; } \
59466 int init_module(void) __attribute__((alias(#initfn)));
59467
59468 /* This is only required if you want to be unloadable. */
59469 #define module_exit(exitfn) \
59470 - static inline exitcall_t __exittest(void) \
59471 + static inline __used exitcall_t __exittest(void) \
59472 { return exitfn; } \
59473 void cleanup_module(void) __attribute__((alias(#exitfn)));
59474
59475 diff --git a/include/linux/init_task.h b/include/linux/init_task.h
59476 index 32574ee..00d4ef1 100644
59477 --- a/include/linux/init_task.h
59478 +++ b/include/linux/init_task.h
59479 @@ -128,6 +128,12 @@ extern struct cred init_cred;
59480
59481 #define INIT_TASK_COMM "swapper"
59482
59483 +#ifdef CONFIG_X86
59484 +#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
59485 +#else
59486 +#define INIT_TASK_THREAD_INFO
59487 +#endif
59488 +
59489 /*
59490 * INIT_TASK is used to set up the first task table, touch at
59491 * your own risk!. Base=0, limit=0x1fffff (=2MB)
59492 @@ -166,6 +172,7 @@ extern struct cred init_cred;
59493 RCU_INIT_POINTER(.cred, &init_cred), \
59494 .comm = INIT_TASK_COMM, \
59495 .thread = INIT_THREAD, \
59496 + INIT_TASK_THREAD_INFO \
59497 .fs = &init_fs, \
59498 .files = &init_files, \
59499 .signal = &init_signals, \
59500 diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
59501 index e6ca56d..8583707 100644
59502 --- a/include/linux/intel-iommu.h
59503 +++ b/include/linux/intel-iommu.h
59504 @@ -296,7 +296,7 @@ struct iommu_flush {
59505 u8 fm, u64 type);
59506 void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
59507 unsigned int size_order, u64 type);
59508 -};
59509 +} __no_const;
59510
59511 enum {
59512 SR_DMAR_FECTL_REG,
59513 diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
59514 index a64b00e..464d8bc 100644
59515 --- a/include/linux/interrupt.h
59516 +++ b/include/linux/interrupt.h
59517 @@ -441,7 +441,7 @@ enum
59518 /* map softirq index to softirq name. update 'softirq_to_name' in
59519 * kernel/softirq.c when adding a new softirq.
59520 */
59521 -extern char *softirq_to_name[NR_SOFTIRQS];
59522 +extern const char * const softirq_to_name[NR_SOFTIRQS];
59523
59524 /* softirq mask and active fields moved to irq_cpustat_t in
59525 * asm/hardirq.h to get better cache usage. KAO
59526 @@ -449,12 +449,12 @@ extern char *softirq_to_name[NR_SOFTIRQS];
59527
59528 struct softirq_action
59529 {
59530 - void (*action)(struct softirq_action *);
59531 + void (*action)(void);
59532 };
59533
59534 asmlinkage void do_softirq(void);
59535 asmlinkage void __do_softirq(void);
59536 -extern void open_softirq(int nr, void (*action)(struct softirq_action *));
59537 +extern void open_softirq(int nr, void (*action)(void));
59538 extern void softirq_init(void);
59539 static inline void __raise_softirq_irqoff(unsigned int nr)
59540 {
59541 diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
59542 index 3875719..4cd454c 100644
59543 --- a/include/linux/kallsyms.h
59544 +++ b/include/linux/kallsyms.h
59545 @@ -15,7 +15,8 @@
59546
59547 struct module;
59548
59549 -#ifdef CONFIG_KALLSYMS
59550 +#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
59551 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
59552 /* Lookup the address for a symbol. Returns 0 if not found. */
59553 unsigned long kallsyms_lookup_name(const char *name);
59554
59555 @@ -99,6 +100,16 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u
59556 /* Stupid that this does nothing, but I didn't create this mess. */
59557 #define __print_symbol(fmt, addr)
59558 #endif /*CONFIG_KALLSYMS*/
59559 +#else /* when included by kallsyms.c, vsnprintf.c, or
59560 + arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
59561 +extern void __print_symbol(const char *fmt, unsigned long address);
59562 +extern int sprint_backtrace(char *buffer, unsigned long address);
59563 +extern int sprint_symbol(char *buffer, unsigned long address);
59564 +const char *kallsyms_lookup(unsigned long addr,
59565 + unsigned long *symbolsize,
59566 + unsigned long *offset,
59567 + char **modname, char *namebuf);
59568 +#endif
59569
59570 /* This macro allows us to keep printk typechecking */
59571 static __printf(1, 2)
59572 diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
59573 index fa39183..40160be 100644
59574 --- a/include/linux/kgdb.h
59575 +++ b/include/linux/kgdb.h
59576 @@ -53,7 +53,7 @@ extern int kgdb_connected;
59577 extern int kgdb_io_module_registered;
59578
59579 extern atomic_t kgdb_setting_breakpoint;
59580 -extern atomic_t kgdb_cpu_doing_single_step;
59581 +extern atomic_unchecked_t kgdb_cpu_doing_single_step;
59582
59583 extern struct task_struct *kgdb_usethread;
59584 extern struct task_struct *kgdb_contthread;
59585 @@ -251,7 +251,7 @@ struct kgdb_arch {
59586 void (*disable_hw_break)(struct pt_regs *regs);
59587 void (*remove_all_hw_break)(void);
59588 void (*correct_hw_break)(void);
59589 -};
59590 +} __do_const;
59591
59592 /**
59593 * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
59594 @@ -276,7 +276,7 @@ struct kgdb_io {
59595 void (*pre_exception) (void);
59596 void (*post_exception) (void);
59597 int is_console;
59598 -};
59599 +} __do_const;
59600
59601 extern struct kgdb_arch arch_kgdb_ops;
59602
59603 diff --git a/include/linux/kmod.h b/include/linux/kmod.h
59604 index b16f653..eb908f4 100644
59605 --- a/include/linux/kmod.h
59606 +++ b/include/linux/kmod.h
59607 @@ -34,6 +34,8 @@ extern char modprobe_path[]; /* for sysctl */
59608 * usually useless though. */
59609 extern __printf(2, 3)
59610 int __request_module(bool wait, const char *name, ...);
59611 +extern __printf(3, 4)
59612 +int ___request_module(bool wait, char *param_name, const char *name, ...);
59613 #define request_module(mod...) __request_module(true, mod)
59614 #define request_module_nowait(mod...) __request_module(false, mod)
59615 #define try_then_request_module(x, mod...) \
59616 diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
59617 index d526231..086e89b 100644
59618 --- a/include/linux/kvm_host.h
59619 +++ b/include/linux/kvm_host.h
59620 @@ -308,7 +308,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
59621 void vcpu_load(struct kvm_vcpu *vcpu);
59622 void vcpu_put(struct kvm_vcpu *vcpu);
59623
59624 -int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
59625 +int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
59626 struct module *module);
59627 void kvm_exit(void);
59628
59629 @@ -454,7 +454,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
59630 struct kvm_guest_debug *dbg);
59631 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
59632
59633 -int kvm_arch_init(void *opaque);
59634 +int kvm_arch_init(const void *opaque);
59635 void kvm_arch_exit(void);
59636
59637 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
59638 diff --git a/include/linux/libata.h b/include/linux/libata.h
59639 index cafc09a..d7e7829 100644
59640 --- a/include/linux/libata.h
59641 +++ b/include/linux/libata.h
59642 @@ -909,7 +909,7 @@ struct ata_port_operations {
59643 * fields must be pointers.
59644 */
59645 const struct ata_port_operations *inherits;
59646 -};
59647 +} __do_const;
59648
59649 struct ata_port_info {
59650 unsigned long flags;
59651 diff --git a/include/linux/mca.h b/include/linux/mca.h
59652 index 3797270..7765ede 100644
59653 --- a/include/linux/mca.h
59654 +++ b/include/linux/mca.h
59655 @@ -80,7 +80,7 @@ struct mca_bus_accessor_functions {
59656 int region);
59657 void * (*mca_transform_memory)(struct mca_device *,
59658 void *memory);
59659 -};
59660 +} __no_const;
59661
59662 struct mca_bus {
59663 u64 default_dma_mask;
59664 diff --git a/include/linux/memory.h b/include/linux/memory.h
59665 index 935699b..11042cc 100644
59666 --- a/include/linux/memory.h
59667 +++ b/include/linux/memory.h
59668 @@ -144,7 +144,7 @@ struct memory_accessor {
59669 size_t count);
59670 ssize_t (*write)(struct memory_accessor *, const char *buf,
59671 off_t offset, size_t count);
59672 -};
59673 +} __no_const;
59674
59675 /*
59676 * Kernel text modification mutex, used for code patching. Users of this lock
59677 diff --git a/include/linux/mfd/abx500.h b/include/linux/mfd/abx500.h
59678 index 9970337..9444122 100644
59679 --- a/include/linux/mfd/abx500.h
59680 +++ b/include/linux/mfd/abx500.h
59681 @@ -188,6 +188,7 @@ struct abx500_ops {
59682 int (*event_registers_startup_state_get) (struct device *, u8 *);
59683 int (*startup_irq_enabled) (struct device *, unsigned int);
59684 };
59685 +typedef struct abx500_ops __no_const abx500_ops_no_const;
59686
59687 int abx500_register_ops(struct device *core_dev, struct abx500_ops *ops);
59688 void abx500_remove_ops(struct device *dev);
59689 diff --git a/include/linux/mm.h b/include/linux/mm.h
59690 index 4baadd1..2e0b45e 100644
59691 --- a/include/linux/mm.h
59692 +++ b/include/linux/mm.h
59693 @@ -115,7 +115,14 @@ extern unsigned int kobjsize(const void *objp);
59694
59695 #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
59696 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
59697 +
59698 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
59699 +#define VM_SAO 0x00000000 /* Strong Access Ordering (powerpc) */
59700 +#define VM_PAGEEXEC 0x20000000 /* vma->vm_page_prot needs special handling */
59701 +#else
59702 #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
59703 +#endif
59704 +
59705 #define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */
59706 #define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
59707
59708 @@ -1012,34 +1019,6 @@ int set_page_dirty(struct page *page);
59709 int set_page_dirty_lock(struct page *page);
59710 int clear_page_dirty_for_io(struct page *page);
59711
59712 -/* Is the vma a continuation of the stack vma above it? */
59713 -static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
59714 -{
59715 - return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
59716 -}
59717 -
59718 -static inline int stack_guard_page_start(struct vm_area_struct *vma,
59719 - unsigned long addr)
59720 -{
59721 - return (vma->vm_flags & VM_GROWSDOWN) &&
59722 - (vma->vm_start == addr) &&
59723 - !vma_growsdown(vma->vm_prev, addr);
59724 -}
59725 -
59726 -/* Is the vma a continuation of the stack vma below it? */
59727 -static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
59728 -{
59729 - return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
59730 -}
59731 -
59732 -static inline int stack_guard_page_end(struct vm_area_struct *vma,
59733 - unsigned long addr)
59734 -{
59735 - return (vma->vm_flags & VM_GROWSUP) &&
59736 - (vma->vm_end == addr) &&
59737 - !vma_growsup(vma->vm_next, addr);
59738 -}
59739 -
59740 extern unsigned long move_page_tables(struct vm_area_struct *vma,
59741 unsigned long old_addr, struct vm_area_struct *new_vma,
59742 unsigned long new_addr, unsigned long len);
59743 @@ -1134,6 +1113,15 @@ static inline void sync_mm_rss(struct task_struct *task, struct mm_struct *mm)
59744 }
59745 #endif
59746
59747 +#ifdef CONFIG_MMU
59748 +pgprot_t vm_get_page_prot(vm_flags_t vm_flags);
59749 +#else
59750 +static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
59751 +{
59752 + return __pgprot(0);
59753 +}
59754 +#endif
59755 +
59756 int vma_wants_writenotify(struct vm_area_struct *vma);
59757
59758 extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
59759 @@ -1419,6 +1407,7 @@ out:
59760 }
59761
59762 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
59763 +extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
59764
59765 extern unsigned long do_brk(unsigned long, unsigned long);
59766
59767 @@ -1476,6 +1465,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
59768 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
59769 struct vm_area_struct **pprev);
59770
59771 +extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
59772 +extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
59773 +extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
59774 +
59775 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
59776 NULL if none. Assume start_addr < end_addr. */
59777 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
59778 @@ -1492,15 +1485,6 @@ static inline unsigned long vma_pages(struct vm_area_struct *vma)
59779 return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
59780 }
59781
59782 -#ifdef CONFIG_MMU
59783 -pgprot_t vm_get_page_prot(unsigned long vm_flags);
59784 -#else
59785 -static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
59786 -{
59787 - return __pgprot(0);
59788 -}
59789 -#endif
59790 -
59791 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
59792 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
59793 unsigned long pfn, unsigned long size, pgprot_t);
59794 @@ -1614,7 +1598,7 @@ extern int unpoison_memory(unsigned long pfn);
59795 extern int sysctl_memory_failure_early_kill;
59796 extern int sysctl_memory_failure_recovery;
59797 extern void shake_page(struct page *p, int access);
59798 -extern atomic_long_t mce_bad_pages;
59799 +extern atomic_long_unchecked_t mce_bad_pages;
59800 extern int soft_offline_page(struct page *page, int flags);
59801
59802 extern void dump_page(struct page *page);
59803 @@ -1628,5 +1612,11 @@ extern void copy_user_huge_page(struct page *dst, struct page *src,
59804 unsigned int pages_per_huge_page);
59805 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
59806
59807 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
59808 +extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
59809 +#else
59810 +static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
59811 +#endif
59812 +
59813 #endif /* __KERNEL__ */
59814 #endif /* _LINUX_MM_H */
59815 diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
59816 index 5b42f1b..759e4b4 100644
59817 --- a/include/linux/mm_types.h
59818 +++ b/include/linux/mm_types.h
59819 @@ -253,6 +253,8 @@ struct vm_area_struct {
59820 #ifdef CONFIG_NUMA
59821 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
59822 #endif
59823 +
59824 + struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
59825 };
59826
59827 struct core_thread {
59828 @@ -389,6 +391,24 @@ struct mm_struct {
59829 #ifdef CONFIG_CPUMASK_OFFSTACK
59830 struct cpumask cpumask_allocation;
59831 #endif
59832 +
59833 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
59834 + unsigned long pax_flags;
59835 +#endif
59836 +
59837 +#ifdef CONFIG_PAX_DLRESOLVE
59838 + unsigned long call_dl_resolve;
59839 +#endif
59840 +
59841 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
59842 + unsigned long call_syscall;
59843 +#endif
59844 +
59845 +#ifdef CONFIG_PAX_ASLR
59846 + unsigned long delta_mmap; /* randomized offset */
59847 + unsigned long delta_stack; /* randomized offset */
59848 +#endif
59849 +
59850 };
59851
59852 static inline void mm_init_cpumask(struct mm_struct *mm)
59853 diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
59854 index 1d1b1e1..2a13c78 100644
59855 --- a/include/linux/mmu_notifier.h
59856 +++ b/include/linux/mmu_notifier.h
59857 @@ -255,12 +255,12 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
59858 */
59859 #define ptep_clear_flush_notify(__vma, __address, __ptep) \
59860 ({ \
59861 - pte_t __pte; \
59862 + pte_t ___pte; \
59863 struct vm_area_struct *___vma = __vma; \
59864 unsigned long ___address = __address; \
59865 - __pte = ptep_clear_flush(___vma, ___address, __ptep); \
59866 + ___pte = ptep_clear_flush(___vma, ___address, __ptep); \
59867 mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \
59868 - __pte; \
59869 + ___pte; \
59870 })
59871
59872 #define pmdp_clear_flush_notify(__vma, __address, __pmdp) \
59873 diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
59874 index 188cb2f..d78409b 100644
59875 --- a/include/linux/mmzone.h
59876 +++ b/include/linux/mmzone.h
59877 @@ -369,7 +369,7 @@ struct zone {
59878 unsigned long flags; /* zone flags, see below */
59879
59880 /* Zone statistics */
59881 - atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
59882 + atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
59883
59884 /*
59885 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
59886 diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
59887 index 468819c..17b9db3 100644
59888 --- a/include/linux/mod_devicetable.h
59889 +++ b/include/linux/mod_devicetable.h
59890 @@ -12,7 +12,7 @@
59891 typedef unsigned long kernel_ulong_t;
59892 #endif
59893
59894 -#define PCI_ANY_ID (~0)
59895 +#define PCI_ANY_ID ((__u16)~0)
59896
59897 struct pci_device_id {
59898 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
59899 @@ -131,7 +131,7 @@ struct usb_device_id {
59900 #define USB_DEVICE_ID_MATCH_INT_SUBCLASS 0x0100
59901 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
59902
59903 -#define HID_ANY_ID (~0)
59904 +#define HID_ANY_ID (~0U)
59905
59906 struct hid_device_id {
59907 __u16 bus;
59908 diff --git a/include/linux/module.h b/include/linux/module.h
59909 index 3cb7839..511cb87 100644
59910 --- a/include/linux/module.h
59911 +++ b/include/linux/module.h
59912 @@ -17,6 +17,7 @@
59913 #include <linux/moduleparam.h>
59914 #include <linux/tracepoint.h>
59915 #include <linux/export.h>
59916 +#include <linux/fs.h>
59917
59918 #include <linux/percpu.h>
59919 #include <asm/module.h>
59920 @@ -261,19 +262,16 @@ struct module
59921 int (*init)(void);
59922
59923 /* If this is non-NULL, vfree after init() returns */
59924 - void *module_init;
59925 + void *module_init_rx, *module_init_rw;
59926
59927 /* Here is the actual code + data, vfree'd on unload. */
59928 - void *module_core;
59929 + void *module_core_rx, *module_core_rw;
59930
59931 /* Here are the sizes of the init and core sections */
59932 - unsigned int init_size, core_size;
59933 + unsigned int init_size_rw, core_size_rw;
59934
59935 /* The size of the executable code in each section. */
59936 - unsigned int init_text_size, core_text_size;
59937 -
59938 - /* Size of RO sections of the module (text+rodata) */
59939 - unsigned int init_ro_size, core_ro_size;
59940 + unsigned int init_size_rx, core_size_rx;
59941
59942 /* Arch-specific module values */
59943 struct mod_arch_specific arch;
59944 @@ -329,6 +327,10 @@ struct module
59945 #ifdef CONFIG_EVENT_TRACING
59946 struct ftrace_event_call **trace_events;
59947 unsigned int num_trace_events;
59948 + struct file_operations trace_id;
59949 + struct file_operations trace_enable;
59950 + struct file_operations trace_format;
59951 + struct file_operations trace_filter;
59952 #endif
59953 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
59954 unsigned int num_ftrace_callsites;
59955 @@ -379,16 +381,46 @@ bool is_module_address(unsigned long addr);
59956 bool is_module_percpu_address(unsigned long addr);
59957 bool is_module_text_address(unsigned long addr);
59958
59959 +static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
59960 +{
59961 +
59962 +#ifdef CONFIG_PAX_KERNEXEC
59963 + if (ktla_ktva(addr) >= (unsigned long)start &&
59964 + ktla_ktva(addr) < (unsigned long)start + size)
59965 + return 1;
59966 +#endif
59967 +
59968 + return ((void *)addr >= start && (void *)addr < start + size);
59969 +}
59970 +
59971 +static inline int within_module_core_rx(unsigned long addr, struct module *mod)
59972 +{
59973 + return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
59974 +}
59975 +
59976 +static inline int within_module_core_rw(unsigned long addr, struct module *mod)
59977 +{
59978 + return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
59979 +}
59980 +
59981 +static inline int within_module_init_rx(unsigned long addr, struct module *mod)
59982 +{
59983 + return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
59984 +}
59985 +
59986 +static inline int within_module_init_rw(unsigned long addr, struct module *mod)
59987 +{
59988 + return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
59989 +}
59990 +
59991 static inline int within_module_core(unsigned long addr, struct module *mod)
59992 {
59993 - return (unsigned long)mod->module_core <= addr &&
59994 - addr < (unsigned long)mod->module_core + mod->core_size;
59995 + return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
59996 }
59997
59998 static inline int within_module_init(unsigned long addr, struct module *mod)
59999 {
60000 - return (unsigned long)mod->module_init <= addr &&
60001 - addr < (unsigned long)mod->module_init + mod->init_size;
60002 + return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
60003 }
60004
60005 /* Search for module by name: must hold module_mutex. */
60006 diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
60007 index b2be02e..6a9fdb1 100644
60008 --- a/include/linux/moduleloader.h
60009 +++ b/include/linux/moduleloader.h
60010 @@ -25,9 +25,21 @@ unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section);
60011 sections. Returns NULL on failure. */
60012 void *module_alloc(unsigned long size);
60013
60014 +#ifdef CONFIG_PAX_KERNEXEC
60015 +void *module_alloc_exec(unsigned long size);
60016 +#else
60017 +#define module_alloc_exec(x) module_alloc(x)
60018 +#endif
60019 +
60020 /* Free memory returned from module_alloc. */
60021 void module_free(struct module *mod, void *module_region);
60022
60023 +#ifdef CONFIG_PAX_KERNEXEC
60024 +void module_free_exec(struct module *mod, void *module_region);
60025 +#else
60026 +#define module_free_exec(x, y) module_free((x), (y))
60027 +#endif
60028 +
60029 /* Apply the given relocation to the (simplified) ELF. Return -error
60030 or 0. */
60031 int apply_relocate(Elf_Shdr *sechdrs,
60032 diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
60033 index 7939f63..ec6df57 100644
60034 --- a/include/linux/moduleparam.h
60035 +++ b/include/linux/moduleparam.h
60036 @@ -260,7 +260,7 @@ static inline void __kernel_param_unlock(void)
60037 * @len is usually just sizeof(string).
60038 */
60039 #define module_param_string(name, string, len, perm) \
60040 - static const struct kparam_string __param_string_##name \
60041 + static const struct kparam_string __param_string_##name __used \
60042 = { len, string }; \
60043 __module_param_call(MODULE_PARAM_PREFIX, name, \
60044 &param_ops_string, \
60045 @@ -395,7 +395,7 @@ extern int param_get_invbool(char *buffer, const struct kernel_param *kp);
60046 * module_param_named() for why this might be necessary.
60047 */
60048 #define module_param_array_named(name, array, type, nump, perm) \
60049 - static const struct kparam_array __param_arr_##name \
60050 + static const struct kparam_array __param_arr_##name __used \
60051 = { .max = ARRAY_SIZE(array), .num = nump, \
60052 .ops = &param_ops_##type, \
60053 .elemsize = sizeof(array[0]), .elem = array }; \
60054 diff --git a/include/linux/namei.h b/include/linux/namei.h
60055 index ffc0213..2c1f2cb 100644
60056 --- a/include/linux/namei.h
60057 +++ b/include/linux/namei.h
60058 @@ -24,7 +24,7 @@ struct nameidata {
60059 unsigned seq;
60060 int last_type;
60061 unsigned depth;
60062 - char *saved_names[MAX_NESTED_LINKS + 1];
60063 + const char *saved_names[MAX_NESTED_LINKS + 1];
60064
60065 /* Intent data */
60066 union {
60067 @@ -94,12 +94,12 @@ extern int follow_up(struct path *);
60068 extern struct dentry *lock_rename(struct dentry *, struct dentry *);
60069 extern void unlock_rename(struct dentry *, struct dentry *);
60070
60071 -static inline void nd_set_link(struct nameidata *nd, char *path)
60072 +static inline void nd_set_link(struct nameidata *nd, const char *path)
60073 {
60074 nd->saved_names[nd->depth] = path;
60075 }
60076
60077 -static inline char *nd_get_link(struct nameidata *nd)
60078 +static inline const char *nd_get_link(const struct nameidata *nd)
60079 {
60080 return nd->saved_names[nd->depth];
60081 }
60082 diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
60083 index a82ad4d..90d15b7 100644
60084 --- a/include/linux/netdevice.h
60085 +++ b/include/linux/netdevice.h
60086 @@ -949,6 +949,7 @@ struct net_device_ops {
60087 int (*ndo_set_features)(struct net_device *dev,
60088 u32 features);
60089 };
60090 +typedef struct net_device_ops __no_const net_device_ops_no_const;
60091
60092 /*
60093 * The DEVICE structure.
60094 @@ -1088,7 +1089,7 @@ struct net_device {
60095 int iflink;
60096
60097 struct net_device_stats stats;
60098 - atomic_long_t rx_dropped; /* dropped packets by core network
60099 + atomic_long_unchecked_t rx_dropped; /* dropped packets by core network
60100 * Do not use this in drivers.
60101 */
60102
60103 diff --git a/include/linux/netfilter/xt_gradm.h b/include/linux/netfilter/xt_gradm.h
60104 new file mode 100644
60105 index 0000000..33f4af8
60106 --- /dev/null
60107 +++ b/include/linux/netfilter/xt_gradm.h
60108 @@ -0,0 +1,9 @@
60109 +#ifndef _LINUX_NETFILTER_XT_GRADM_H
60110 +#define _LINUX_NETFILTER_XT_GRADM_H 1
60111 +
60112 +struct xt_gradm_mtinfo {
60113 + __u16 flags;
60114 + __u16 invflags;
60115 +};
60116 +
60117 +#endif
60118 diff --git a/include/linux/of_pdt.h b/include/linux/of_pdt.h
60119 index c65a18a..0c05f3a 100644
60120 --- a/include/linux/of_pdt.h
60121 +++ b/include/linux/of_pdt.h
60122 @@ -32,7 +32,7 @@ struct of_pdt_ops {
60123
60124 /* return 0 on success; fill in 'len' with number of bytes in path */
60125 int (*pkg2path)(phandle node, char *buf, const int buflen, int *len);
60126 -};
60127 +} __no_const;
60128
60129 extern void *prom_early_alloc(unsigned long size);
60130
60131 diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
60132 index a4c5624..79d6d88 100644
60133 --- a/include/linux/oprofile.h
60134 +++ b/include/linux/oprofile.h
60135 @@ -139,9 +139,9 @@ int oprofilefs_create_ulong(struct super_block * sb, struct dentry * root,
60136 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
60137 char const * name, ulong * val);
60138
60139 -/** Create a file for read-only access to an atomic_t. */
60140 +/** Create a file for read-only access to an atomic_unchecked_t. */
60141 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
60142 - char const * name, atomic_t * val);
60143 + char const * name, atomic_unchecked_t * val);
60144
60145 /** create a directory */
60146 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
60147 diff --git a/include/linux/padata.h b/include/linux/padata.h
60148 index 4633b2f..988bc08 100644
60149 --- a/include/linux/padata.h
60150 +++ b/include/linux/padata.h
60151 @@ -129,7 +129,7 @@ struct parallel_data {
60152 struct padata_instance *pinst;
60153 struct padata_parallel_queue __percpu *pqueue;
60154 struct padata_serial_queue __percpu *squeue;
60155 - atomic_t seq_nr;
60156 + atomic_unchecked_t seq_nr;
60157 atomic_t reorder_objects;
60158 atomic_t refcnt;
60159 unsigned int max_seq_nr;
60160 diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
60161 index b1f8912..c955bff 100644
60162 --- a/include/linux/perf_event.h
60163 +++ b/include/linux/perf_event.h
60164 @@ -748,8 +748,8 @@ struct perf_event {
60165
60166 enum perf_event_active_state state;
60167 unsigned int attach_state;
60168 - local64_t count;
60169 - atomic64_t child_count;
60170 + local64_t count; /* PaX: fix it one day */
60171 + atomic64_unchecked_t child_count;
60172
60173 /*
60174 * These are the total time in nanoseconds that the event
60175 @@ -800,8 +800,8 @@ struct perf_event {
60176 * These accumulate total time (in nanoseconds) that children
60177 * events have been enabled and running, respectively.
60178 */
60179 - atomic64_t child_total_time_enabled;
60180 - atomic64_t child_total_time_running;
60181 + atomic64_unchecked_t child_total_time_enabled;
60182 + atomic64_unchecked_t child_total_time_running;
60183
60184 /*
60185 * Protect attach/detach and child_list:
60186 diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
60187 index 77257c9..51d473a 100644
60188 --- a/include/linux/pipe_fs_i.h
60189 +++ b/include/linux/pipe_fs_i.h
60190 @@ -46,9 +46,9 @@ struct pipe_buffer {
60191 struct pipe_inode_info {
60192 wait_queue_head_t wait;
60193 unsigned int nrbufs, curbuf, buffers;
60194 - unsigned int readers;
60195 - unsigned int writers;
60196 - unsigned int waiting_writers;
60197 + atomic_t readers;
60198 + atomic_t writers;
60199 + atomic_t waiting_writers;
60200 unsigned int r_counter;
60201 unsigned int w_counter;
60202 struct page *tmp_page;
60203 diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
60204 index d3085e7..fd01052 100644
60205 --- a/include/linux/pm_runtime.h
60206 +++ b/include/linux/pm_runtime.h
60207 @@ -95,7 +95,7 @@ static inline bool pm_runtime_callbacks_present(struct device *dev)
60208
60209 static inline void pm_runtime_mark_last_busy(struct device *dev)
60210 {
60211 - ACCESS_ONCE(dev->power.last_busy) = jiffies;
60212 + ACCESS_ONCE_RW(dev->power.last_busy) = jiffies;
60213 }
60214
60215 #else /* !CONFIG_PM_RUNTIME */
60216 diff --git a/include/linux/poison.h b/include/linux/poison.h
60217 index 79159de..f1233a9 100644
60218 --- a/include/linux/poison.h
60219 +++ b/include/linux/poison.h
60220 @@ -19,8 +19,8 @@
60221 * under normal circumstances, used to verify that nobody uses
60222 * non-initialized list entries.
60223 */
60224 -#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
60225 -#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
60226 +#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
60227 +#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
60228
60229 /********** include/linux/timer.h **********/
60230 /*
60231 diff --git a/include/linux/preempt.h b/include/linux/preempt.h
60232 index 58969b2..ead129b 100644
60233 --- a/include/linux/preempt.h
60234 +++ b/include/linux/preempt.h
60235 @@ -123,7 +123,7 @@ struct preempt_ops {
60236 void (*sched_in)(struct preempt_notifier *notifier, int cpu);
60237 void (*sched_out)(struct preempt_notifier *notifier,
60238 struct task_struct *next);
60239 -};
60240 +} __no_const;
60241
60242 /**
60243 * preempt_notifier - key for installing preemption notifiers
60244 diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
60245 index 643b96c..ef55a9c 100644
60246 --- a/include/linux/proc_fs.h
60247 +++ b/include/linux/proc_fs.h
60248 @@ -155,6 +155,19 @@ static inline struct proc_dir_entry *proc_create(const char *name, mode_t mode,
60249 return proc_create_data(name, mode, parent, proc_fops, NULL);
60250 }
60251
60252 +static inline struct proc_dir_entry *proc_create_grsec(const char *name, mode_t mode,
60253 + struct proc_dir_entry *parent, const struct file_operations *proc_fops)
60254 +{
60255 +#ifdef CONFIG_GRKERNSEC_PROC_USER
60256 + return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
60257 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
60258 + return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
60259 +#else
60260 + return proc_create_data(name, mode, parent, proc_fops, NULL);
60261 +#endif
60262 +}
60263 +
60264 +
60265 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
60266 mode_t mode, struct proc_dir_entry *base,
60267 read_proc_t *read_proc, void * data)
60268 @@ -258,7 +271,7 @@ union proc_op {
60269 int (*proc_show)(struct seq_file *m,
60270 struct pid_namespace *ns, struct pid *pid,
60271 struct task_struct *task);
60272 -};
60273 +} __no_const;
60274
60275 struct ctl_table_header;
60276 struct ctl_table;
60277 diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
60278 index 800f113..e9ee2e3 100644
60279 --- a/include/linux/ptrace.h
60280 +++ b/include/linux/ptrace.h
60281 @@ -129,10 +129,12 @@ extern void __ptrace_unlink(struct task_struct *child);
60282 extern void exit_ptrace(struct task_struct *tracer);
60283 #define PTRACE_MODE_READ 1
60284 #define PTRACE_MODE_ATTACH 2
60285 -/* Returns 0 on success, -errno on denial. */
60286 -extern int __ptrace_may_access(struct task_struct *task, unsigned int mode);
60287 /* Returns true on success, false on denial. */
60288 extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
60289 +/* Returns true on success, false on denial. */
60290 +extern bool ptrace_may_access_log(struct task_struct *task, unsigned int mode);
60291 +/* Returns true on success, false on denial. */
60292 +extern bool ptrace_may_access_nolock(struct task_struct *task, unsigned int mode);
60293
60294 static inline int ptrace_reparented(struct task_struct *child)
60295 {
60296 diff --git a/include/linux/random.h b/include/linux/random.h
60297 index 8f74538..02a1012 100644
60298 --- a/include/linux/random.h
60299 +++ b/include/linux/random.h
60300 @@ -69,12 +69,17 @@ void srandom32(u32 seed);
60301
60302 u32 prandom32(struct rnd_state *);
60303
60304 +static inline unsigned long pax_get_random_long(void)
60305 +{
60306 + return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0);
60307 +}
60308 +
60309 /*
60310 * Handle minimum values for seeds
60311 */
60312 static inline u32 __seed(u32 x, u32 m)
60313 {
60314 - return (x < m) ? x + m : x;
60315 + return (x <= m) ? x + m + 1 : x;
60316 }
60317
60318 /**
60319 diff --git a/include/linux/reboot.h b/include/linux/reboot.h
60320 index e0879a7..a12f962 100644
60321 --- a/include/linux/reboot.h
60322 +++ b/include/linux/reboot.h
60323 @@ -52,9 +52,9 @@ extern int unregister_reboot_notifier(struct notifier_block *);
60324 * Architecture-specific implementations of sys_reboot commands.
60325 */
60326
60327 -extern void machine_restart(char *cmd);
60328 -extern void machine_halt(void);
60329 -extern void machine_power_off(void);
60330 +extern void machine_restart(char *cmd) __noreturn;
60331 +extern void machine_halt(void) __noreturn;
60332 +extern void machine_power_off(void) __noreturn;
60333
60334 extern void machine_shutdown(void);
60335 struct pt_regs;
60336 @@ -65,9 +65,9 @@ extern void machine_crash_shutdown(struct pt_regs *);
60337 */
60338
60339 extern void kernel_restart_prepare(char *cmd);
60340 -extern void kernel_restart(char *cmd);
60341 -extern void kernel_halt(void);
60342 -extern void kernel_power_off(void);
60343 +extern void kernel_restart(char *cmd) __noreturn;
60344 +extern void kernel_halt(void) __noreturn;
60345 +extern void kernel_power_off(void) __noreturn;
60346
60347 extern int C_A_D; /* for sysctl */
60348 void ctrl_alt_del(void);
60349 @@ -81,7 +81,7 @@ extern int orderly_poweroff(bool force);
60350 * Emergency restart, callable from an interrupt handler.
60351 */
60352
60353 -extern void emergency_restart(void);
60354 +extern void emergency_restart(void) __noreturn;
60355 #include <asm/emergency-restart.h>
60356
60357 #endif
60358 diff --git a/include/linux/reiserfs_fs.h b/include/linux/reiserfs_fs.h
60359 index 96d465f..b084e05 100644
60360 --- a/include/linux/reiserfs_fs.h
60361 +++ b/include/linux/reiserfs_fs.h
60362 @@ -1406,7 +1406,7 @@ static inline loff_t max_reiserfs_offset(struct inode *inode)
60363 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
60364
60365 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
60366 -#define get_generation(s) atomic_read (&fs_generation(s))
60367 +#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
60368 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
60369 #define __fs_changed(gen,s) (gen != get_generation (s))
60370 #define fs_changed(gen,s) \
60371 diff --git a/include/linux/reiserfs_fs_sb.h b/include/linux/reiserfs_fs_sb.h
60372 index 52c83b6..18ed7eb 100644
60373 --- a/include/linux/reiserfs_fs_sb.h
60374 +++ b/include/linux/reiserfs_fs_sb.h
60375 @@ -386,7 +386,7 @@ struct reiserfs_sb_info {
60376 /* Comment? -Hans */
60377 wait_queue_head_t s_wait;
60378 /* To be obsoleted soon by per buffer seals.. -Hans */
60379 - atomic_t s_generation_counter; // increased by one every time the
60380 + atomic_unchecked_t s_generation_counter; // increased by one every time the
60381 // tree gets re-balanced
60382 unsigned long s_properties; /* File system properties. Currently holds
60383 on-disk FS format */
60384 diff --git a/include/linux/relay.h b/include/linux/relay.h
60385 index 14a86bc..17d0700 100644
60386 --- a/include/linux/relay.h
60387 +++ b/include/linux/relay.h
60388 @@ -159,7 +159,7 @@ struct rchan_callbacks
60389 * The callback should return 0 if successful, negative if not.
60390 */
60391 int (*remove_buf_file)(struct dentry *dentry);
60392 -};
60393 +} __no_const;
60394
60395 /*
60396 * CONFIG_RELAY kernel API, kernel/relay.c
60397 diff --git a/include/linux/rfkill.h b/include/linux/rfkill.h
60398 index c6c6084..5bf1212 100644
60399 --- a/include/linux/rfkill.h
60400 +++ b/include/linux/rfkill.h
60401 @@ -147,6 +147,7 @@ struct rfkill_ops {
60402 void (*query)(struct rfkill *rfkill, void *data);
60403 int (*set_block)(void *data, bool blocked);
60404 };
60405 +typedef struct rfkill_ops __no_const rfkill_ops_no_const;
60406
60407 #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
60408 /**
60409 diff --git a/include/linux/rio.h b/include/linux/rio.h
60410 index 4d50611..c6858a2 100644
60411 --- a/include/linux/rio.h
60412 +++ b/include/linux/rio.h
60413 @@ -315,7 +315,7 @@ struct rio_ops {
60414 int mbox, void *buffer, size_t len);
60415 int (*add_inb_buffer)(struct rio_mport *mport, int mbox, void *buf);
60416 void *(*get_inb_message)(struct rio_mport *mport, int mbox);
60417 -};
60418 +} __no_const;
60419
60420 #define RIO_RESOURCE_MEM 0x00000100
60421 #define RIO_RESOURCE_DOORBELL 0x00000200
60422 diff --git a/include/linux/rmap.h b/include/linux/rmap.h
60423 index 2148b12..519b820 100644
60424 --- a/include/linux/rmap.h
60425 +++ b/include/linux/rmap.h
60426 @@ -119,8 +119,8 @@ static inline void anon_vma_unlock(struct anon_vma *anon_vma)
60427 void anon_vma_init(void); /* create anon_vma_cachep */
60428 int anon_vma_prepare(struct vm_area_struct *);
60429 void unlink_anon_vmas(struct vm_area_struct *);
60430 -int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
60431 -int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
60432 +int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *);
60433 +int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *);
60434 void __anon_vma_link(struct vm_area_struct *);
60435
60436 static inline void anon_vma_merge(struct vm_area_struct *vma,
60437 diff --git a/include/linux/sched.h b/include/linux/sched.h
60438 index 1c4f3e9..c5b241a 100644
60439 --- a/include/linux/sched.h
60440 +++ b/include/linux/sched.h
60441 @@ -101,6 +101,7 @@ struct bio_list;
60442 struct fs_struct;
60443 struct perf_event_context;
60444 struct blk_plug;
60445 +struct linux_binprm;
60446
60447 /*
60448 * List of flags we want to share for kernel threads,
60449 @@ -380,10 +381,13 @@ struct user_namespace;
60450 #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
60451
60452 extern int sysctl_max_map_count;
60453 +extern unsigned long sysctl_heap_stack_gap;
60454
60455 #include <linux/aio.h>
60456
60457 #ifdef CONFIG_MMU
60458 +extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len);
60459 +extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len);
60460 extern void arch_pick_mmap_layout(struct mm_struct *mm);
60461 extern unsigned long
60462 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
60463 @@ -629,6 +633,17 @@ struct signal_struct {
60464 #ifdef CONFIG_TASKSTATS
60465 struct taskstats *stats;
60466 #endif
60467 +
60468 +#ifdef CONFIG_GRKERNSEC
60469 + u32 curr_ip;
60470 + u32 saved_ip;
60471 + u32 gr_saddr;
60472 + u32 gr_daddr;
60473 + u16 gr_sport;
60474 + u16 gr_dport;
60475 + u8 used_accept:1;
60476 +#endif
60477 +
60478 #ifdef CONFIG_AUDIT
60479 unsigned audit_tty;
60480 struct tty_audit_buf *tty_audit_buf;
60481 @@ -710,6 +725,11 @@ struct user_struct {
60482 struct key *session_keyring; /* UID's default session keyring */
60483 #endif
60484
60485 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
60486 + unsigned int banned;
60487 + unsigned long ban_expires;
60488 +#endif
60489 +
60490 /* Hash table maintenance information */
60491 struct hlist_node uidhash_node;
60492 uid_t uid;
60493 @@ -1337,8 +1357,8 @@ struct task_struct {
60494 struct list_head thread_group;
60495
60496 struct completion *vfork_done; /* for vfork() */
60497 - int __user *set_child_tid; /* CLONE_CHILD_SETTID */
60498 - int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
60499 + pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
60500 + pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
60501
60502 cputime_t utime, stime, utimescaled, stimescaled;
60503 cputime_t gtime;
60504 @@ -1354,13 +1374,6 @@ struct task_struct {
60505 struct task_cputime cputime_expires;
60506 struct list_head cpu_timers[3];
60507
60508 -/* process credentials */
60509 - const struct cred __rcu *real_cred; /* objective and real subjective task
60510 - * credentials (COW) */
60511 - const struct cred __rcu *cred; /* effective (overridable) subjective task
60512 - * credentials (COW) */
60513 - struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
60514 -
60515 char comm[TASK_COMM_LEN]; /* executable name excluding path
60516 - access with [gs]et_task_comm (which lock
60517 it with task_lock())
60518 @@ -1377,8 +1390,16 @@ struct task_struct {
60519 #endif
60520 /* CPU-specific state of this task */
60521 struct thread_struct thread;
60522 +/* thread_info moved to task_struct */
60523 +#ifdef CONFIG_X86
60524 + struct thread_info tinfo;
60525 +#endif
60526 /* filesystem information */
60527 struct fs_struct *fs;
60528 +
60529 + const struct cred __rcu *cred; /* effective (overridable) subjective task
60530 + * credentials (COW) */
60531 +
60532 /* open file information */
60533 struct files_struct *files;
60534 /* namespaces */
60535 @@ -1425,6 +1446,11 @@ struct task_struct {
60536 struct rt_mutex_waiter *pi_blocked_on;
60537 #endif
60538
60539 +/* process credentials */
60540 + const struct cred __rcu *real_cred; /* objective and real subjective task
60541 + * credentials (COW) */
60542 + struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
60543 +
60544 #ifdef CONFIG_DEBUG_MUTEXES
60545 /* mutex deadlock detection */
60546 struct mutex_waiter *blocked_on;
60547 @@ -1540,6 +1566,24 @@ struct task_struct {
60548 unsigned long default_timer_slack_ns;
60549
60550 struct list_head *scm_work_list;
60551 +
60552 +#ifdef CONFIG_GRKERNSEC
60553 + /* grsecurity */
60554 +#ifdef CONFIG_GRKERNSEC_SETXID
60555 + const struct cred *delayed_cred;
60556 +#endif
60557 + struct dentry *gr_chroot_dentry;
60558 + struct acl_subject_label *acl;
60559 + struct acl_role_label *role;
60560 + struct file *exec_file;
60561 + u16 acl_role_id;
60562 + /* is this the task that authenticated to the special role */
60563 + u8 acl_sp_role;
60564 + u8 is_writable;
60565 + u8 brute;
60566 + u8 gr_is_chrooted;
60567 +#endif
60568 +
60569 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
60570 /* Index of current stored address in ret_stack */
60571 int curr_ret_stack;
60572 @@ -1574,6 +1618,51 @@ struct task_struct {
60573 #endif
60574 };
60575
60576 +#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
60577 +#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
60578 +#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
60579 +#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
60580 +/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
60581 +#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
60582 +
60583 +#ifdef CONFIG_PAX_SOFTMODE
60584 +extern int pax_softmode;
60585 +#endif
60586 +
60587 +extern int pax_check_flags(unsigned long *);
60588 +
60589 +/* if tsk != current then task_lock must be held on it */
60590 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
60591 +static inline unsigned long pax_get_flags(struct task_struct *tsk)
60592 +{
60593 + if (likely(tsk->mm))
60594 + return tsk->mm->pax_flags;
60595 + else
60596 + return 0UL;
60597 +}
60598 +
60599 +/* if tsk != current then task_lock must be held on it */
60600 +static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
60601 +{
60602 + if (likely(tsk->mm)) {
60603 + tsk->mm->pax_flags = flags;
60604 + return 0;
60605 + }
60606 + return -EINVAL;
60607 +}
60608 +#endif
60609 +
60610 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
60611 +extern void pax_set_initial_flags(struct linux_binprm *bprm);
60612 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
60613 +extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
60614 +#endif
60615 +
60616 +extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
60617 +extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp);
60618 +extern void pax_report_refcount_overflow(struct pt_regs *regs);
60619 +extern NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type) ATTRIB_NORET;
60620 +
60621 /* Future-safe accessor for struct task_struct's cpus_allowed. */
60622 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
60623
60624 @@ -2081,7 +2170,9 @@ void yield(void);
60625 extern struct exec_domain default_exec_domain;
60626
60627 union thread_union {
60628 +#ifndef CONFIG_X86
60629 struct thread_info thread_info;
60630 +#endif
60631 unsigned long stack[THREAD_SIZE/sizeof(long)];
60632 };
60633
60634 @@ -2114,6 +2205,7 @@ extern struct pid_namespace init_pid_ns;
60635 */
60636
60637 extern struct task_struct *find_task_by_vpid(pid_t nr);
60638 +extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
60639 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
60640 struct pid_namespace *ns);
60641
60642 @@ -2251,7 +2343,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
60643 extern void exit_itimers(struct signal_struct *);
60644 extern void flush_itimer_signals(void);
60645
60646 -extern NORET_TYPE void do_group_exit(int);
60647 +extern NORET_TYPE void do_group_exit(int) ATTRIB_NORET;
60648
60649 extern void daemonize(const char *, ...);
60650 extern int allow_signal(int);
60651 @@ -2416,13 +2508,17 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
60652
60653 #endif
60654
60655 -static inline int object_is_on_stack(void *obj)
60656 +static inline int object_starts_on_stack(void *obj)
60657 {
60658 - void *stack = task_stack_page(current);
60659 + const void *stack = task_stack_page(current);
60660
60661 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
60662 }
60663
60664 +#ifdef CONFIG_PAX_USERCOPY
60665 +extern int object_is_on_stack(const void *obj, unsigned long len);
60666 +#endif
60667 +
60668 extern void thread_info_cache_init(void);
60669
60670 #ifdef CONFIG_DEBUG_STACK_USAGE
60671 diff --git a/include/linux/screen_info.h b/include/linux/screen_info.h
60672 index 899fbb4..1cb4138 100644
60673 --- a/include/linux/screen_info.h
60674 +++ b/include/linux/screen_info.h
60675 @@ -43,7 +43,8 @@ struct screen_info {
60676 __u16 pages; /* 0x32 */
60677 __u16 vesa_attributes; /* 0x34 */
60678 __u32 capabilities; /* 0x36 */
60679 - __u8 _reserved[6]; /* 0x3a */
60680 + __u16 vesapm_size; /* 0x3a */
60681 + __u8 _reserved[4]; /* 0x3c */
60682 } __attribute__((packed));
60683
60684 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
60685 diff --git a/include/linux/security.h b/include/linux/security.h
60686 index e8c619d..e0cbd1c 100644
60687 --- a/include/linux/security.h
60688 +++ b/include/linux/security.h
60689 @@ -37,6 +37,7 @@
60690 #include <linux/xfrm.h>
60691 #include <linux/slab.h>
60692 #include <linux/xattr.h>
60693 +#include <linux/grsecurity.h>
60694 #include <net/flow.h>
60695
60696 /* Maximum number of letters for an LSM name string */
60697 diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
60698 index 0b69a46..e9e5538 100644
60699 --- a/include/linux/seq_file.h
60700 +++ b/include/linux/seq_file.h
60701 @@ -33,6 +33,7 @@ struct seq_operations {
60702 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
60703 int (*show) (struct seq_file *m, void *v);
60704 };
60705 +typedef struct seq_operations __no_const seq_operations_no_const;
60706
60707 #define SEQ_SKIP 1
60708
60709 diff --git a/include/linux/shm.h b/include/linux/shm.h
60710 index 92808b8..c28cac4 100644
60711 --- a/include/linux/shm.h
60712 +++ b/include/linux/shm.h
60713 @@ -98,6 +98,10 @@ struct shmid_kernel /* private to the kernel */
60714
60715 /* The task created the shm object. NULL if the task is dead. */
60716 struct task_struct *shm_creator;
60717 +#ifdef CONFIG_GRKERNSEC
60718 + time_t shm_createtime;
60719 + pid_t shm_lapid;
60720 +#endif
60721 };
60722
60723 /* shm_mode upper byte flags */
60724 diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
60725 index fe86488..1563c1c 100644
60726 --- a/include/linux/skbuff.h
60727 +++ b/include/linux/skbuff.h
60728 @@ -642,7 +642,7 @@ static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
60729 */
60730 static inline int skb_queue_empty(const struct sk_buff_head *list)
60731 {
60732 - return list->next == (struct sk_buff *)list;
60733 + return list->next == (const struct sk_buff *)list;
60734 }
60735
60736 /**
60737 @@ -655,7 +655,7 @@ static inline int skb_queue_empty(const struct sk_buff_head *list)
60738 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
60739 const struct sk_buff *skb)
60740 {
60741 - return skb->next == (struct sk_buff *)list;
60742 + return skb->next == (const struct sk_buff *)list;
60743 }
60744
60745 /**
60746 @@ -668,7 +668,7 @@ static inline bool skb_queue_is_last(const struct sk_buff_head *list,
60747 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
60748 const struct sk_buff *skb)
60749 {
60750 - return skb->prev == (struct sk_buff *)list;
60751 + return skb->prev == (const struct sk_buff *)list;
60752 }
60753
60754 /**
60755 @@ -1523,7 +1523,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
60756 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
60757 */
60758 #ifndef NET_SKB_PAD
60759 -#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
60760 +#define NET_SKB_PAD max(_AC(32,UL), L1_CACHE_BYTES)
60761 #endif
60762
60763 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
60764 diff --git a/include/linux/slab.h b/include/linux/slab.h
60765 index 573c809..e84c132 100644
60766 --- a/include/linux/slab.h
60767 +++ b/include/linux/slab.h
60768 @@ -11,12 +11,20 @@
60769
60770 #include <linux/gfp.h>
60771 #include <linux/types.h>
60772 +#include <linux/err.h>
60773
60774 /*
60775 * Flags to pass to kmem_cache_create().
60776 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
60777 */
60778 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
60779 +
60780 +#ifdef CONFIG_PAX_USERCOPY
60781 +#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
60782 +#else
60783 +#define SLAB_USERCOPY 0x00000000UL
60784 +#endif
60785 +
60786 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
60787 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
60788 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
60789 @@ -87,10 +95,13 @@
60790 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
60791 * Both make kfree a no-op.
60792 */
60793 -#define ZERO_SIZE_PTR ((void *)16)
60794 +#define ZERO_SIZE_PTR \
60795 +({ \
60796 + BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
60797 + (void *)(-MAX_ERRNO-1L); \
60798 +})
60799
60800 -#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
60801 - (unsigned long)ZERO_SIZE_PTR)
60802 +#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
60803
60804 /*
60805 * struct kmem_cache related prototypes
60806 @@ -161,6 +172,7 @@ void * __must_check krealloc(const void *, size_t, gfp_t);
60807 void kfree(const void *);
60808 void kzfree(const void *);
60809 size_t ksize(const void *);
60810 +void check_object_size(const void *ptr, unsigned long n, bool to);
60811
60812 /*
60813 * Allocator specific definitions. These are mainly used to establish optimized
60814 @@ -353,4 +365,59 @@ static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
60815
60816 void __init kmem_cache_init_late(void);
60817
60818 +#define kmalloc(x, y) \
60819 +({ \
60820 + void *___retval; \
60821 + intoverflow_t ___x = (intoverflow_t)x; \
60822 + if (WARN(___x > ULONG_MAX, "kmalloc size overflow\n")) \
60823 + ___retval = NULL; \
60824 + else \
60825 + ___retval = kmalloc((size_t)___x, (y)); \
60826 + ___retval; \
60827 +})
60828 +
60829 +#define kmalloc_node(x, y, z) \
60830 +({ \
60831 + void *___retval; \
60832 + intoverflow_t ___x = (intoverflow_t)x; \
60833 + if (WARN(___x > ULONG_MAX, "kmalloc_node size overflow\n"))\
60834 + ___retval = NULL; \
60835 + else \
60836 + ___retval = kmalloc_node((size_t)___x, (y), (z));\
60837 + ___retval; \
60838 +})
60839 +
60840 +#define kzalloc(x, y) \
60841 +({ \
60842 + void *___retval; \
60843 + intoverflow_t ___x = (intoverflow_t)x; \
60844 + if (WARN(___x > ULONG_MAX, "kzalloc size overflow\n")) \
60845 + ___retval = NULL; \
60846 + else \
60847 + ___retval = kzalloc((size_t)___x, (y)); \
60848 + ___retval; \
60849 +})
60850 +
60851 +#define __krealloc(x, y, z) \
60852 +({ \
60853 + void *___retval; \
60854 + intoverflow_t ___y = (intoverflow_t)y; \
60855 + if (WARN(___y > ULONG_MAX, "__krealloc size overflow\n"))\
60856 + ___retval = NULL; \
60857 + else \
60858 + ___retval = __krealloc((x), (size_t)___y, (z)); \
60859 + ___retval; \
60860 +})
60861 +
60862 +#define krealloc(x, y, z) \
60863 +({ \
60864 + void *___retval; \
60865 + intoverflow_t ___y = (intoverflow_t)y; \
60866 + if (WARN(___y > ULONG_MAX, "krealloc size overflow\n")) \
60867 + ___retval = NULL; \
60868 + else \
60869 + ___retval = krealloc((x), (size_t)___y, (z)); \
60870 + ___retval; \
60871 +})
60872 +
60873 #endif /* _LINUX_SLAB_H */
60874 diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
60875 index d00e0ba..1b3bf7b 100644
60876 --- a/include/linux/slab_def.h
60877 +++ b/include/linux/slab_def.h
60878 @@ -68,10 +68,10 @@ struct kmem_cache {
60879 unsigned long node_allocs;
60880 unsigned long node_frees;
60881 unsigned long node_overflow;
60882 - atomic_t allochit;
60883 - atomic_t allocmiss;
60884 - atomic_t freehit;
60885 - atomic_t freemiss;
60886 + atomic_unchecked_t allochit;
60887 + atomic_unchecked_t allocmiss;
60888 + atomic_unchecked_t freehit;
60889 + atomic_unchecked_t freemiss;
60890
60891 /*
60892 * If debugging is enabled, then the allocator can add additional
60893 diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
60894 index a32bcfd..53b71f4 100644
60895 --- a/include/linux/slub_def.h
60896 +++ b/include/linux/slub_def.h
60897 @@ -89,7 +89,7 @@ struct kmem_cache {
60898 struct kmem_cache_order_objects max;
60899 struct kmem_cache_order_objects min;
60900 gfp_t allocflags; /* gfp flags to use on each alloc */
60901 - int refcount; /* Refcount for slab cache destroy */
60902 + atomic_t refcount; /* Refcount for slab cache destroy */
60903 void (*ctor)(void *);
60904 int inuse; /* Offset to metadata */
60905 int align; /* Alignment */
60906 @@ -215,7 +215,7 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
60907 }
60908
60909 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
60910 -void *__kmalloc(size_t size, gfp_t flags);
60911 +void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1);
60912
60913 static __always_inline void *
60914 kmalloc_order(size_t size, gfp_t flags, unsigned int order)
60915 diff --git a/include/linux/sonet.h b/include/linux/sonet.h
60916 index de8832d..0147b46 100644
60917 --- a/include/linux/sonet.h
60918 +++ b/include/linux/sonet.h
60919 @@ -61,7 +61,7 @@ struct sonet_stats {
60920 #include <linux/atomic.h>
60921
60922 struct k_sonet_stats {
60923 -#define __HANDLE_ITEM(i) atomic_t i
60924 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
60925 __SONET_ITEMS
60926 #undef __HANDLE_ITEM
60927 };
60928 diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
60929 index 3d8f9c4..69f1c0a 100644
60930 --- a/include/linux/sunrpc/clnt.h
60931 +++ b/include/linux/sunrpc/clnt.h
60932 @@ -172,9 +172,9 @@ static inline unsigned short rpc_get_port(const struct sockaddr *sap)
60933 {
60934 switch (sap->sa_family) {
60935 case AF_INET:
60936 - return ntohs(((struct sockaddr_in *)sap)->sin_port);
60937 + return ntohs(((const struct sockaddr_in *)sap)->sin_port);
60938 case AF_INET6:
60939 - return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
60940 + return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
60941 }
60942 return 0;
60943 }
60944 @@ -207,7 +207,7 @@ static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1,
60945 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
60946 const struct sockaddr *src)
60947 {
60948 - const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
60949 + const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
60950 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
60951
60952 dsin->sin_family = ssin->sin_family;
60953 @@ -310,7 +310,7 @@ static inline u32 rpc_get_scope_id(const struct sockaddr *sa)
60954 if (sa->sa_family != AF_INET6)
60955 return 0;
60956
60957 - return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
60958 + return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
60959 }
60960
60961 #endif /* __KERNEL__ */
60962 diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
60963 index e775689..9e206d9 100644
60964 --- a/include/linux/sunrpc/sched.h
60965 +++ b/include/linux/sunrpc/sched.h
60966 @@ -105,6 +105,7 @@ struct rpc_call_ops {
60967 void (*rpc_call_done)(struct rpc_task *, void *);
60968 void (*rpc_release)(void *);
60969 };
60970 +typedef struct rpc_call_ops __no_const rpc_call_ops_no_const;
60971
60972 struct rpc_task_setup {
60973 struct rpc_task *task;
60974 diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
60975 index c14fe86..393245e 100644
60976 --- a/include/linux/sunrpc/svc_rdma.h
60977 +++ b/include/linux/sunrpc/svc_rdma.h
60978 @@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
60979 extern unsigned int svcrdma_max_requests;
60980 extern unsigned int svcrdma_max_req_size;
60981
60982 -extern atomic_t rdma_stat_recv;
60983 -extern atomic_t rdma_stat_read;
60984 -extern atomic_t rdma_stat_write;
60985 -extern atomic_t rdma_stat_sq_starve;
60986 -extern atomic_t rdma_stat_rq_starve;
60987 -extern atomic_t rdma_stat_rq_poll;
60988 -extern atomic_t rdma_stat_rq_prod;
60989 -extern atomic_t rdma_stat_sq_poll;
60990 -extern atomic_t rdma_stat_sq_prod;
60991 +extern atomic_unchecked_t rdma_stat_recv;
60992 +extern atomic_unchecked_t rdma_stat_read;
60993 +extern atomic_unchecked_t rdma_stat_write;
60994 +extern atomic_unchecked_t rdma_stat_sq_starve;
60995 +extern atomic_unchecked_t rdma_stat_rq_starve;
60996 +extern atomic_unchecked_t rdma_stat_rq_poll;
60997 +extern atomic_unchecked_t rdma_stat_rq_prod;
60998 +extern atomic_unchecked_t rdma_stat_sq_poll;
60999 +extern atomic_unchecked_t rdma_stat_sq_prod;
61000
61001 #define RPCRDMA_VERSION 1
61002
61003 diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
61004 index 703cfa3..0b8ca72ac 100644
61005 --- a/include/linux/sysctl.h
61006 +++ b/include/linux/sysctl.h
61007 @@ -155,7 +155,11 @@ enum
61008 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
61009 };
61010
61011 -
61012 +#ifdef CONFIG_PAX_SOFTMODE
61013 +enum {
61014 + PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
61015 +};
61016 +#endif
61017
61018 /* CTL_VM names: */
61019 enum
61020 @@ -968,6 +972,8 @@ typedef int proc_handler (struct ctl_table *ctl, int write,
61021
61022 extern int proc_dostring(struct ctl_table *, int,
61023 void __user *, size_t *, loff_t *);
61024 +extern int proc_dostring_modpriv(struct ctl_table *, int,
61025 + void __user *, size_t *, loff_t *);
61026 extern int proc_dointvec(struct ctl_table *, int,
61027 void __user *, size_t *, loff_t *);
61028 extern int proc_dointvec_minmax(struct ctl_table *, int,
61029 diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
61030 index ff7dc08..893e1bd 100644
61031 --- a/include/linux/tty_ldisc.h
61032 +++ b/include/linux/tty_ldisc.h
61033 @@ -148,7 +148,7 @@ struct tty_ldisc_ops {
61034
61035 struct module *owner;
61036
61037 - int refcount;
61038 + atomic_t refcount;
61039 };
61040
61041 struct tty_ldisc {
61042 diff --git a/include/linux/types.h b/include/linux/types.h
61043 index 57a9723..dbe234a 100644
61044 --- a/include/linux/types.h
61045 +++ b/include/linux/types.h
61046 @@ -213,10 +213,26 @@ typedef struct {
61047 int counter;
61048 } atomic_t;
61049
61050 +#ifdef CONFIG_PAX_REFCOUNT
61051 +typedef struct {
61052 + int counter;
61053 +} atomic_unchecked_t;
61054 +#else
61055 +typedef atomic_t atomic_unchecked_t;
61056 +#endif
61057 +
61058 #ifdef CONFIG_64BIT
61059 typedef struct {
61060 long counter;
61061 } atomic64_t;
61062 +
61063 +#ifdef CONFIG_PAX_REFCOUNT
61064 +typedef struct {
61065 + long counter;
61066 +} atomic64_unchecked_t;
61067 +#else
61068 +typedef atomic64_t atomic64_unchecked_t;
61069 +#endif
61070 #endif
61071
61072 struct list_head {
61073 diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
61074 index 5ca0951..ab496a5 100644
61075 --- a/include/linux/uaccess.h
61076 +++ b/include/linux/uaccess.h
61077 @@ -76,11 +76,11 @@ static inline unsigned long __copy_from_user_nocache(void *to,
61078 long ret; \
61079 mm_segment_t old_fs = get_fs(); \
61080 \
61081 - set_fs(KERNEL_DS); \
61082 pagefault_disable(); \
61083 - ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
61084 - pagefault_enable(); \
61085 + set_fs(KERNEL_DS); \
61086 + ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval)); \
61087 set_fs(old_fs); \
61088 + pagefault_enable(); \
61089 ret; \
61090 })
61091
61092 diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h
61093 index 99c1b4d..bb94261 100644
61094 --- a/include/linux/unaligned/access_ok.h
61095 +++ b/include/linux/unaligned/access_ok.h
61096 @@ -6,32 +6,32 @@
61097
61098 static inline u16 get_unaligned_le16(const void *p)
61099 {
61100 - return le16_to_cpup((__le16 *)p);
61101 + return le16_to_cpup((const __le16 *)p);
61102 }
61103
61104 static inline u32 get_unaligned_le32(const void *p)
61105 {
61106 - return le32_to_cpup((__le32 *)p);
61107 + return le32_to_cpup((const __le32 *)p);
61108 }
61109
61110 static inline u64 get_unaligned_le64(const void *p)
61111 {
61112 - return le64_to_cpup((__le64 *)p);
61113 + return le64_to_cpup((const __le64 *)p);
61114 }
61115
61116 static inline u16 get_unaligned_be16(const void *p)
61117 {
61118 - return be16_to_cpup((__be16 *)p);
61119 + return be16_to_cpup((const __be16 *)p);
61120 }
61121
61122 static inline u32 get_unaligned_be32(const void *p)
61123 {
61124 - return be32_to_cpup((__be32 *)p);
61125 + return be32_to_cpup((const __be32 *)p);
61126 }
61127
61128 static inline u64 get_unaligned_be64(const void *p)
61129 {
61130 - return be64_to_cpup((__be64 *)p);
61131 + return be64_to_cpup((const __be64 *)p);
61132 }
61133
61134 static inline void put_unaligned_le16(u16 val, void *p)
61135 diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h
61136 index e5a40c3..20ab0f6 100644
61137 --- a/include/linux/usb/renesas_usbhs.h
61138 +++ b/include/linux/usb/renesas_usbhs.h
61139 @@ -39,7 +39,7 @@ enum {
61140 */
61141 struct renesas_usbhs_driver_callback {
61142 int (*notify_hotplug)(struct platform_device *pdev);
61143 -};
61144 +} __no_const;
61145
61146 /*
61147 * callback functions for platform
61148 @@ -89,7 +89,7 @@ struct renesas_usbhs_platform_callback {
61149 * VBUS control is needed for Host
61150 */
61151 int (*set_vbus)(struct platform_device *pdev, int enable);
61152 -};
61153 +} __no_const;
61154
61155 /*
61156 * parameters for renesas usbhs
61157 diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
61158 index 6f8fbcf..8259001 100644
61159 --- a/include/linux/vermagic.h
61160 +++ b/include/linux/vermagic.h
61161 @@ -25,9 +25,35 @@
61162 #define MODULE_ARCH_VERMAGIC ""
61163 #endif
61164
61165 +#ifdef CONFIG_PAX_REFCOUNT
61166 +#define MODULE_PAX_REFCOUNT "REFCOUNT "
61167 +#else
61168 +#define MODULE_PAX_REFCOUNT ""
61169 +#endif
61170 +
61171 +#ifdef CONSTIFY_PLUGIN
61172 +#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
61173 +#else
61174 +#define MODULE_CONSTIFY_PLUGIN ""
61175 +#endif
61176 +
61177 +#ifdef STACKLEAK_PLUGIN
61178 +#define MODULE_STACKLEAK_PLUGIN "STACKLEAK_PLUGIN "
61179 +#else
61180 +#define MODULE_STACKLEAK_PLUGIN ""
61181 +#endif
61182 +
61183 +#ifdef CONFIG_GRKERNSEC
61184 +#define MODULE_GRSEC "GRSEC "
61185 +#else
61186 +#define MODULE_GRSEC ""
61187 +#endif
61188 +
61189 #define VERMAGIC_STRING \
61190 UTS_RELEASE " " \
61191 MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
61192 MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
61193 - MODULE_ARCH_VERMAGIC
61194 + MODULE_ARCH_VERMAGIC \
61195 + MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_STACKLEAK_PLUGIN \
61196 + MODULE_GRSEC
61197
61198 diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
61199 index 4bde182..aec92c1 100644
61200 --- a/include/linux/vmalloc.h
61201 +++ b/include/linux/vmalloc.h
61202 @@ -14,6 +14,11 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */
61203 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
61204 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
61205 #define VM_UNLIST 0x00000020 /* vm_struct is not listed in vmlist */
61206 +
61207 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
61208 +#define VM_KERNEXEC 0x00000040 /* allocate from executable kernel memory range */
61209 +#endif
61210 +
61211 /* bits [20..32] reserved for arch specific ioremap internals */
61212
61213 /*
61214 @@ -156,4 +161,103 @@ pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
61215 # endif
61216 #endif
61217
61218 +#define vmalloc(x) \
61219 +({ \
61220 + void *___retval; \
61221 + intoverflow_t ___x = (intoverflow_t)x; \
61222 + if (WARN(___x > ULONG_MAX, "vmalloc size overflow\n")) \
61223 + ___retval = NULL; \
61224 + else \
61225 + ___retval = vmalloc((unsigned long)___x); \
61226 + ___retval; \
61227 +})
61228 +
61229 +#define vzalloc(x) \
61230 +({ \
61231 + void *___retval; \
61232 + intoverflow_t ___x = (intoverflow_t)x; \
61233 + if (WARN(___x > ULONG_MAX, "vzalloc size overflow\n")) \
61234 + ___retval = NULL; \
61235 + else \
61236 + ___retval = vzalloc((unsigned long)___x); \
61237 + ___retval; \
61238 +})
61239 +
61240 +#define __vmalloc(x, y, z) \
61241 +({ \
61242 + void *___retval; \
61243 + intoverflow_t ___x = (intoverflow_t)x; \
61244 + if (WARN(___x > ULONG_MAX, "__vmalloc size overflow\n"))\
61245 + ___retval = NULL; \
61246 + else \
61247 + ___retval = __vmalloc((unsigned long)___x, (y), (z));\
61248 + ___retval; \
61249 +})
61250 +
61251 +#define vmalloc_user(x) \
61252 +({ \
61253 + void *___retval; \
61254 + intoverflow_t ___x = (intoverflow_t)x; \
61255 + if (WARN(___x > ULONG_MAX, "vmalloc_user size overflow\n"))\
61256 + ___retval = NULL; \
61257 + else \
61258 + ___retval = vmalloc_user((unsigned long)___x); \
61259 + ___retval; \
61260 +})
61261 +
61262 +#define vmalloc_exec(x) \
61263 +({ \
61264 + void *___retval; \
61265 + intoverflow_t ___x = (intoverflow_t)x; \
61266 + if (WARN(___x > ULONG_MAX, "vmalloc_exec size overflow\n"))\
61267 + ___retval = NULL; \
61268 + else \
61269 + ___retval = vmalloc_exec((unsigned long)___x); \
61270 + ___retval; \
61271 +})
61272 +
61273 +#define vmalloc_node(x, y) \
61274 +({ \
61275 + void *___retval; \
61276 + intoverflow_t ___x = (intoverflow_t)x; \
61277 + if (WARN(___x > ULONG_MAX, "vmalloc_node size overflow\n"))\
61278 + ___retval = NULL; \
61279 + else \
61280 + ___retval = vmalloc_node((unsigned long)___x, (y));\
61281 + ___retval; \
61282 +})
61283 +
61284 +#define vzalloc_node(x, y) \
61285 +({ \
61286 + void *___retval; \
61287 + intoverflow_t ___x = (intoverflow_t)x; \
61288 + if (WARN(___x > ULONG_MAX, "vzalloc_node size overflow\n"))\
61289 + ___retval = NULL; \
61290 + else \
61291 + ___retval = vzalloc_node((unsigned long)___x, (y));\
61292 + ___retval; \
61293 +})
61294 +
61295 +#define vmalloc_32(x) \
61296 +({ \
61297 + void *___retval; \
61298 + intoverflow_t ___x = (intoverflow_t)x; \
61299 + if (WARN(___x > ULONG_MAX, "vmalloc_32 size overflow\n"))\
61300 + ___retval = NULL; \
61301 + else \
61302 + ___retval = vmalloc_32((unsigned long)___x); \
61303 + ___retval; \
61304 +})
61305 +
61306 +#define vmalloc_32_user(x) \
61307 +({ \
61308 +void *___retval; \
61309 + intoverflow_t ___x = (intoverflow_t)x; \
61310 + if (WARN(___x > ULONG_MAX, "vmalloc_32_user size overflow\n"))\
61311 + ___retval = NULL; \
61312 + else \
61313 + ___retval = vmalloc_32_user((unsigned long)___x);\
61314 + ___retval; \
61315 +})
61316 +
61317 #endif /* _LINUX_VMALLOC_H */
61318 diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
61319 index 65efb92..137adbb 100644
61320 --- a/include/linux/vmstat.h
61321 +++ b/include/linux/vmstat.h
61322 @@ -87,18 +87,18 @@ static inline void vm_events_fold_cpu(int cpu)
61323 /*
61324 * Zone based page accounting with per cpu differentials.
61325 */
61326 -extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
61327 +extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
61328
61329 static inline void zone_page_state_add(long x, struct zone *zone,
61330 enum zone_stat_item item)
61331 {
61332 - atomic_long_add(x, &zone->vm_stat[item]);
61333 - atomic_long_add(x, &vm_stat[item]);
61334 + atomic_long_add_unchecked(x, &zone->vm_stat[item]);
61335 + atomic_long_add_unchecked(x, &vm_stat[item]);
61336 }
61337
61338 static inline unsigned long global_page_state(enum zone_stat_item item)
61339 {
61340 - long x = atomic_long_read(&vm_stat[item]);
61341 + long x = atomic_long_read_unchecked(&vm_stat[item]);
61342 #ifdef CONFIG_SMP
61343 if (x < 0)
61344 x = 0;
61345 @@ -109,7 +109,7 @@ static inline unsigned long global_page_state(enum zone_stat_item item)
61346 static inline unsigned long zone_page_state(struct zone *zone,
61347 enum zone_stat_item item)
61348 {
61349 - long x = atomic_long_read(&zone->vm_stat[item]);
61350 + long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
61351 #ifdef CONFIG_SMP
61352 if (x < 0)
61353 x = 0;
61354 @@ -126,7 +126,7 @@ static inline unsigned long zone_page_state(struct zone *zone,
61355 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
61356 enum zone_stat_item item)
61357 {
61358 - long x = atomic_long_read(&zone->vm_stat[item]);
61359 + long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
61360
61361 #ifdef CONFIG_SMP
61362 int cpu;
61363 @@ -221,8 +221,8 @@ static inline void __mod_zone_page_state(struct zone *zone,
61364
61365 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
61366 {
61367 - atomic_long_inc(&zone->vm_stat[item]);
61368 - atomic_long_inc(&vm_stat[item]);
61369 + atomic_long_inc_unchecked(&zone->vm_stat[item]);
61370 + atomic_long_inc_unchecked(&vm_stat[item]);
61371 }
61372
61373 static inline void __inc_zone_page_state(struct page *page,
61374 @@ -233,8 +233,8 @@ static inline void __inc_zone_page_state(struct page *page,
61375
61376 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
61377 {
61378 - atomic_long_dec(&zone->vm_stat[item]);
61379 - atomic_long_dec(&vm_stat[item]);
61380 + atomic_long_dec_unchecked(&zone->vm_stat[item]);
61381 + atomic_long_dec_unchecked(&vm_stat[item]);
61382 }
61383
61384 static inline void __dec_zone_page_state(struct page *page,
61385 diff --git a/include/linux/xattr.h b/include/linux/xattr.h
61386 index e5d1220..ef6e406 100644
61387 --- a/include/linux/xattr.h
61388 +++ b/include/linux/xattr.h
61389 @@ -57,6 +57,11 @@
61390 #define XATTR_POSIX_ACL_DEFAULT "posix_acl_default"
61391 #define XATTR_NAME_POSIX_ACL_DEFAULT XATTR_SYSTEM_PREFIX XATTR_POSIX_ACL_DEFAULT
61392
61393 +/* User namespace */
61394 +#define XATTR_PAX_PREFIX XATTR_USER_PREFIX "pax."
61395 +#define XATTR_PAX_FLAGS_SUFFIX "flags"
61396 +#define XATTR_NAME_PAX_FLAGS XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX
61397 +
61398 #ifdef __KERNEL__
61399
61400 #include <linux/types.h>
61401 diff --git a/include/media/saa7146_vv.h b/include/media/saa7146_vv.h
61402 index 4aeff96..b378cdc 100644
61403 --- a/include/media/saa7146_vv.h
61404 +++ b/include/media/saa7146_vv.h
61405 @@ -163,7 +163,7 @@ struct saa7146_ext_vv
61406 int (*std_callback)(struct saa7146_dev*, struct saa7146_standard *);
61407
61408 /* the extension can override this */
61409 - struct v4l2_ioctl_ops ops;
61410 + v4l2_ioctl_ops_no_const ops;
61411 /* pointer to the saa7146 core ops */
61412 const struct v4l2_ioctl_ops *core_ops;
61413
61414 diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
61415 index c7c40f1..4f01585 100644
61416 --- a/include/media/v4l2-dev.h
61417 +++ b/include/media/v4l2-dev.h
61418 @@ -56,7 +56,7 @@ int v4l2_prio_check(struct v4l2_prio_state *global, enum v4l2_priority local);
61419
61420
61421 struct v4l2_file_operations {
61422 - struct module *owner;
61423 + struct module * const owner;
61424 ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
61425 ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
61426 unsigned int (*poll) (struct file *, struct poll_table_struct *);
61427 @@ -68,6 +68,7 @@ struct v4l2_file_operations {
61428 int (*open) (struct file *);
61429 int (*release) (struct file *);
61430 };
61431 +typedef struct v4l2_file_operations __no_const v4l2_file_operations_no_const;
61432
61433 /*
61434 * Newer version of video_device, handled by videodev2.c
61435 diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h
61436 index 4d1c74a..65e1221 100644
61437 --- a/include/media/v4l2-ioctl.h
61438 +++ b/include/media/v4l2-ioctl.h
61439 @@ -274,7 +274,7 @@ struct v4l2_ioctl_ops {
61440 long (*vidioc_default) (struct file *file, void *fh,
61441 bool valid_prio, int cmd, void *arg);
61442 };
61443 -
61444 +typedef struct v4l2_ioctl_ops __no_const v4l2_ioctl_ops_no_const;
61445
61446 /* v4l debugging and diagnostics */
61447
61448 diff --git a/include/net/caif/caif_hsi.h b/include/net/caif/caif_hsi.h
61449 index 8d55251..dfe5b0a 100644
61450 --- a/include/net/caif/caif_hsi.h
61451 +++ b/include/net/caif/caif_hsi.h
61452 @@ -98,7 +98,7 @@ struct cfhsi_drv {
61453 void (*rx_done_cb) (struct cfhsi_drv *drv);
61454 void (*wake_up_cb) (struct cfhsi_drv *drv);
61455 void (*wake_down_cb) (struct cfhsi_drv *drv);
61456 -};
61457 +} __no_const;
61458
61459 /* Structure implemented by HSI device. */
61460 struct cfhsi_dev {
61461 diff --git a/include/net/caif/cfctrl.h b/include/net/caif/cfctrl.h
61462 index 9e5425b..8136ffc 100644
61463 --- a/include/net/caif/cfctrl.h
61464 +++ b/include/net/caif/cfctrl.h
61465 @@ -52,7 +52,7 @@ struct cfctrl_rsp {
61466 void (*radioset_rsp)(void);
61467 void (*reject_rsp)(struct cflayer *layer, u8 linkid,
61468 struct cflayer *client_layer);
61469 -};
61470 +} __no_const;
61471
61472 /* Link Setup Parameters for CAIF-Links. */
61473 struct cfctrl_link_param {
61474 @@ -101,8 +101,8 @@ struct cfctrl_request_info {
61475 struct cfctrl {
61476 struct cfsrvl serv;
61477 struct cfctrl_rsp res;
61478 - atomic_t req_seq_no;
61479 - atomic_t rsp_seq_no;
61480 + atomic_unchecked_t req_seq_no;
61481 + atomic_unchecked_t rsp_seq_no;
61482 struct list_head list;
61483 /* Protects from simultaneous access to first_req list */
61484 spinlock_t info_list_lock;
61485 diff --git a/include/net/flow.h b/include/net/flow.h
61486 index 57f15a7..0de26c6 100644
61487 --- a/include/net/flow.h
61488 +++ b/include/net/flow.h
61489 @@ -208,6 +208,6 @@ extern struct flow_cache_object *flow_cache_lookup(
61490
61491 extern void flow_cache_flush(void);
61492 extern void flow_cache_flush_deferred(void);
61493 -extern atomic_t flow_cache_genid;
61494 +extern atomic_unchecked_t flow_cache_genid;
61495
61496 #endif
61497 diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
61498 index e9ff3fc..9d3e5c7 100644
61499 --- a/include/net/inetpeer.h
61500 +++ b/include/net/inetpeer.h
61501 @@ -48,8 +48,8 @@ struct inet_peer {
61502 */
61503 union {
61504 struct {
61505 - atomic_t rid; /* Frag reception counter */
61506 - atomic_t ip_id_count; /* IP ID for the next packet */
61507 + atomic_unchecked_t rid; /* Frag reception counter */
61508 + atomic_unchecked_t ip_id_count; /* IP ID for the next packet */
61509 __u32 tcp_ts;
61510 __u32 tcp_ts_stamp;
61511 };
61512 @@ -113,11 +113,11 @@ static inline int inet_getid(struct inet_peer *p, int more)
61513 more++;
61514 inet_peer_refcheck(p);
61515 do {
61516 - old = atomic_read(&p->ip_id_count);
61517 + old = atomic_read_unchecked(&p->ip_id_count);
61518 new = old + more;
61519 if (!new)
61520 new = 1;
61521 - } while (atomic_cmpxchg(&p->ip_id_count, old, new) != old);
61522 + } while (atomic_cmpxchg_unchecked(&p->ip_id_count, old, new) != old);
61523 return new;
61524 }
61525
61526 diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
61527 index 10422ef..662570f 100644
61528 --- a/include/net/ip_fib.h
61529 +++ b/include/net/ip_fib.h
61530 @@ -146,7 +146,7 @@ extern __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
61531
61532 #define FIB_RES_SADDR(net, res) \
61533 ((FIB_RES_NH(res).nh_saddr_genid == \
61534 - atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
61535 + atomic_read_unchecked(&(net)->ipv4.dev_addr_genid)) ? \
61536 FIB_RES_NH(res).nh_saddr : \
61537 fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
61538 #define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw)
61539 diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
61540 index e5a7b9a..f4fc44b 100644
61541 --- a/include/net/ip_vs.h
61542 +++ b/include/net/ip_vs.h
61543 @@ -509,7 +509,7 @@ struct ip_vs_conn {
61544 struct ip_vs_conn *control; /* Master control connection */
61545 atomic_t n_control; /* Number of controlled ones */
61546 struct ip_vs_dest *dest; /* real server */
61547 - atomic_t in_pkts; /* incoming packet counter */
61548 + atomic_unchecked_t in_pkts; /* incoming packet counter */
61549
61550 /* packet transmitter for different forwarding methods. If it
61551 mangles the packet, it must return NF_DROP or better NF_STOLEN,
61552 @@ -647,7 +647,7 @@ struct ip_vs_dest {
61553 __be16 port; /* port number of the server */
61554 union nf_inet_addr addr; /* IP address of the server */
61555 volatile unsigned flags; /* dest status flags */
61556 - atomic_t conn_flags; /* flags to copy to conn */
61557 + atomic_unchecked_t conn_flags; /* flags to copy to conn */
61558 atomic_t weight; /* server weight */
61559
61560 atomic_t refcnt; /* reference counter */
61561 diff --git a/include/net/irda/ircomm_core.h b/include/net/irda/ircomm_core.h
61562 index 69b610a..fe3962c 100644
61563 --- a/include/net/irda/ircomm_core.h
61564 +++ b/include/net/irda/ircomm_core.h
61565 @@ -51,7 +51,7 @@ typedef struct {
61566 int (*connect_response)(struct ircomm_cb *, struct sk_buff *);
61567 int (*disconnect_request)(struct ircomm_cb *, struct sk_buff *,
61568 struct ircomm_info *);
61569 -} call_t;
61570 +} __no_const call_t;
61571
61572 struct ircomm_cb {
61573 irda_queue_t queue;
61574 diff --git a/include/net/irda/ircomm_tty.h b/include/net/irda/ircomm_tty.h
61575 index 59ba38bc..d515662 100644
61576 --- a/include/net/irda/ircomm_tty.h
61577 +++ b/include/net/irda/ircomm_tty.h
61578 @@ -35,6 +35,7 @@
61579 #include <linux/termios.h>
61580 #include <linux/timer.h>
61581 #include <linux/tty.h> /* struct tty_struct */
61582 +#include <asm/local.h>
61583
61584 #include <net/irda/irias_object.h>
61585 #include <net/irda/ircomm_core.h>
61586 @@ -105,8 +106,8 @@ struct ircomm_tty_cb {
61587 unsigned short close_delay;
61588 unsigned short closing_wait; /* time to wait before closing */
61589
61590 - int open_count;
61591 - int blocked_open; /* # of blocked opens */
61592 + local_t open_count;
61593 + local_t blocked_open; /* # of blocked opens */
61594
61595 /* Protect concurent access to :
61596 * o self->open_count
61597 diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
61598 index f2419cf..473679f 100644
61599 --- a/include/net/iucv/af_iucv.h
61600 +++ b/include/net/iucv/af_iucv.h
61601 @@ -139,7 +139,7 @@ struct iucv_sock {
61602 struct iucv_sock_list {
61603 struct hlist_head head;
61604 rwlock_t lock;
61605 - atomic_t autobind_name;
61606 + atomic_unchecked_t autobind_name;
61607 };
61608
61609 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
61610 diff --git a/include/net/neighbour.h b/include/net/neighbour.h
61611 index 2720884..3aa5c25 100644
61612 --- a/include/net/neighbour.h
61613 +++ b/include/net/neighbour.h
61614 @@ -122,7 +122,7 @@ struct neigh_ops {
61615 void (*error_report)(struct neighbour *, struct sk_buff *);
61616 int (*output)(struct neighbour *, struct sk_buff *);
61617 int (*connected_output)(struct neighbour *, struct sk_buff *);
61618 -};
61619 +} __do_const;
61620
61621 struct pneigh_entry {
61622 struct pneigh_entry *next;
61623 diff --git a/include/net/netlink.h b/include/net/netlink.h
61624 index cb1f350..3279d2c 100644
61625 --- a/include/net/netlink.h
61626 +++ b/include/net/netlink.h
61627 @@ -569,7 +569,7 @@ static inline void *nlmsg_get_pos(struct sk_buff *skb)
61628 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
61629 {
61630 if (mark)
61631 - skb_trim(skb, (unsigned char *) mark - skb->data);
61632 + skb_trim(skb, (const unsigned char *) mark - skb->data);
61633 }
61634
61635 /**
61636 diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
61637 index d786b4f..4c3dd41 100644
61638 --- a/include/net/netns/ipv4.h
61639 +++ b/include/net/netns/ipv4.h
61640 @@ -56,8 +56,8 @@ struct netns_ipv4 {
61641
61642 unsigned int sysctl_ping_group_range[2];
61643
61644 - atomic_t rt_genid;
61645 - atomic_t dev_addr_genid;
61646 + atomic_unchecked_t rt_genid;
61647 + atomic_unchecked_t dev_addr_genid;
61648
61649 #ifdef CONFIG_IP_MROUTE
61650 #ifndef CONFIG_IP_MROUTE_MULTIPLE_TABLES
61651 diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
61652 index 6a72a58..e6a127d 100644
61653 --- a/include/net/sctp/sctp.h
61654 +++ b/include/net/sctp/sctp.h
61655 @@ -318,9 +318,9 @@ do { \
61656
61657 #else /* SCTP_DEBUG */
61658
61659 -#define SCTP_DEBUG_PRINTK(whatever...)
61660 -#define SCTP_DEBUG_PRINTK_CONT(fmt, args...)
61661 -#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
61662 +#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
61663 +#define SCTP_DEBUG_PRINTK_CONT(fmt, args...) do {} while (0)
61664 +#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
61665 #define SCTP_ENABLE_DEBUG
61666 #define SCTP_DISABLE_DEBUG
61667 #define SCTP_ASSERT(expr, str, func)
61668 diff --git a/include/net/sock.h b/include/net/sock.h
61669 index 32e3937..87a1dbc 100644
61670 --- a/include/net/sock.h
61671 +++ b/include/net/sock.h
61672 @@ -277,7 +277,7 @@ struct sock {
61673 #ifdef CONFIG_RPS
61674 __u32 sk_rxhash;
61675 #endif
61676 - atomic_t sk_drops;
61677 + atomic_unchecked_t sk_drops;
61678 int sk_rcvbuf;
61679
61680 struct sk_filter __rcu *sk_filter;
61681 @@ -1402,7 +1402,7 @@ static inline void sk_nocaps_add(struct sock *sk, int flags)
61682 }
61683
61684 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
61685 - char __user *from, char *to,
61686 + char __user *from, unsigned char *to,
61687 int copy, int offset)
61688 {
61689 if (skb->ip_summed == CHECKSUM_NONE) {
61690 diff --git a/include/net/tcp.h b/include/net/tcp.h
61691 index bb18c4d..bb87972 100644
61692 --- a/include/net/tcp.h
61693 +++ b/include/net/tcp.h
61694 @@ -1409,7 +1409,7 @@ struct tcp_seq_afinfo {
61695 char *name;
61696 sa_family_t family;
61697 const struct file_operations *seq_fops;
61698 - struct seq_operations seq_ops;
61699 + seq_operations_no_const seq_ops;
61700 };
61701
61702 struct tcp_iter_state {
61703 diff --git a/include/net/udp.h b/include/net/udp.h
61704 index 3b285f4..0219639 100644
61705 --- a/include/net/udp.h
61706 +++ b/include/net/udp.h
61707 @@ -237,7 +237,7 @@ struct udp_seq_afinfo {
61708 sa_family_t family;
61709 struct udp_table *udp_table;
61710 const struct file_operations *seq_fops;
61711 - struct seq_operations seq_ops;
61712 + seq_operations_no_const seq_ops;
61713 };
61714
61715 struct udp_iter_state {
61716 diff --git a/include/net/xfrm.h b/include/net/xfrm.h
61717 index b203e14..1df3991 100644
61718 --- a/include/net/xfrm.h
61719 +++ b/include/net/xfrm.h
61720 @@ -505,7 +505,7 @@ struct xfrm_policy {
61721 struct timer_list timer;
61722
61723 struct flow_cache_object flo;
61724 - atomic_t genid;
61725 + atomic_unchecked_t genid;
61726 u32 priority;
61727 u32 index;
61728 struct xfrm_mark mark;
61729 diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
61730 index 1a046b1..ee0bef0 100644
61731 --- a/include/rdma/iw_cm.h
61732 +++ b/include/rdma/iw_cm.h
61733 @@ -122,7 +122,7 @@ struct iw_cm_verbs {
61734 int backlog);
61735
61736 int (*destroy_listen)(struct iw_cm_id *cm_id);
61737 -};
61738 +} __no_const;
61739
61740 /**
61741 * iw_create_cm_id - Create an IW CM identifier.
61742 diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
61743 index 5d1a758..1dbf795 100644
61744 --- a/include/scsi/libfc.h
61745 +++ b/include/scsi/libfc.h
61746 @@ -748,6 +748,7 @@ struct libfc_function_template {
61747 */
61748 void (*disc_stop_final) (struct fc_lport *);
61749 };
61750 +typedef struct libfc_function_template __no_const libfc_function_template_no_const;
61751
61752 /**
61753 * struct fc_disc - Discovery context
61754 @@ -851,7 +852,7 @@ struct fc_lport {
61755 struct fc_vport *vport;
61756
61757 /* Operational Information */
61758 - struct libfc_function_template tt;
61759 + libfc_function_template_no_const tt;
61760 u8 link_up;
61761 u8 qfull;
61762 enum fc_lport_state state;
61763 diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
61764 index 5591ed5..13eb457 100644
61765 --- a/include/scsi/scsi_device.h
61766 +++ b/include/scsi/scsi_device.h
61767 @@ -161,9 +161,9 @@ struct scsi_device {
61768 unsigned int max_device_blocked; /* what device_blocked counts down from */
61769 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
61770
61771 - atomic_t iorequest_cnt;
61772 - atomic_t iodone_cnt;
61773 - atomic_t ioerr_cnt;
61774 + atomic_unchecked_t iorequest_cnt;
61775 + atomic_unchecked_t iodone_cnt;
61776 + atomic_unchecked_t ioerr_cnt;
61777
61778 struct device sdev_gendev,
61779 sdev_dev;
61780 diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
61781 index 2a65167..91e01f8 100644
61782 --- a/include/scsi/scsi_transport_fc.h
61783 +++ b/include/scsi/scsi_transport_fc.h
61784 @@ -711,7 +711,7 @@ struct fc_function_template {
61785 unsigned long show_host_system_hostname:1;
61786
61787 unsigned long disable_target_scan:1;
61788 -};
61789 +} __do_const;
61790
61791
61792 /**
61793 diff --git a/include/sound/ak4xxx-adda.h b/include/sound/ak4xxx-adda.h
61794 index 030b87c..98a6954 100644
61795 --- a/include/sound/ak4xxx-adda.h
61796 +++ b/include/sound/ak4xxx-adda.h
61797 @@ -35,7 +35,7 @@ struct snd_ak4xxx_ops {
61798 void (*write)(struct snd_akm4xxx *ak, int chip, unsigned char reg,
61799 unsigned char val);
61800 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
61801 -};
61802 +} __no_const;
61803
61804 #define AK4XXX_IMAGE_SIZE (AK4XXX_MAX_CHIPS * 16) /* 64 bytes */
61805
61806 diff --git a/include/sound/hwdep.h b/include/sound/hwdep.h
61807 index 8c05e47..2b5df97 100644
61808 --- a/include/sound/hwdep.h
61809 +++ b/include/sound/hwdep.h
61810 @@ -49,7 +49,7 @@ struct snd_hwdep_ops {
61811 struct snd_hwdep_dsp_status *status);
61812 int (*dsp_load)(struct snd_hwdep *hw,
61813 struct snd_hwdep_dsp_image *image);
61814 -};
61815 +} __no_const;
61816
61817 struct snd_hwdep {
61818 struct snd_card *card;
61819 diff --git a/include/sound/info.h b/include/sound/info.h
61820 index 5492cc4..1a65278 100644
61821 --- a/include/sound/info.h
61822 +++ b/include/sound/info.h
61823 @@ -44,7 +44,7 @@ struct snd_info_entry_text {
61824 struct snd_info_buffer *buffer);
61825 void (*write)(struct snd_info_entry *entry,
61826 struct snd_info_buffer *buffer);
61827 -};
61828 +} __no_const;
61829
61830 struct snd_info_entry_ops {
61831 int (*open)(struct snd_info_entry *entry,
61832 diff --git a/include/sound/pcm.h b/include/sound/pcm.h
61833 index 0cf91b2..b70cae4 100644
61834 --- a/include/sound/pcm.h
61835 +++ b/include/sound/pcm.h
61836 @@ -81,6 +81,7 @@ struct snd_pcm_ops {
61837 int (*mmap)(struct snd_pcm_substream *substream, struct vm_area_struct *vma);
61838 int (*ack)(struct snd_pcm_substream *substream);
61839 };
61840 +typedef struct snd_pcm_ops __no_const snd_pcm_ops_no_const;
61841
61842 /*
61843 *
61844 diff --git a/include/sound/sb16_csp.h b/include/sound/sb16_csp.h
61845 index af1b49e..a5d55a5 100644
61846 --- a/include/sound/sb16_csp.h
61847 +++ b/include/sound/sb16_csp.h
61848 @@ -146,7 +146,7 @@ struct snd_sb_csp_ops {
61849 int (*csp_start) (struct snd_sb_csp * p, int sample_width, int channels);
61850 int (*csp_stop) (struct snd_sb_csp * p);
61851 int (*csp_qsound_transfer) (struct snd_sb_csp * p);
61852 -};
61853 +} __no_const;
61854
61855 /*
61856 * CSP private data
61857 diff --git a/include/sound/soc.h b/include/sound/soc.h
61858 index 11cfb59..e3f93f4 100644
61859 --- a/include/sound/soc.h
61860 +++ b/include/sound/soc.h
61861 @@ -683,7 +683,7 @@ struct snd_soc_platform_driver {
61862 /* platform IO - used for platform DAPM */
61863 unsigned int (*read)(struct snd_soc_platform *, unsigned int);
61864 int (*write)(struct snd_soc_platform *, unsigned int, unsigned int);
61865 -};
61866 +} __do_const;
61867
61868 struct snd_soc_platform {
61869 const char *name;
61870 diff --git a/include/sound/ymfpci.h b/include/sound/ymfpci.h
61871 index 444cd6b..3327cc5 100644
61872 --- a/include/sound/ymfpci.h
61873 +++ b/include/sound/ymfpci.h
61874 @@ -358,7 +358,7 @@ struct snd_ymfpci {
61875 spinlock_t reg_lock;
61876 spinlock_t voice_lock;
61877 wait_queue_head_t interrupt_sleep;
61878 - atomic_t interrupt_sleep_count;
61879 + atomic_unchecked_t interrupt_sleep_count;
61880 struct snd_info_entry *proc_entry;
61881 const struct firmware *dsp_microcode;
61882 const struct firmware *controller_microcode;
61883 diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
61884 index a79886c..b483af6 100644
61885 --- a/include/target/target_core_base.h
61886 +++ b/include/target/target_core_base.h
61887 @@ -346,7 +346,7 @@ struct t10_reservation_ops {
61888 int (*t10_seq_non_holder)(struct se_cmd *, unsigned char *, u32);
61889 int (*t10_pr_register)(struct se_cmd *);
61890 int (*t10_pr_clear)(struct se_cmd *);
61891 -};
61892 +} __no_const;
61893
61894 struct t10_reservation {
61895 /* Reservation effects all target ports */
61896 @@ -465,8 +465,8 @@ struct se_cmd {
61897 atomic_t t_se_count;
61898 atomic_t t_task_cdbs_left;
61899 atomic_t t_task_cdbs_ex_left;
61900 - atomic_t t_task_cdbs_sent;
61901 - atomic_t t_transport_aborted;
61902 + atomic_unchecked_t t_task_cdbs_sent;
61903 + atomic_unchecked_t t_transport_aborted;
61904 atomic_t t_transport_active;
61905 atomic_t t_transport_complete;
61906 atomic_t t_transport_queue_active;
61907 @@ -704,7 +704,7 @@ struct se_device {
61908 /* Active commands on this virtual SE device */
61909 atomic_t simple_cmds;
61910 atomic_t depth_left;
61911 - atomic_t dev_ordered_id;
61912 + atomic_unchecked_t dev_ordered_id;
61913 atomic_t execute_tasks;
61914 atomic_t dev_ordered_sync;
61915 atomic_t dev_qf_count;
61916 diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
61917 index 1c09820..7f5ec79 100644
61918 --- a/include/trace/events/irq.h
61919 +++ b/include/trace/events/irq.h
61920 @@ -36,7 +36,7 @@ struct softirq_action;
61921 */
61922 TRACE_EVENT(irq_handler_entry,
61923
61924 - TP_PROTO(int irq, struct irqaction *action),
61925 + TP_PROTO(int irq, const struct irqaction *action),
61926
61927 TP_ARGS(irq, action),
61928
61929 @@ -66,7 +66,7 @@ TRACE_EVENT(irq_handler_entry,
61930 */
61931 TRACE_EVENT(irq_handler_exit,
61932
61933 - TP_PROTO(int irq, struct irqaction *action, int ret),
61934 + TP_PROTO(int irq, const struct irqaction *action, int ret),
61935
61936 TP_ARGS(irq, action, ret),
61937
61938 diff --git a/include/video/udlfb.h b/include/video/udlfb.h
61939 index c41f308..6918de3 100644
61940 --- a/include/video/udlfb.h
61941 +++ b/include/video/udlfb.h
61942 @@ -52,10 +52,10 @@ struct dlfb_data {
61943 u32 pseudo_palette[256];
61944 int blank_mode; /*one of FB_BLANK_ */
61945 /* blit-only rendering path metrics, exposed through sysfs */
61946 - atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
61947 - atomic_t bytes_identical; /* saved effort with backbuffer comparison */
61948 - atomic_t bytes_sent; /* to usb, after compression including overhead */
61949 - atomic_t cpu_kcycles_used; /* transpired during pixel processing */
61950 + atomic_unchecked_t bytes_rendered; /* raw pixel-bytes driver asked to render */
61951 + atomic_unchecked_t bytes_identical; /* saved effort with backbuffer comparison */
61952 + atomic_unchecked_t bytes_sent; /* to usb, after compression including overhead */
61953 + atomic_unchecked_t cpu_kcycles_used; /* transpired during pixel processing */
61954 };
61955
61956 #define NR_USB_REQUEST_I2C_SUB_IO 0x02
61957 diff --git a/include/video/uvesafb.h b/include/video/uvesafb.h
61958 index 0993a22..32ba2fe 100644
61959 --- a/include/video/uvesafb.h
61960 +++ b/include/video/uvesafb.h
61961 @@ -177,6 +177,7 @@ struct uvesafb_par {
61962 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
61963 u8 pmi_setpal; /* PMI for palette changes */
61964 u16 *pmi_base; /* protected mode interface location */
61965 + u8 *pmi_code; /* protected mode code location */
61966 void *pmi_start;
61967 void *pmi_pal;
61968 u8 *vbe_state_orig; /*
61969 diff --git a/init/Kconfig b/init/Kconfig
61970 index 43298f9..2f56c12 100644
61971 --- a/init/Kconfig
61972 +++ b/init/Kconfig
61973 @@ -1214,7 +1214,7 @@ config SLUB_DEBUG
61974
61975 config COMPAT_BRK
61976 bool "Disable heap randomization"
61977 - default y
61978 + default n
61979 help
61980 Randomizing heap placement makes heap exploits harder, but it
61981 also breaks ancient binaries (including anything libc5 based).
61982 diff --git a/init/do_mounts.c b/init/do_mounts.c
61983 index db6e5ee..7677ff7 100644
61984 --- a/init/do_mounts.c
61985 +++ b/init/do_mounts.c
61986 @@ -325,11 +325,11 @@ static void __init get_fs_names(char *page)
61987
61988 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
61989 {
61990 - int err = sys_mount(name, "/root", fs, flags, data);
61991 + int err = sys_mount((char __force_user *)name, (char __force_user *)"/root", (char __force_user *)fs, flags, (void __force_user *)data);
61992 if (err)
61993 return err;
61994
61995 - sys_chdir((const char __user __force *)"/root");
61996 + sys_chdir((const char __force_user*)"/root");
61997 ROOT_DEV = current->fs->pwd.mnt->mnt_sb->s_dev;
61998 printk(KERN_INFO
61999 "VFS: Mounted root (%s filesystem)%s on device %u:%u.\n",
62000 @@ -448,18 +448,18 @@ void __init change_floppy(char *fmt, ...)
62001 va_start(args, fmt);
62002 vsprintf(buf, fmt, args);
62003 va_end(args);
62004 - fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
62005 + fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
62006 if (fd >= 0) {
62007 sys_ioctl(fd, FDEJECT, 0);
62008 sys_close(fd);
62009 }
62010 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
62011 - fd = sys_open("/dev/console", O_RDWR, 0);
62012 + fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0);
62013 if (fd >= 0) {
62014 sys_ioctl(fd, TCGETS, (long)&termios);
62015 termios.c_lflag &= ~ICANON;
62016 sys_ioctl(fd, TCSETSF, (long)&termios);
62017 - sys_read(fd, &c, 1);
62018 + sys_read(fd, (char __user *)&c, 1);
62019 termios.c_lflag |= ICANON;
62020 sys_ioctl(fd, TCSETSF, (long)&termios);
62021 sys_close(fd);
62022 @@ -553,6 +553,6 @@ void __init prepare_namespace(void)
62023 mount_root();
62024 out:
62025 devtmpfs_mount("dev");
62026 - sys_mount(".", "/", NULL, MS_MOVE, NULL);
62027 - sys_chroot((const char __user __force *)".");
62028 + sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
62029 + sys_chroot((const char __force_user *)".");
62030 }
62031 diff --git a/init/do_mounts.h b/init/do_mounts.h
62032 index f5b978a..69dbfe8 100644
62033 --- a/init/do_mounts.h
62034 +++ b/init/do_mounts.h
62035 @@ -15,15 +15,15 @@ extern int root_mountflags;
62036
62037 static inline int create_dev(char *name, dev_t dev)
62038 {
62039 - sys_unlink(name);
62040 - return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
62041 + sys_unlink((char __force_user *)name);
62042 + return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev));
62043 }
62044
62045 #if BITS_PER_LONG == 32
62046 static inline u32 bstat(char *name)
62047 {
62048 struct stat64 stat;
62049 - if (sys_stat64(name, &stat) != 0)
62050 + if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0)
62051 return 0;
62052 if (!S_ISBLK(stat.st_mode))
62053 return 0;
62054 @@ -35,7 +35,7 @@ static inline u32 bstat(char *name)
62055 static inline u32 bstat(char *name)
62056 {
62057 struct stat stat;
62058 - if (sys_newstat(name, &stat) != 0)
62059 + if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0)
62060 return 0;
62061 if (!S_ISBLK(stat.st_mode))
62062 return 0;
62063 diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
62064 index 3098a38..253064e 100644
62065 --- a/init/do_mounts_initrd.c
62066 +++ b/init/do_mounts_initrd.c
62067 @@ -44,13 +44,13 @@ static void __init handle_initrd(void)
62068 create_dev("/dev/root.old", Root_RAM0);
62069 /* mount initrd on rootfs' /root */
62070 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
62071 - sys_mkdir("/old", 0700);
62072 - root_fd = sys_open("/", 0, 0);
62073 - old_fd = sys_open("/old", 0, 0);
62074 + sys_mkdir((const char __force_user *)"/old", 0700);
62075 + root_fd = sys_open((const char __force_user *)"/", 0, 0);
62076 + old_fd = sys_open((const char __force_user *)"/old", 0, 0);
62077 /* move initrd over / and chdir/chroot in initrd root */
62078 - sys_chdir("/root");
62079 - sys_mount(".", "/", NULL, MS_MOVE, NULL);
62080 - sys_chroot(".");
62081 + sys_chdir((const char __force_user *)"/root");
62082 + sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
62083 + sys_chroot((const char __force_user *)".");
62084
62085 /*
62086 * In case that a resume from disk is carried out by linuxrc or one of
62087 @@ -67,15 +67,15 @@ static void __init handle_initrd(void)
62088
62089 /* move initrd to rootfs' /old */
62090 sys_fchdir(old_fd);
62091 - sys_mount("/", ".", NULL, MS_MOVE, NULL);
62092 + sys_mount((char __force_user *)"/", (char __force_user *)".", NULL, MS_MOVE, NULL);
62093 /* switch root and cwd back to / of rootfs */
62094 sys_fchdir(root_fd);
62095 - sys_chroot(".");
62096 + sys_chroot((const char __force_user *)".");
62097 sys_close(old_fd);
62098 sys_close(root_fd);
62099
62100 if (new_decode_dev(real_root_dev) == Root_RAM0) {
62101 - sys_chdir("/old");
62102 + sys_chdir((const char __force_user *)"/old");
62103 return;
62104 }
62105
62106 @@ -83,17 +83,17 @@ static void __init handle_initrd(void)
62107 mount_root();
62108
62109 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
62110 - error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
62111 + error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL);
62112 if (!error)
62113 printk("okay\n");
62114 else {
62115 - int fd = sys_open("/dev/root.old", O_RDWR, 0);
62116 + int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0);
62117 if (error == -ENOENT)
62118 printk("/initrd does not exist. Ignored.\n");
62119 else
62120 printk("failed\n");
62121 printk(KERN_NOTICE "Unmounting old root\n");
62122 - sys_umount("/old", MNT_DETACH);
62123 + sys_umount((char __force_user *)"/old", MNT_DETACH);
62124 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
62125 if (fd < 0) {
62126 error = fd;
62127 @@ -116,11 +116,11 @@ int __init initrd_load(void)
62128 * mounted in the normal path.
62129 */
62130 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
62131 - sys_unlink("/initrd.image");
62132 + sys_unlink((const char __force_user *)"/initrd.image");
62133 handle_initrd();
62134 return 1;
62135 }
62136 }
62137 - sys_unlink("/initrd.image");
62138 + sys_unlink((const char __force_user *)"/initrd.image");
62139 return 0;
62140 }
62141 diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
62142 index 32c4799..c27ee74 100644
62143 --- a/init/do_mounts_md.c
62144 +++ b/init/do_mounts_md.c
62145 @@ -170,7 +170,7 @@ static void __init md_setup_drive(void)
62146 partitioned ? "_d" : "", minor,
62147 md_setup_args[ent].device_names);
62148
62149 - fd = sys_open(name, 0, 0);
62150 + fd = sys_open((char __force_user *)name, 0, 0);
62151 if (fd < 0) {
62152 printk(KERN_ERR "md: open failed - cannot start "
62153 "array %s\n", name);
62154 @@ -233,7 +233,7 @@ static void __init md_setup_drive(void)
62155 * array without it
62156 */
62157 sys_close(fd);
62158 - fd = sys_open(name, 0, 0);
62159 + fd = sys_open((char __force_user *)name, 0, 0);
62160 sys_ioctl(fd, BLKRRPART, 0);
62161 }
62162 sys_close(fd);
62163 @@ -283,7 +283,7 @@ static void __init autodetect_raid(void)
62164
62165 wait_for_device_probe();
62166
62167 - fd = sys_open((const char __user __force *) "/dev/md0", 0, 0);
62168 + fd = sys_open((const char __force_user *) "/dev/md0", 0, 0);
62169 if (fd >= 0) {
62170 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
62171 sys_close(fd);
62172 diff --git a/init/initramfs.c b/init/initramfs.c
62173 index 2531811..040d4d4 100644
62174 --- a/init/initramfs.c
62175 +++ b/init/initramfs.c
62176 @@ -74,7 +74,7 @@ static void __init free_hash(void)
62177 }
62178 }
62179
62180 -static long __init do_utime(char __user *filename, time_t mtime)
62181 +static long __init do_utime(__force char __user *filename, time_t mtime)
62182 {
62183 struct timespec t[2];
62184
62185 @@ -109,7 +109,7 @@ static void __init dir_utime(void)
62186 struct dir_entry *de, *tmp;
62187 list_for_each_entry_safe(de, tmp, &dir_list, list) {
62188 list_del(&de->list);
62189 - do_utime(de->name, de->mtime);
62190 + do_utime((char __force_user *)de->name, de->mtime);
62191 kfree(de->name);
62192 kfree(de);
62193 }
62194 @@ -271,7 +271,7 @@ static int __init maybe_link(void)
62195 if (nlink >= 2) {
62196 char *old = find_link(major, minor, ino, mode, collected);
62197 if (old)
62198 - return (sys_link(old, collected) < 0) ? -1 : 1;
62199 + return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? -1 : 1;
62200 }
62201 return 0;
62202 }
62203 @@ -280,11 +280,11 @@ static void __init clean_path(char *path, mode_t mode)
62204 {
62205 struct stat st;
62206
62207 - if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
62208 + if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode^mode) & S_IFMT) {
62209 if (S_ISDIR(st.st_mode))
62210 - sys_rmdir(path);
62211 + sys_rmdir((char __force_user *)path);
62212 else
62213 - sys_unlink(path);
62214 + sys_unlink((char __force_user *)path);
62215 }
62216 }
62217
62218 @@ -305,7 +305,7 @@ static int __init do_name(void)
62219 int openflags = O_WRONLY|O_CREAT;
62220 if (ml != 1)
62221 openflags |= O_TRUNC;
62222 - wfd = sys_open(collected, openflags, mode);
62223 + wfd = sys_open((char __force_user *)collected, openflags, mode);
62224
62225 if (wfd >= 0) {
62226 sys_fchown(wfd, uid, gid);
62227 @@ -317,17 +317,17 @@ static int __init do_name(void)
62228 }
62229 }
62230 } else if (S_ISDIR(mode)) {
62231 - sys_mkdir(collected, mode);
62232 - sys_chown(collected, uid, gid);
62233 - sys_chmod(collected, mode);
62234 + sys_mkdir((char __force_user *)collected, mode);
62235 + sys_chown((char __force_user *)collected, uid, gid);
62236 + sys_chmod((char __force_user *)collected, mode);
62237 dir_add(collected, mtime);
62238 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
62239 S_ISFIFO(mode) || S_ISSOCK(mode)) {
62240 if (maybe_link() == 0) {
62241 - sys_mknod(collected, mode, rdev);
62242 - sys_chown(collected, uid, gid);
62243 - sys_chmod(collected, mode);
62244 - do_utime(collected, mtime);
62245 + sys_mknod((char __force_user *)collected, mode, rdev);
62246 + sys_chown((char __force_user *)collected, uid, gid);
62247 + sys_chmod((char __force_user *)collected, mode);
62248 + do_utime((char __force_user *)collected, mtime);
62249 }
62250 }
62251 return 0;
62252 @@ -336,15 +336,15 @@ static int __init do_name(void)
62253 static int __init do_copy(void)
62254 {
62255 if (count >= body_len) {
62256 - sys_write(wfd, victim, body_len);
62257 + sys_write(wfd, (char __force_user *)victim, body_len);
62258 sys_close(wfd);
62259 - do_utime(vcollected, mtime);
62260 + do_utime((char __force_user *)vcollected, mtime);
62261 kfree(vcollected);
62262 eat(body_len);
62263 state = SkipIt;
62264 return 0;
62265 } else {
62266 - sys_write(wfd, victim, count);
62267 + sys_write(wfd, (char __force_user *)victim, count);
62268 body_len -= count;
62269 eat(count);
62270 return 1;
62271 @@ -355,9 +355,9 @@ static int __init do_symlink(void)
62272 {
62273 collected[N_ALIGN(name_len) + body_len] = '\0';
62274 clean_path(collected, 0);
62275 - sys_symlink(collected + N_ALIGN(name_len), collected);
62276 - sys_lchown(collected, uid, gid);
62277 - do_utime(collected, mtime);
62278 + sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected);
62279 + sys_lchown((char __force_user *)collected, uid, gid);
62280 + do_utime((char __force_user *)collected, mtime);
62281 state = SkipIt;
62282 next_state = Reset;
62283 return 0;
62284 diff --git a/init/main.c b/init/main.c
62285 index 217ed23..32e5731 100644
62286 --- a/init/main.c
62287 +++ b/init/main.c
62288 @@ -96,6 +96,8 @@ static inline void mark_rodata_ro(void) { }
62289 extern void tc_init(void);
62290 #endif
62291
62292 +extern void grsecurity_init(void);
62293 +
62294 /*
62295 * Debug helper: via this flag we know that we are in 'early bootup code'
62296 * where only the boot processor is running with IRQ disabled. This means
62297 @@ -149,6 +151,49 @@ static int __init set_reset_devices(char *str)
62298
62299 __setup("reset_devices", set_reset_devices);
62300
62301 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
62302 +extern char pax_enter_kernel_user[];
62303 +extern char pax_exit_kernel_user[];
62304 +extern pgdval_t clone_pgd_mask;
62305 +#endif
62306 +
62307 +#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
62308 +static int __init setup_pax_nouderef(char *str)
62309 +{
62310 +#ifdef CONFIG_X86_32
62311 + unsigned int cpu;
62312 + struct desc_struct *gdt;
62313 +
62314 + for (cpu = 0; cpu < NR_CPUS; cpu++) {
62315 + gdt = get_cpu_gdt_table(cpu);
62316 + gdt[GDT_ENTRY_KERNEL_DS].type = 3;
62317 + gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
62318 + gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
62319 + gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
62320 + }
62321 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
62322 +#else
62323 + memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
62324 + memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
62325 + clone_pgd_mask = ~(pgdval_t)0UL;
62326 +#endif
62327 +
62328 + return 0;
62329 +}
62330 +early_param("pax_nouderef", setup_pax_nouderef);
62331 +#endif
62332 +
62333 +#ifdef CONFIG_PAX_SOFTMODE
62334 +int pax_softmode;
62335 +
62336 +static int __init setup_pax_softmode(char *str)
62337 +{
62338 + get_option(&str, &pax_softmode);
62339 + return 1;
62340 +}
62341 +__setup("pax_softmode=", setup_pax_softmode);
62342 +#endif
62343 +
62344 static const char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
62345 const char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
62346 static const char *panic_later, *panic_param;
62347 @@ -681,6 +726,7 @@ int __init_or_module do_one_initcall(initcall_t fn)
62348 {
62349 int count = preempt_count();
62350 int ret;
62351 + const char *msg1 = "", *msg2 = "";
62352
62353 if (initcall_debug)
62354 ret = do_one_initcall_debug(fn);
62355 @@ -693,15 +739,15 @@ int __init_or_module do_one_initcall(initcall_t fn)
62356 sprintf(msgbuf, "error code %d ", ret);
62357
62358 if (preempt_count() != count) {
62359 - strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
62360 + msg1 = " preemption imbalance";
62361 preempt_count() = count;
62362 }
62363 if (irqs_disabled()) {
62364 - strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
62365 + msg2 = " disabled interrupts";
62366 local_irq_enable();
62367 }
62368 - if (msgbuf[0]) {
62369 - printk("initcall %pF returned with %s\n", fn, msgbuf);
62370 + if (msgbuf[0] || *msg1 || *msg2) {
62371 + printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
62372 }
62373
62374 return ret;
62375 @@ -820,7 +866,7 @@ static int __init kernel_init(void * unused)
62376 do_basic_setup();
62377
62378 /* Open the /dev/console on the rootfs, this should never fail */
62379 - if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
62380 + if (sys_open((const char __force_user *) "/dev/console", O_RDWR, 0) < 0)
62381 printk(KERN_WARNING "Warning: unable to open an initial console.\n");
62382
62383 (void) sys_dup(0);
62384 @@ -833,11 +879,13 @@ static int __init kernel_init(void * unused)
62385 if (!ramdisk_execute_command)
62386 ramdisk_execute_command = "/init";
62387
62388 - if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
62389 + if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) {
62390 ramdisk_execute_command = NULL;
62391 prepare_namespace();
62392 }
62393
62394 + grsecurity_init();
62395 +
62396 /*
62397 * Ok, we have completed the initial bootup, and
62398 * we're essentially up and running. Get rid of the
62399 diff --git a/ipc/mqueue.c b/ipc/mqueue.c
62400 index 5b4293d..f179875 100644
62401 --- a/ipc/mqueue.c
62402 +++ b/ipc/mqueue.c
62403 @@ -156,6 +156,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
62404 mq_bytes = (mq_msg_tblsz +
62405 (info->attr.mq_maxmsg * info->attr.mq_msgsize));
62406
62407 + gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
62408 spin_lock(&mq_lock);
62409 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
62410 u->mq_bytes + mq_bytes > task_rlimit(p, RLIMIT_MSGQUEUE)) {
62411 diff --git a/ipc/msg.c b/ipc/msg.c
62412 index 7385de2..a8180e0 100644
62413 --- a/ipc/msg.c
62414 +++ b/ipc/msg.c
62415 @@ -309,18 +309,19 @@ static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg)
62416 return security_msg_queue_associate(msq, msgflg);
62417 }
62418
62419 +static struct ipc_ops msg_ops = {
62420 + .getnew = newque,
62421 + .associate = msg_security,
62422 + .more_checks = NULL
62423 +};
62424 +
62425 SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
62426 {
62427 struct ipc_namespace *ns;
62428 - struct ipc_ops msg_ops;
62429 struct ipc_params msg_params;
62430
62431 ns = current->nsproxy->ipc_ns;
62432
62433 - msg_ops.getnew = newque;
62434 - msg_ops.associate = msg_security;
62435 - msg_ops.more_checks = NULL;
62436 -
62437 msg_params.key = key;
62438 msg_params.flg = msgflg;
62439
62440 diff --git a/ipc/sem.c b/ipc/sem.c
62441 index 5215a81..cfc0cac 100644
62442 --- a/ipc/sem.c
62443 +++ b/ipc/sem.c
62444 @@ -364,10 +364,15 @@ static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
62445 return 0;
62446 }
62447
62448 +static struct ipc_ops sem_ops = {
62449 + .getnew = newary,
62450 + .associate = sem_security,
62451 + .more_checks = sem_more_checks
62452 +};
62453 +
62454 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
62455 {
62456 struct ipc_namespace *ns;
62457 - struct ipc_ops sem_ops;
62458 struct ipc_params sem_params;
62459
62460 ns = current->nsproxy->ipc_ns;
62461 @@ -375,10 +380,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
62462 if (nsems < 0 || nsems > ns->sc_semmsl)
62463 return -EINVAL;
62464
62465 - sem_ops.getnew = newary;
62466 - sem_ops.associate = sem_security;
62467 - sem_ops.more_checks = sem_more_checks;
62468 -
62469 sem_params.key = key;
62470 sem_params.flg = semflg;
62471 sem_params.u.nsems = nsems;
62472 diff --git a/ipc/shm.c b/ipc/shm.c
62473 index b76be5b..859e750 100644
62474 --- a/ipc/shm.c
62475 +++ b/ipc/shm.c
62476 @@ -69,6 +69,14 @@ static void shm_destroy (struct ipc_namespace *ns, struct shmid_kernel *shp);
62477 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
62478 #endif
62479
62480 +#ifdef CONFIG_GRKERNSEC
62481 +extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
62482 + const time_t shm_createtime, const uid_t cuid,
62483 + const int shmid);
62484 +extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
62485 + const time_t shm_createtime);
62486 +#endif
62487 +
62488 void shm_init_ns(struct ipc_namespace *ns)
62489 {
62490 ns->shm_ctlmax = SHMMAX;
62491 @@ -508,6 +516,14 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
62492 shp->shm_lprid = 0;
62493 shp->shm_atim = shp->shm_dtim = 0;
62494 shp->shm_ctim = get_seconds();
62495 +#ifdef CONFIG_GRKERNSEC
62496 + {
62497 + struct timespec timeval;
62498 + do_posix_clock_monotonic_gettime(&timeval);
62499 +
62500 + shp->shm_createtime = timeval.tv_sec;
62501 + }
62502 +#endif
62503 shp->shm_segsz = size;
62504 shp->shm_nattch = 0;
62505 shp->shm_file = file;
62506 @@ -559,18 +575,19 @@ static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
62507 return 0;
62508 }
62509
62510 +static struct ipc_ops shm_ops = {
62511 + .getnew = newseg,
62512 + .associate = shm_security,
62513 + .more_checks = shm_more_checks
62514 +};
62515 +
62516 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
62517 {
62518 struct ipc_namespace *ns;
62519 - struct ipc_ops shm_ops;
62520 struct ipc_params shm_params;
62521
62522 ns = current->nsproxy->ipc_ns;
62523
62524 - shm_ops.getnew = newseg;
62525 - shm_ops.associate = shm_security;
62526 - shm_ops.more_checks = shm_more_checks;
62527 -
62528 shm_params.key = key;
62529 shm_params.flg = shmflg;
62530 shm_params.u.size = size;
62531 @@ -988,6 +1005,12 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
62532 f_mode = FMODE_READ | FMODE_WRITE;
62533 }
62534 if (shmflg & SHM_EXEC) {
62535 +
62536 +#ifdef CONFIG_PAX_MPROTECT
62537 + if (current->mm->pax_flags & MF_PAX_MPROTECT)
62538 + goto out;
62539 +#endif
62540 +
62541 prot |= PROT_EXEC;
62542 acc_mode |= S_IXUGO;
62543 }
62544 @@ -1011,9 +1034,21 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
62545 if (err)
62546 goto out_unlock;
62547
62548 +#ifdef CONFIG_GRKERNSEC
62549 + if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
62550 + shp->shm_perm.cuid, shmid) ||
62551 + !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
62552 + err = -EACCES;
62553 + goto out_unlock;
62554 + }
62555 +#endif
62556 +
62557 path = shp->shm_file->f_path;
62558 path_get(&path);
62559 shp->shm_nattch++;
62560 +#ifdef CONFIG_GRKERNSEC
62561 + shp->shm_lapid = current->pid;
62562 +#endif
62563 size = i_size_read(path.dentry->d_inode);
62564 shm_unlock(shp);
62565
62566 diff --git a/kernel/acct.c b/kernel/acct.c
62567 index fa7eb3d..7faf116 100644
62568 --- a/kernel/acct.c
62569 +++ b/kernel/acct.c
62570 @@ -570,7 +570,7 @@ static void do_acct_process(struct bsd_acct_struct *acct,
62571 */
62572 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
62573 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
62574 - file->f_op->write(file, (char *)&ac,
62575 + file->f_op->write(file, (char __force_user *)&ac,
62576 sizeof(acct_t), &file->f_pos);
62577 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
62578 set_fs(fs);
62579 diff --git a/kernel/audit.c b/kernel/audit.c
62580 index 09fae26..ed71d5b 100644
62581 --- a/kernel/audit.c
62582 +++ b/kernel/audit.c
62583 @@ -115,7 +115,7 @@ u32 audit_sig_sid = 0;
62584 3) suppressed due to audit_rate_limit
62585 4) suppressed due to audit_backlog_limit
62586 */
62587 -static atomic_t audit_lost = ATOMIC_INIT(0);
62588 +static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
62589
62590 /* The netlink socket. */
62591 static struct sock *audit_sock;
62592 @@ -237,7 +237,7 @@ void audit_log_lost(const char *message)
62593 unsigned long now;
62594 int print;
62595
62596 - atomic_inc(&audit_lost);
62597 + atomic_inc_unchecked(&audit_lost);
62598
62599 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
62600
62601 @@ -256,7 +256,7 @@ void audit_log_lost(const char *message)
62602 printk(KERN_WARNING
62603 "audit: audit_lost=%d audit_rate_limit=%d "
62604 "audit_backlog_limit=%d\n",
62605 - atomic_read(&audit_lost),
62606 + atomic_read_unchecked(&audit_lost),
62607 audit_rate_limit,
62608 audit_backlog_limit);
62609 audit_panic(message);
62610 @@ -689,7 +689,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
62611 status_set.pid = audit_pid;
62612 status_set.rate_limit = audit_rate_limit;
62613 status_set.backlog_limit = audit_backlog_limit;
62614 - status_set.lost = atomic_read(&audit_lost);
62615 + status_set.lost = atomic_read_unchecked(&audit_lost);
62616 status_set.backlog = skb_queue_len(&audit_skb_queue);
62617 audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_GET, 0, 0,
62618 &status_set, sizeof(status_set));
62619 @@ -1260,12 +1260,13 @@ static void audit_log_vformat(struct audit_buffer *ab, const char *fmt,
62620 avail = audit_expand(ab,
62621 max_t(unsigned, AUDIT_BUFSIZ, 1+len-avail));
62622 if (!avail)
62623 - goto out;
62624 + goto out_va_end;
62625 len = vsnprintf(skb_tail_pointer(skb), avail, fmt, args2);
62626 }
62627 - va_end(args2);
62628 if (len > 0)
62629 skb_put(skb, len);
62630 +out_va_end:
62631 + va_end(args2);
62632 out:
62633 return;
62634 }
62635 diff --git a/kernel/auditsc.c b/kernel/auditsc.c
62636 index 47b7fc1..c003c33 100644
62637 --- a/kernel/auditsc.c
62638 +++ b/kernel/auditsc.c
62639 @@ -1166,8 +1166,8 @@ static void audit_log_execve_info(struct audit_context *context,
62640 struct audit_buffer **ab,
62641 struct audit_aux_data_execve *axi)
62642 {
62643 - int i;
62644 - size_t len, len_sent = 0;
62645 + int i, len;
62646 + size_t len_sent = 0;
62647 const char __user *p;
62648 char *buf;
62649
62650 @@ -2118,7 +2118,7 @@ int auditsc_get_stamp(struct audit_context *ctx,
62651 }
62652
62653 /* global counter which is incremented every time something logs in */
62654 -static atomic_t session_id = ATOMIC_INIT(0);
62655 +static atomic_unchecked_t session_id = ATOMIC_INIT(0);
62656
62657 /**
62658 * audit_set_loginuid - set a task's audit_context loginuid
62659 @@ -2131,7 +2131,7 @@ static atomic_t session_id = ATOMIC_INIT(0);
62660 */
62661 int audit_set_loginuid(struct task_struct *task, uid_t loginuid)
62662 {
62663 - unsigned int sessionid = atomic_inc_return(&session_id);
62664 + unsigned int sessionid = atomic_inc_return_unchecked(&session_id);
62665 struct audit_context *context = task->audit_context;
62666
62667 if (context && context->in_syscall) {
62668 diff --git a/kernel/capability.c b/kernel/capability.c
62669 index b463871..fa3ea1f 100644
62670 --- a/kernel/capability.c
62671 +++ b/kernel/capability.c
62672 @@ -202,6 +202,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_t, header, cap_user_data_t, dataptr)
62673 * before modification is attempted and the application
62674 * fails.
62675 */
62676 + if (tocopy > ARRAY_SIZE(kdata))
62677 + return -EFAULT;
62678 +
62679 if (copy_to_user(dataptr, kdata, tocopy
62680 * sizeof(struct __user_cap_data_struct))) {
62681 return -EFAULT;
62682 @@ -374,7 +377,7 @@ bool ns_capable(struct user_namespace *ns, int cap)
62683 BUG();
62684 }
62685
62686 - if (security_capable(ns, current_cred(), cap) == 0) {
62687 + if (security_capable(ns, current_cred(), cap) == 0 && gr_is_capable(cap)) {
62688 current->flags |= PF_SUPERPRIV;
62689 return true;
62690 }
62691 @@ -382,6 +385,27 @@ bool ns_capable(struct user_namespace *ns, int cap)
62692 }
62693 EXPORT_SYMBOL(ns_capable);
62694
62695 +bool ns_capable_nolog(struct user_namespace *ns, int cap)
62696 +{
62697 + if (unlikely(!cap_valid(cap))) {
62698 + printk(KERN_CRIT "capable() called with invalid cap=%u\n", cap);
62699 + BUG();
62700 + }
62701 +
62702 + if (security_capable(ns, current_cred(), cap) == 0 && gr_is_capable_nolog(cap)) {
62703 + current->flags |= PF_SUPERPRIV;
62704 + return true;
62705 + }
62706 + return false;
62707 +}
62708 +EXPORT_SYMBOL(ns_capable_nolog);
62709 +
62710 +bool capable_nolog(int cap)
62711 +{
62712 + return ns_capable_nolog(&init_user_ns, cap);
62713 +}
62714 +EXPORT_SYMBOL(capable_nolog);
62715 +
62716 /**
62717 * task_ns_capable - Determine whether current task has a superior
62718 * capability targeted at a specific task's user namespace.
62719 @@ -396,6 +420,12 @@ bool task_ns_capable(struct task_struct *t, int cap)
62720 }
62721 EXPORT_SYMBOL(task_ns_capable);
62722
62723 +bool task_ns_capable_nolog(struct task_struct *t, int cap)
62724 +{
62725 + return ns_capable_nolog(task_cred_xxx(t, user)->user_ns, cap);
62726 +}
62727 +EXPORT_SYMBOL(task_ns_capable_nolog);
62728 +
62729 /**
62730 * nsown_capable - Check superior capability to one's own user_ns
62731 * @cap: The capability in question
62732 diff --git a/kernel/compat.c b/kernel/compat.c
62733 index f346ced..aa2b1f4 100644
62734 --- a/kernel/compat.c
62735 +++ b/kernel/compat.c
62736 @@ -13,6 +13,7 @@
62737
62738 #include <linux/linkage.h>
62739 #include <linux/compat.h>
62740 +#include <linux/module.h>
62741 #include <linux/errno.h>
62742 #include <linux/time.h>
62743 #include <linux/signal.h>
62744 @@ -168,7 +169,7 @@ static long compat_nanosleep_restart(struct restart_block *restart)
62745 mm_segment_t oldfs;
62746 long ret;
62747
62748 - restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
62749 + restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt;
62750 oldfs = get_fs();
62751 set_fs(KERNEL_DS);
62752 ret = hrtimer_nanosleep_restart(restart);
62753 @@ -200,7 +201,7 @@ asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp,
62754 oldfs = get_fs();
62755 set_fs(KERNEL_DS);
62756 ret = hrtimer_nanosleep(&tu,
62757 - rmtp ? (struct timespec __user *)&rmt : NULL,
62758 + rmtp ? (struct timespec __force_user *)&rmt : NULL,
62759 HRTIMER_MODE_REL, CLOCK_MONOTONIC);
62760 set_fs(oldfs);
62761
62762 @@ -309,7 +310,7 @@ asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set)
62763 mm_segment_t old_fs = get_fs();
62764
62765 set_fs(KERNEL_DS);
62766 - ret = sys_sigpending((old_sigset_t __user *) &s);
62767 + ret = sys_sigpending((old_sigset_t __force_user *) &s);
62768 set_fs(old_fs);
62769 if (ret == 0)
62770 ret = put_user(s, set);
62771 @@ -332,8 +333,8 @@ asmlinkage long compat_sys_sigprocmask(int how, compat_old_sigset_t __user *set,
62772 old_fs = get_fs();
62773 set_fs(KERNEL_DS);
62774 ret = sys_sigprocmask(how,
62775 - set ? (old_sigset_t __user *) &s : NULL,
62776 - oset ? (old_sigset_t __user *) &s : NULL);
62777 + set ? (old_sigset_t __force_user *) &s : NULL,
62778 + oset ? (old_sigset_t __force_user *) &s : NULL);
62779 set_fs(old_fs);
62780 if (ret == 0)
62781 if (oset)
62782 @@ -370,7 +371,7 @@ asmlinkage long compat_sys_old_getrlimit(unsigned int resource,
62783 mm_segment_t old_fs = get_fs();
62784
62785 set_fs(KERNEL_DS);
62786 - ret = sys_old_getrlimit(resource, &r);
62787 + ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r);
62788 set_fs(old_fs);
62789
62790 if (!ret) {
62791 @@ -442,7 +443,7 @@ asmlinkage long compat_sys_getrusage(int who, struct compat_rusage __user *ru)
62792 mm_segment_t old_fs = get_fs();
62793
62794 set_fs(KERNEL_DS);
62795 - ret = sys_getrusage(who, (struct rusage __user *) &r);
62796 + ret = sys_getrusage(who, (struct rusage __force_user *) &r);
62797 set_fs(old_fs);
62798
62799 if (ret)
62800 @@ -469,8 +470,8 @@ compat_sys_wait4(compat_pid_t pid, compat_uint_t __user *stat_addr, int options,
62801 set_fs (KERNEL_DS);
62802 ret = sys_wait4(pid,
62803 (stat_addr ?
62804 - (unsigned int __user *) &status : NULL),
62805 - options, (struct rusage __user *) &r);
62806 + (unsigned int __force_user *) &status : NULL),
62807 + options, (struct rusage __force_user *) &r);
62808 set_fs (old_fs);
62809
62810 if (ret > 0) {
62811 @@ -495,8 +496,8 @@ asmlinkage long compat_sys_waitid(int which, compat_pid_t pid,
62812 memset(&info, 0, sizeof(info));
62813
62814 set_fs(KERNEL_DS);
62815 - ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
62816 - uru ? (struct rusage __user *)&ru : NULL);
62817 + ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options,
62818 + uru ? (struct rusage __force_user *)&ru : NULL);
62819 set_fs(old_fs);
62820
62821 if ((ret < 0) || (info.si_signo == 0))
62822 @@ -626,8 +627,8 @@ long compat_sys_timer_settime(timer_t timer_id, int flags,
62823 oldfs = get_fs();
62824 set_fs(KERNEL_DS);
62825 err = sys_timer_settime(timer_id, flags,
62826 - (struct itimerspec __user *) &newts,
62827 - (struct itimerspec __user *) &oldts);
62828 + (struct itimerspec __force_user *) &newts,
62829 + (struct itimerspec __force_user *) &oldts);
62830 set_fs(oldfs);
62831 if (!err && old && put_compat_itimerspec(old, &oldts))
62832 return -EFAULT;
62833 @@ -644,7 +645,7 @@ long compat_sys_timer_gettime(timer_t timer_id,
62834 oldfs = get_fs();
62835 set_fs(KERNEL_DS);
62836 err = sys_timer_gettime(timer_id,
62837 - (struct itimerspec __user *) &ts);
62838 + (struct itimerspec __force_user *) &ts);
62839 set_fs(oldfs);
62840 if (!err && put_compat_itimerspec(setting, &ts))
62841 return -EFAULT;
62842 @@ -663,7 +664,7 @@ long compat_sys_clock_settime(clockid_t which_clock,
62843 oldfs = get_fs();
62844 set_fs(KERNEL_DS);
62845 err = sys_clock_settime(which_clock,
62846 - (struct timespec __user *) &ts);
62847 + (struct timespec __force_user *) &ts);
62848 set_fs(oldfs);
62849 return err;
62850 }
62851 @@ -678,7 +679,7 @@ long compat_sys_clock_gettime(clockid_t which_clock,
62852 oldfs = get_fs();
62853 set_fs(KERNEL_DS);
62854 err = sys_clock_gettime(which_clock,
62855 - (struct timespec __user *) &ts);
62856 + (struct timespec __force_user *) &ts);
62857 set_fs(oldfs);
62858 if (!err && put_compat_timespec(&ts, tp))
62859 return -EFAULT;
62860 @@ -698,7 +699,7 @@ long compat_sys_clock_adjtime(clockid_t which_clock,
62861
62862 oldfs = get_fs();
62863 set_fs(KERNEL_DS);
62864 - ret = sys_clock_adjtime(which_clock, (struct timex __user *) &txc);
62865 + ret = sys_clock_adjtime(which_clock, (struct timex __force_user *) &txc);
62866 set_fs(oldfs);
62867
62868 err = compat_put_timex(utp, &txc);
62869 @@ -718,7 +719,7 @@ long compat_sys_clock_getres(clockid_t which_clock,
62870 oldfs = get_fs();
62871 set_fs(KERNEL_DS);
62872 err = sys_clock_getres(which_clock,
62873 - (struct timespec __user *) &ts);
62874 + (struct timespec __force_user *) &ts);
62875 set_fs(oldfs);
62876 if (!err && tp && put_compat_timespec(&ts, tp))
62877 return -EFAULT;
62878 @@ -730,9 +731,9 @@ static long compat_clock_nanosleep_restart(struct restart_block *restart)
62879 long err;
62880 mm_segment_t oldfs;
62881 struct timespec tu;
62882 - struct compat_timespec *rmtp = restart->nanosleep.compat_rmtp;
62883 + struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;
62884
62885 - restart->nanosleep.rmtp = (struct timespec __user *) &tu;
62886 + restart->nanosleep.rmtp = (struct timespec __force_user *) &tu;
62887 oldfs = get_fs();
62888 set_fs(KERNEL_DS);
62889 err = clock_nanosleep_restart(restart);
62890 @@ -764,8 +765,8 @@ long compat_sys_clock_nanosleep(clockid_t which_clock, int flags,
62891 oldfs = get_fs();
62892 set_fs(KERNEL_DS);
62893 err = sys_clock_nanosleep(which_clock, flags,
62894 - (struct timespec __user *) &in,
62895 - (struct timespec __user *) &out);
62896 + (struct timespec __force_user *) &in,
62897 + (struct timespec __force_user *) &out);
62898 set_fs(oldfs);
62899
62900 if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
62901 diff --git a/kernel/configs.c b/kernel/configs.c
62902 index 42e8fa0..9e7406b 100644
62903 --- a/kernel/configs.c
62904 +++ b/kernel/configs.c
62905 @@ -74,8 +74,19 @@ static int __init ikconfig_init(void)
62906 struct proc_dir_entry *entry;
62907
62908 /* create the current config file */
62909 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
62910 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
62911 + entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
62912 + &ikconfig_file_ops);
62913 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
62914 + entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
62915 + &ikconfig_file_ops);
62916 +#endif
62917 +#else
62918 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
62919 &ikconfig_file_ops);
62920 +#endif
62921 +
62922 if (!entry)
62923 return -ENOMEM;
62924
62925 diff --git a/kernel/cred.c b/kernel/cred.c
62926 index 5791612..a3c04dc 100644
62927 --- a/kernel/cred.c
62928 +++ b/kernel/cred.c
62929 @@ -204,6 +204,15 @@ void exit_creds(struct task_struct *tsk)
62930 validate_creds(cred);
62931 put_cred(cred);
62932 }
62933 +
62934 +#ifdef CONFIG_GRKERNSEC_SETXID
62935 + cred = (struct cred *) tsk->delayed_cred;
62936 + if (cred) {
62937 + tsk->delayed_cred = NULL;
62938 + validate_creds(cred);
62939 + put_cred(cred);
62940 + }
62941 +#endif
62942 }
62943
62944 /**
62945 @@ -470,7 +479,7 @@ error_put:
62946 * Always returns 0 thus allowing this function to be tail-called at the end
62947 * of, say, sys_setgid().
62948 */
62949 -int commit_creds(struct cred *new)
62950 +static int __commit_creds(struct cred *new)
62951 {
62952 struct task_struct *task = current;
62953 const struct cred *old = task->real_cred;
62954 @@ -489,6 +498,8 @@ int commit_creds(struct cred *new)
62955
62956 get_cred(new); /* we will require a ref for the subj creds too */
62957
62958 + gr_set_role_label(task, new->uid, new->gid);
62959 +
62960 /* dumpability changes */
62961 if (old->euid != new->euid ||
62962 old->egid != new->egid ||
62963 @@ -538,6 +549,92 @@ int commit_creds(struct cred *new)
62964 put_cred(old);
62965 return 0;
62966 }
62967 +#ifdef CONFIG_GRKERNSEC_SETXID
62968 +extern int set_user(struct cred *new);
62969 +
62970 +void gr_delayed_cred_worker(void)
62971 +{
62972 + const struct cred *new = current->delayed_cred;
62973 + struct cred *ncred;
62974 +
62975 + current->delayed_cred = NULL;
62976 +
62977 + if (current_uid() && new != NULL) {
62978 + // from doing get_cred on it when queueing this
62979 + put_cred(new);
62980 + return;
62981 + } else if (new == NULL)
62982 + return;
62983 +
62984 + ncred = prepare_creds();
62985 + if (!ncred)
62986 + goto die;
62987 + // uids
62988 + ncred->uid = new->uid;
62989 + ncred->euid = new->euid;
62990 + ncred->suid = new->suid;
62991 + ncred->fsuid = new->fsuid;
62992 + // gids
62993 + ncred->gid = new->gid;
62994 + ncred->egid = new->egid;
62995 + ncred->sgid = new->sgid;
62996 + ncred->fsgid = new->fsgid;
62997 + // groups
62998 + if (set_groups(ncred, new->group_info) < 0) {
62999 + abort_creds(ncred);
63000 + goto die;
63001 + }
63002 + // caps
63003 + ncred->securebits = new->securebits;
63004 + ncred->cap_inheritable = new->cap_inheritable;
63005 + ncred->cap_permitted = new->cap_permitted;
63006 + ncred->cap_effective = new->cap_effective;
63007 + ncred->cap_bset = new->cap_bset;
63008 +
63009 + if (set_user(ncred)) {
63010 + abort_creds(ncred);
63011 + goto die;
63012 + }
63013 +
63014 + // from doing get_cred on it when queueing this
63015 + put_cred(new);
63016 +
63017 + __commit_creds(ncred);
63018 + return;
63019 +die:
63020 + // from doing get_cred on it when queueing this
63021 + put_cred(new);
63022 + do_group_exit(SIGKILL);
63023 +}
63024 +#endif
63025 +
63026 +int commit_creds(struct cred *new)
63027 +{
63028 +#ifdef CONFIG_GRKERNSEC_SETXID
63029 + struct task_struct *t;
63030 +
63031 + /* we won't get called with tasklist_lock held for writing
63032 + and interrupts disabled as the cred struct in that case is
63033 + init_cred
63034 + */
63035 + if (grsec_enable_setxid && !current_is_single_threaded() &&
63036 + !current_uid() && new->uid) {
63037 + rcu_read_lock();
63038 + read_lock(&tasklist_lock);
63039 + for (t = next_thread(current); t != current;
63040 + t = next_thread(t)) {
63041 + if (t->delayed_cred == NULL) {
63042 + t->delayed_cred = get_cred(new);
63043 + set_tsk_need_resched(t);
63044 + }
63045 + }
63046 + read_unlock(&tasklist_lock);
63047 + rcu_read_unlock();
63048 + }
63049 +#endif
63050 + return __commit_creds(new);
63051 +}
63052 +
63053 EXPORT_SYMBOL(commit_creds);
63054
63055 /**
63056 diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
63057 index 0d7c087..01b8cef 100644
63058 --- a/kernel/debug/debug_core.c
63059 +++ b/kernel/debug/debug_core.c
63060 @@ -119,7 +119,7 @@ static DEFINE_RAW_SPINLOCK(dbg_slave_lock);
63061 */
63062 static atomic_t masters_in_kgdb;
63063 static atomic_t slaves_in_kgdb;
63064 -static atomic_t kgdb_break_tasklet_var;
63065 +static atomic_unchecked_t kgdb_break_tasklet_var;
63066 atomic_t kgdb_setting_breakpoint;
63067
63068 struct task_struct *kgdb_usethread;
63069 @@ -129,7 +129,7 @@ int kgdb_single_step;
63070 static pid_t kgdb_sstep_pid;
63071
63072 /* to keep track of the CPU which is doing the single stepping*/
63073 -atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
63074 +atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
63075
63076 /*
63077 * If you are debugging a problem where roundup (the collection of
63078 @@ -542,7 +542,7 @@ return_normal:
63079 * kernel will only try for the value of sstep_tries before
63080 * giving up and continuing on.
63081 */
63082 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
63083 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
63084 (kgdb_info[cpu].task &&
63085 kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
63086 atomic_set(&kgdb_active, -1);
63087 @@ -636,8 +636,8 @@ cpu_master_loop:
63088 }
63089
63090 kgdb_restore:
63091 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
63092 - int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
63093 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
63094 + int sstep_cpu = atomic_read_unchecked(&kgdb_cpu_doing_single_step);
63095 if (kgdb_info[sstep_cpu].task)
63096 kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
63097 else
63098 @@ -834,18 +834,18 @@ static void kgdb_unregister_callbacks(void)
63099 static void kgdb_tasklet_bpt(unsigned long ing)
63100 {
63101 kgdb_breakpoint();
63102 - atomic_set(&kgdb_break_tasklet_var, 0);
63103 + atomic_set_unchecked(&kgdb_break_tasklet_var, 0);
63104 }
63105
63106 static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
63107
63108 void kgdb_schedule_breakpoint(void)
63109 {
63110 - if (atomic_read(&kgdb_break_tasklet_var) ||
63111 + if (atomic_read_unchecked(&kgdb_break_tasklet_var) ||
63112 atomic_read(&kgdb_active) != -1 ||
63113 atomic_read(&kgdb_setting_breakpoint))
63114 return;
63115 - atomic_inc(&kgdb_break_tasklet_var);
63116 + atomic_inc_unchecked(&kgdb_break_tasklet_var);
63117 tasklet_schedule(&kgdb_tasklet_breakpoint);
63118 }
63119 EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
63120 diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
63121 index 63786e7..0780cac 100644
63122 --- a/kernel/debug/kdb/kdb_main.c
63123 +++ b/kernel/debug/kdb/kdb_main.c
63124 @@ -1980,7 +1980,7 @@ static int kdb_lsmod(int argc, const char **argv)
63125 list_for_each_entry(mod, kdb_modules, list) {
63126
63127 kdb_printf("%-20s%8u 0x%p ", mod->name,
63128 - mod->core_size, (void *)mod);
63129 + mod->core_size_rx + mod->core_size_rw, (void *)mod);
63130 #ifdef CONFIG_MODULE_UNLOAD
63131 kdb_printf("%4d ", module_refcount(mod));
63132 #endif
63133 @@ -1990,7 +1990,7 @@ static int kdb_lsmod(int argc, const char **argv)
63134 kdb_printf(" (Loading)");
63135 else
63136 kdb_printf(" (Live)");
63137 - kdb_printf(" 0x%p", mod->module_core);
63138 + kdb_printf(" 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
63139
63140 #ifdef CONFIG_MODULE_UNLOAD
63141 {
63142 diff --git a/kernel/events/core.c b/kernel/events/core.c
63143 index 58690af..d903d75 100644
63144 --- a/kernel/events/core.c
63145 +++ b/kernel/events/core.c
63146 @@ -173,7 +173,7 @@ int perf_proc_update_handler(struct ctl_table *table, int write,
63147 return 0;
63148 }
63149
63150 -static atomic64_t perf_event_id;
63151 +static atomic64_unchecked_t perf_event_id;
63152
63153 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
63154 enum event_type_t event_type);
63155 @@ -2540,7 +2540,7 @@ static void __perf_event_read(void *info)
63156
63157 static inline u64 perf_event_count(struct perf_event *event)
63158 {
63159 - return local64_read(&event->count) + atomic64_read(&event->child_count);
63160 + return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count);
63161 }
63162
63163 static u64 perf_event_read(struct perf_event *event)
63164 @@ -3065,9 +3065,9 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
63165 mutex_lock(&event->child_mutex);
63166 total += perf_event_read(event);
63167 *enabled += event->total_time_enabled +
63168 - atomic64_read(&event->child_total_time_enabled);
63169 + atomic64_read_unchecked(&event->child_total_time_enabled);
63170 *running += event->total_time_running +
63171 - atomic64_read(&event->child_total_time_running);
63172 + atomic64_read_unchecked(&event->child_total_time_running);
63173
63174 list_for_each_entry(child, &event->child_list, child_list) {
63175 total += perf_event_read(child);
63176 @@ -3474,10 +3474,10 @@ void perf_event_update_userpage(struct perf_event *event)
63177 userpg->offset -= local64_read(&event->hw.prev_count);
63178
63179 userpg->time_enabled = enabled +
63180 - atomic64_read(&event->child_total_time_enabled);
63181 + atomic64_read_unchecked(&event->child_total_time_enabled);
63182
63183 userpg->time_running = running +
63184 - atomic64_read(&event->child_total_time_running);
63185 + atomic64_read_unchecked(&event->child_total_time_running);
63186
63187 barrier();
63188 ++userpg->lock;
63189 @@ -3906,11 +3906,11 @@ static void perf_output_read_one(struct perf_output_handle *handle,
63190 values[n++] = perf_event_count(event);
63191 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
63192 values[n++] = enabled +
63193 - atomic64_read(&event->child_total_time_enabled);
63194 + atomic64_read_unchecked(&event->child_total_time_enabled);
63195 }
63196 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
63197 values[n++] = running +
63198 - atomic64_read(&event->child_total_time_running);
63199 + atomic64_read_unchecked(&event->child_total_time_running);
63200 }
63201 if (read_format & PERF_FORMAT_ID)
63202 values[n++] = primary_event_id(event);
63203 @@ -4561,12 +4561,12 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
63204 * need to add enough zero bytes after the string to handle
63205 * the 64bit alignment we do later.
63206 */
63207 - buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
63208 + buf = kzalloc(PATH_MAX, GFP_KERNEL);
63209 if (!buf) {
63210 name = strncpy(tmp, "//enomem", sizeof(tmp));
63211 goto got_name;
63212 }
63213 - name = d_path(&file->f_path, buf, PATH_MAX);
63214 + name = d_path(&file->f_path, buf, PATH_MAX - sizeof(u64));
63215 if (IS_ERR(name)) {
63216 name = strncpy(tmp, "//toolong", sizeof(tmp));
63217 goto got_name;
63218 @@ -5921,7 +5921,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
63219 event->parent = parent_event;
63220
63221 event->ns = get_pid_ns(current->nsproxy->pid_ns);
63222 - event->id = atomic64_inc_return(&perf_event_id);
63223 + event->id = atomic64_inc_return_unchecked(&perf_event_id);
63224
63225 event->state = PERF_EVENT_STATE_INACTIVE;
63226
63227 @@ -6443,10 +6443,10 @@ static void sync_child_event(struct perf_event *child_event,
63228 /*
63229 * Add back the child's count to the parent's count:
63230 */
63231 - atomic64_add(child_val, &parent_event->child_count);
63232 - atomic64_add(child_event->total_time_enabled,
63233 + atomic64_add_unchecked(child_val, &parent_event->child_count);
63234 + atomic64_add_unchecked(child_event->total_time_enabled,
63235 &parent_event->child_total_time_enabled);
63236 - atomic64_add(child_event->total_time_running,
63237 + atomic64_add_unchecked(child_event->total_time_running,
63238 &parent_event->child_total_time_running);
63239
63240 /*
63241 diff --git a/kernel/exit.c b/kernel/exit.c
63242 index e6e01b9..619f837 100644
63243 --- a/kernel/exit.c
63244 +++ b/kernel/exit.c
63245 @@ -57,6 +57,10 @@
63246 #include <asm/pgtable.h>
63247 #include <asm/mmu_context.h>
63248
63249 +#ifdef CONFIG_GRKERNSEC
63250 +extern rwlock_t grsec_exec_file_lock;
63251 +#endif
63252 +
63253 static void exit_mm(struct task_struct * tsk);
63254
63255 static void __unhash_process(struct task_struct *p, bool group_dead)
63256 @@ -168,6 +172,10 @@ void release_task(struct task_struct * p)
63257 struct task_struct *leader;
63258 int zap_leader;
63259 repeat:
63260 +#ifdef CONFIG_NET
63261 + gr_del_task_from_ip_table(p);
63262 +#endif
63263 +
63264 /* don't need to get the RCU readlock here - the process is dead and
63265 * can't be modifying its own credentials. But shut RCU-lockdep up */
63266 rcu_read_lock();
63267 @@ -380,7 +388,7 @@ int allow_signal(int sig)
63268 * know it'll be handled, so that they don't get converted to
63269 * SIGKILL or just silently dropped.
63270 */
63271 - current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
63272 + current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
63273 recalc_sigpending();
63274 spin_unlock_irq(&current->sighand->siglock);
63275 return 0;
63276 @@ -416,6 +424,17 @@ void daemonize(const char *name, ...)
63277 vsnprintf(current->comm, sizeof(current->comm), name, args);
63278 va_end(args);
63279
63280 +#ifdef CONFIG_GRKERNSEC
63281 + write_lock(&grsec_exec_file_lock);
63282 + if (current->exec_file) {
63283 + fput(current->exec_file);
63284 + current->exec_file = NULL;
63285 + }
63286 + write_unlock(&grsec_exec_file_lock);
63287 +#endif
63288 +
63289 + gr_set_kernel_label(current);
63290 +
63291 /*
63292 * If we were started as result of loading a module, close all of the
63293 * user space pages. We don't need them, and if we didn't close them
63294 @@ -893,6 +912,8 @@ NORET_TYPE void do_exit(long code)
63295 struct task_struct *tsk = current;
63296 int group_dead;
63297
63298 + set_fs(USER_DS);
63299 +
63300 profile_task_exit(tsk);
63301
63302 WARN_ON(blk_needs_flush_plug(tsk));
63303 @@ -909,7 +930,6 @@ NORET_TYPE void do_exit(long code)
63304 * mm_release()->clear_child_tid() from writing to a user-controlled
63305 * kernel address.
63306 */
63307 - set_fs(USER_DS);
63308
63309 ptrace_event(PTRACE_EVENT_EXIT, code);
63310
63311 @@ -971,6 +991,9 @@ NORET_TYPE void do_exit(long code)
63312 tsk->exit_code = code;
63313 taskstats_exit(tsk, group_dead);
63314
63315 + gr_acl_handle_psacct(tsk, code);
63316 + gr_acl_handle_exit();
63317 +
63318 exit_mm(tsk);
63319
63320 if (group_dead)
63321 diff --git a/kernel/fork.c b/kernel/fork.c
63322 index da4a6a1..c04943c 100644
63323 --- a/kernel/fork.c
63324 +++ b/kernel/fork.c
63325 @@ -280,7 +280,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
63326 *stackend = STACK_END_MAGIC; /* for overflow detection */
63327
63328 #ifdef CONFIG_CC_STACKPROTECTOR
63329 - tsk->stack_canary = get_random_int();
63330 + tsk->stack_canary = pax_get_random_long();
63331 #endif
63332
63333 /*
63334 @@ -304,13 +304,77 @@ out:
63335 }
63336
63337 #ifdef CONFIG_MMU
63338 +static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct vm_area_struct *mpnt)
63339 +{
63340 + struct vm_area_struct *tmp;
63341 + unsigned long charge;
63342 + struct mempolicy *pol;
63343 + struct file *file;
63344 +
63345 + charge = 0;
63346 + if (mpnt->vm_flags & VM_ACCOUNT) {
63347 + unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
63348 + if (security_vm_enough_memory(len))
63349 + goto fail_nomem;
63350 + charge = len;
63351 + }
63352 + tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
63353 + if (!tmp)
63354 + goto fail_nomem;
63355 + *tmp = *mpnt;
63356 + tmp->vm_mm = mm;
63357 + INIT_LIST_HEAD(&tmp->anon_vma_chain);
63358 + pol = mpol_dup(vma_policy(mpnt));
63359 + if (IS_ERR(pol))
63360 + goto fail_nomem_policy;
63361 + vma_set_policy(tmp, pol);
63362 + if (anon_vma_fork(tmp, mpnt))
63363 + goto fail_nomem_anon_vma_fork;
63364 + tmp->vm_flags &= ~VM_LOCKED;
63365 + tmp->vm_next = tmp->vm_prev = NULL;
63366 + tmp->vm_mirror = NULL;
63367 + file = tmp->vm_file;
63368 + if (file) {
63369 + struct inode *inode = file->f_path.dentry->d_inode;
63370 + struct address_space *mapping = file->f_mapping;
63371 +
63372 + get_file(file);
63373 + if (tmp->vm_flags & VM_DENYWRITE)
63374 + atomic_dec(&inode->i_writecount);
63375 + mutex_lock(&mapping->i_mmap_mutex);
63376 + if (tmp->vm_flags & VM_SHARED)
63377 + mapping->i_mmap_writable++;
63378 + flush_dcache_mmap_lock(mapping);
63379 + /* insert tmp into the share list, just after mpnt */
63380 + vma_prio_tree_add(tmp, mpnt);
63381 + flush_dcache_mmap_unlock(mapping);
63382 + mutex_unlock(&mapping->i_mmap_mutex);
63383 + }
63384 +
63385 + /*
63386 + * Clear hugetlb-related page reserves for children. This only
63387 + * affects MAP_PRIVATE mappings. Faults generated by the child
63388 + * are not guaranteed to succeed, even if read-only
63389 + */
63390 + if (is_vm_hugetlb_page(tmp))
63391 + reset_vma_resv_huge_pages(tmp);
63392 +
63393 + return tmp;
63394 +
63395 +fail_nomem_anon_vma_fork:
63396 + mpol_put(pol);
63397 +fail_nomem_policy:
63398 + kmem_cache_free(vm_area_cachep, tmp);
63399 +fail_nomem:
63400 + vm_unacct_memory(charge);
63401 + return NULL;
63402 +}
63403 +
63404 static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
63405 {
63406 struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
63407 struct rb_node **rb_link, *rb_parent;
63408 int retval;
63409 - unsigned long charge;
63410 - struct mempolicy *pol;
63411
63412 down_write(&oldmm->mmap_sem);
63413 flush_cache_dup_mm(oldmm);
63414 @@ -322,8 +386,8 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
63415 mm->locked_vm = 0;
63416 mm->mmap = NULL;
63417 mm->mmap_cache = NULL;
63418 - mm->free_area_cache = oldmm->mmap_base;
63419 - mm->cached_hole_size = ~0UL;
63420 + mm->free_area_cache = oldmm->free_area_cache;
63421 + mm->cached_hole_size = oldmm->cached_hole_size;
63422 mm->map_count = 0;
63423 cpumask_clear(mm_cpumask(mm));
63424 mm->mm_rb = RB_ROOT;
63425 @@ -339,8 +403,6 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
63426
63427 prev = NULL;
63428 for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
63429 - struct file *file;
63430 -
63431 if (mpnt->vm_flags & VM_DONTCOPY) {
63432 long pages = vma_pages(mpnt);
63433 mm->total_vm -= pages;
63434 @@ -348,53 +410,11 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
63435 -pages);
63436 continue;
63437 }
63438 - charge = 0;
63439 - if (mpnt->vm_flags & VM_ACCOUNT) {
63440 - unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
63441 - if (security_vm_enough_memory(len))
63442 - goto fail_nomem;
63443 - charge = len;
63444 + tmp = dup_vma(mm, mpnt);
63445 + if (!tmp) {
63446 + retval = -ENOMEM;
63447 + goto out;
63448 }
63449 - tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
63450 - if (!tmp)
63451 - goto fail_nomem;
63452 - *tmp = *mpnt;
63453 - INIT_LIST_HEAD(&tmp->anon_vma_chain);
63454 - pol = mpol_dup(vma_policy(mpnt));
63455 - retval = PTR_ERR(pol);
63456 - if (IS_ERR(pol))
63457 - goto fail_nomem_policy;
63458 - vma_set_policy(tmp, pol);
63459 - tmp->vm_mm = mm;
63460 - if (anon_vma_fork(tmp, mpnt))
63461 - goto fail_nomem_anon_vma_fork;
63462 - tmp->vm_flags &= ~VM_LOCKED;
63463 - tmp->vm_next = tmp->vm_prev = NULL;
63464 - file = tmp->vm_file;
63465 - if (file) {
63466 - struct inode *inode = file->f_path.dentry->d_inode;
63467 - struct address_space *mapping = file->f_mapping;
63468 -
63469 - get_file(file);
63470 - if (tmp->vm_flags & VM_DENYWRITE)
63471 - atomic_dec(&inode->i_writecount);
63472 - mutex_lock(&mapping->i_mmap_mutex);
63473 - if (tmp->vm_flags & VM_SHARED)
63474 - mapping->i_mmap_writable++;
63475 - flush_dcache_mmap_lock(mapping);
63476 - /* insert tmp into the share list, just after mpnt */
63477 - vma_prio_tree_add(tmp, mpnt);
63478 - flush_dcache_mmap_unlock(mapping);
63479 - mutex_unlock(&mapping->i_mmap_mutex);
63480 - }
63481 -
63482 - /*
63483 - * Clear hugetlb-related page reserves for children. This only
63484 - * affects MAP_PRIVATE mappings. Faults generated by the child
63485 - * are not guaranteed to succeed, even if read-only
63486 - */
63487 - if (is_vm_hugetlb_page(tmp))
63488 - reset_vma_resv_huge_pages(tmp);
63489
63490 /*
63491 * Link in the new vma and copy the page table entries.
63492 @@ -417,6 +437,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
63493 if (retval)
63494 goto out;
63495 }
63496 +
63497 +#ifdef CONFIG_PAX_SEGMEXEC
63498 + if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
63499 + struct vm_area_struct *mpnt_m;
63500 +
63501 + for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
63502 + BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
63503 +
63504 + if (!mpnt->vm_mirror)
63505 + continue;
63506 +
63507 + if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
63508 + BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
63509 + mpnt->vm_mirror = mpnt_m;
63510 + } else {
63511 + BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
63512 + mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
63513 + mpnt_m->vm_mirror->vm_mirror = mpnt_m;
63514 + mpnt->vm_mirror->vm_mirror = mpnt;
63515 + }
63516 + }
63517 + BUG_ON(mpnt_m);
63518 + }
63519 +#endif
63520 +
63521 /* a new mm has just been created */
63522 arch_dup_mmap(oldmm, mm);
63523 retval = 0;
63524 @@ -425,14 +470,6 @@ out:
63525 flush_tlb_mm(oldmm);
63526 up_write(&oldmm->mmap_sem);
63527 return retval;
63528 -fail_nomem_anon_vma_fork:
63529 - mpol_put(pol);
63530 -fail_nomem_policy:
63531 - kmem_cache_free(vm_area_cachep, tmp);
63532 -fail_nomem:
63533 - retval = -ENOMEM;
63534 - vm_unacct_memory(charge);
63535 - goto out;
63536 }
63537
63538 static inline int mm_alloc_pgd(struct mm_struct *mm)
63539 @@ -829,13 +866,14 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
63540 spin_unlock(&fs->lock);
63541 return -EAGAIN;
63542 }
63543 - fs->users++;
63544 + atomic_inc(&fs->users);
63545 spin_unlock(&fs->lock);
63546 return 0;
63547 }
63548 tsk->fs = copy_fs_struct(fs);
63549 if (!tsk->fs)
63550 return -ENOMEM;
63551 + gr_set_chroot_entries(tsk, &tsk->fs->root);
63552 return 0;
63553 }
63554
63555 @@ -1097,6 +1135,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
63556 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
63557 #endif
63558 retval = -EAGAIN;
63559 +
63560 + gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
63561 +
63562 if (atomic_read(&p->real_cred->user->processes) >=
63563 task_rlimit(p, RLIMIT_NPROC)) {
63564 if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
63565 @@ -1256,6 +1297,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
63566 if (clone_flags & CLONE_THREAD)
63567 p->tgid = current->tgid;
63568
63569 + gr_copy_label(p);
63570 +
63571 p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
63572 /*
63573 * Clear TID on mm_release()?
63574 @@ -1418,6 +1461,8 @@ bad_fork_cleanup_count:
63575 bad_fork_free:
63576 free_task(p);
63577 fork_out:
63578 + gr_log_forkfail(retval);
63579 +
63580 return ERR_PTR(retval);
63581 }
63582
63583 @@ -1518,6 +1563,8 @@ long do_fork(unsigned long clone_flags,
63584 if (clone_flags & CLONE_PARENT_SETTID)
63585 put_user(nr, parent_tidptr);
63586
63587 + gr_handle_brute_check();
63588 +
63589 if (clone_flags & CLONE_VFORK) {
63590 p->vfork_done = &vfork;
63591 init_completion(&vfork);
63592 @@ -1627,7 +1674,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
63593 return 0;
63594
63595 /* don't need lock here; in the worst case we'll do useless copy */
63596 - if (fs->users == 1)
63597 + if (atomic_read(&fs->users) == 1)
63598 return 0;
63599
63600 *new_fsp = copy_fs_struct(fs);
63601 @@ -1716,7 +1763,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
63602 fs = current->fs;
63603 spin_lock(&fs->lock);
63604 current->fs = new_fs;
63605 - if (--fs->users)
63606 + gr_set_chroot_entries(current, &current->fs->root);
63607 + if (atomic_dec_return(&fs->users))
63608 new_fs = NULL;
63609 else
63610 new_fs = fs;
63611 diff --git a/kernel/futex.c b/kernel/futex.c
63612 index 1614be2..37abc7e 100644
63613 --- a/kernel/futex.c
63614 +++ b/kernel/futex.c
63615 @@ -54,6 +54,7 @@
63616 #include <linux/mount.h>
63617 #include <linux/pagemap.h>
63618 #include <linux/syscalls.h>
63619 +#include <linux/ptrace.h>
63620 #include <linux/signal.h>
63621 #include <linux/export.h>
63622 #include <linux/magic.h>
63623 @@ -238,6 +239,11 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
63624 struct page *page, *page_head;
63625 int err, ro = 0;
63626
63627 +#ifdef CONFIG_PAX_SEGMEXEC
63628 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
63629 + return -EFAULT;
63630 +#endif
63631 +
63632 /*
63633 * The futex address must be "naturally" aligned.
63634 */
63635 @@ -2459,6 +2465,10 @@ SYSCALL_DEFINE3(get_robust_list, int, pid,
63636 if (!p)
63637 goto err_unlock;
63638 ret = -EPERM;
63639 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
63640 + if (!ptrace_may_access(p, PTRACE_MODE_READ))
63641 + goto err_unlock;
63642 +#endif
63643 pcred = __task_cred(p);
63644 /* If victim is in different user_ns, then uids are not
63645 comparable, so we must have CAP_SYS_PTRACE */
63646 @@ -2724,6 +2734,7 @@ static int __init futex_init(void)
63647 {
63648 u32 curval;
63649 int i;
63650 + mm_segment_t oldfs;
63651
63652 /*
63653 * This will fail and we want it. Some arch implementations do
63654 @@ -2735,8 +2746,11 @@ static int __init futex_init(void)
63655 * implementation, the non-functional ones will return
63656 * -ENOSYS.
63657 */
63658 + oldfs = get_fs();
63659 + set_fs(USER_DS);
63660 if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
63661 futex_cmpxchg_enabled = 1;
63662 + set_fs(oldfs);
63663
63664 for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
63665 plist_head_init(&futex_queues[i].chain);
63666 diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
63667 index 5f9e689..582d46d 100644
63668 --- a/kernel/futex_compat.c
63669 +++ b/kernel/futex_compat.c
63670 @@ -10,6 +10,7 @@
63671 #include <linux/compat.h>
63672 #include <linux/nsproxy.h>
63673 #include <linux/futex.h>
63674 +#include <linux/ptrace.h>
63675
63676 #include <asm/uaccess.h>
63677
63678 @@ -136,7 +137,8 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
63679 {
63680 struct compat_robust_list_head __user *head;
63681 unsigned long ret;
63682 - const struct cred *cred = current_cred(), *pcred;
63683 + const struct cred *cred = current_cred();
63684 + const struct cred *pcred;
63685
63686 if (!futex_cmpxchg_enabled)
63687 return -ENOSYS;
63688 @@ -152,6 +154,10 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
63689 if (!p)
63690 goto err_unlock;
63691 ret = -EPERM;
63692 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
63693 + if (!ptrace_may_access(p, PTRACE_MODE_READ))
63694 + goto err_unlock;
63695 +#endif
63696 pcred = __task_cred(p);
63697 /* If victim is in different user_ns, then uids are not
63698 comparable, so we must have CAP_SYS_PTRACE */
63699 diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c
63700 index 9b22d03..6295b62 100644
63701 --- a/kernel/gcov/base.c
63702 +++ b/kernel/gcov/base.c
63703 @@ -102,11 +102,6 @@ void gcov_enable_events(void)
63704 }
63705
63706 #ifdef CONFIG_MODULES
63707 -static inline int within(void *addr, void *start, unsigned long size)
63708 -{
63709 - return ((addr >= start) && (addr < start + size));
63710 -}
63711 -
63712 /* Update list and generate events when modules are unloaded. */
63713 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
63714 void *data)
63715 @@ -121,7 +116,7 @@ static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
63716 prev = NULL;
63717 /* Remove entries located in module from linked list. */
63718 for (info = gcov_info_head; info; info = info->next) {
63719 - if (within(info, mod->module_core, mod->core_size)) {
63720 + if (within_module_core_rw((unsigned long)info, mod)) {
63721 if (prev)
63722 prev->next = info->next;
63723 else
63724 diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
63725 index ae34bf5..4e2f3d0 100644
63726 --- a/kernel/hrtimer.c
63727 +++ b/kernel/hrtimer.c
63728 @@ -1393,7 +1393,7 @@ void hrtimer_peek_ahead_timers(void)
63729 local_irq_restore(flags);
63730 }
63731
63732 -static void run_hrtimer_softirq(struct softirq_action *h)
63733 +static void run_hrtimer_softirq(void)
63734 {
63735 hrtimer_peek_ahead_timers();
63736 }
63737 diff --git a/kernel/jump_label.c b/kernel/jump_label.c
63738 index 66ff710..05a5128 100644
63739 --- a/kernel/jump_label.c
63740 +++ b/kernel/jump_label.c
63741 @@ -55,7 +55,9 @@ jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
63742
63743 size = (((unsigned long)stop - (unsigned long)start)
63744 / sizeof(struct jump_entry));
63745 + pax_open_kernel();
63746 sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
63747 + pax_close_kernel();
63748 }
63749
63750 static void jump_label_update(struct jump_label_key *key, int enable);
63751 @@ -303,10 +305,12 @@ static void jump_label_invalidate_module_init(struct module *mod)
63752 struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
63753 struct jump_entry *iter;
63754
63755 + pax_open_kernel();
63756 for (iter = iter_start; iter < iter_stop; iter++) {
63757 if (within_module_init(iter->code, mod))
63758 iter->code = 0;
63759 }
63760 + pax_close_kernel();
63761 }
63762
63763 static int
63764 diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
63765 index 079f1d3..a407562 100644
63766 --- a/kernel/kallsyms.c
63767 +++ b/kernel/kallsyms.c
63768 @@ -11,6 +11,9 @@
63769 * Changed the compression method from stem compression to "table lookup"
63770 * compression (see scripts/kallsyms.c for a more complete description)
63771 */
63772 +#ifdef CONFIG_GRKERNSEC_HIDESYM
63773 +#define __INCLUDED_BY_HIDESYM 1
63774 +#endif
63775 #include <linux/kallsyms.h>
63776 #include <linux/module.h>
63777 #include <linux/init.h>
63778 @@ -53,12 +56,33 @@ extern const unsigned long kallsyms_markers[] __attribute__((weak));
63779
63780 static inline int is_kernel_inittext(unsigned long addr)
63781 {
63782 + if (system_state != SYSTEM_BOOTING)
63783 + return 0;
63784 +
63785 if (addr >= (unsigned long)_sinittext
63786 && addr <= (unsigned long)_einittext)
63787 return 1;
63788 return 0;
63789 }
63790
63791 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
63792 +#ifdef CONFIG_MODULES
63793 +static inline int is_module_text(unsigned long addr)
63794 +{
63795 + if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
63796 + return 1;
63797 +
63798 + addr = ktla_ktva(addr);
63799 + return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
63800 +}
63801 +#else
63802 +static inline int is_module_text(unsigned long addr)
63803 +{
63804 + return 0;
63805 +}
63806 +#endif
63807 +#endif
63808 +
63809 static inline int is_kernel_text(unsigned long addr)
63810 {
63811 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
63812 @@ -69,13 +93,28 @@ static inline int is_kernel_text(unsigned long addr)
63813
63814 static inline int is_kernel(unsigned long addr)
63815 {
63816 +
63817 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
63818 + if (is_kernel_text(addr) || is_kernel_inittext(addr))
63819 + return 1;
63820 +
63821 + if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
63822 +#else
63823 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
63824 +#endif
63825 +
63826 return 1;
63827 return in_gate_area_no_mm(addr);
63828 }
63829
63830 static int is_ksym_addr(unsigned long addr)
63831 {
63832 +
63833 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
63834 + if (is_module_text(addr))
63835 + return 0;
63836 +#endif
63837 +
63838 if (all_var)
63839 return is_kernel(addr);
63840
63841 @@ -454,7 +493,6 @@ static unsigned long get_ksymbol_core(struct kallsym_iter *iter)
63842
63843 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
63844 {
63845 - iter->name[0] = '\0';
63846 iter->nameoff = get_symbol_offset(new_pos);
63847 iter->pos = new_pos;
63848 }
63849 @@ -502,6 +540,11 @@ static int s_show(struct seq_file *m, void *p)
63850 {
63851 struct kallsym_iter *iter = m->private;
63852
63853 +#ifdef CONFIG_GRKERNSEC_HIDESYM
63854 + if (current_uid())
63855 + return 0;
63856 +#endif
63857 +
63858 /* Some debugging symbols have no name. Ignore them. */
63859 if (!iter->name[0])
63860 return 0;
63861 @@ -540,7 +583,7 @@ static int kallsyms_open(struct inode *inode, struct file *file)
63862 struct kallsym_iter *iter;
63863 int ret;
63864
63865 - iter = kmalloc(sizeof(*iter), GFP_KERNEL);
63866 + iter = kzalloc(sizeof(*iter), GFP_KERNEL);
63867 if (!iter)
63868 return -ENOMEM;
63869 reset_iter(iter, 0);
63870 diff --git a/kernel/kexec.c b/kernel/kexec.c
63871 index dc7bc08..4601964 100644
63872 --- a/kernel/kexec.c
63873 +++ b/kernel/kexec.c
63874 @@ -1048,7 +1048,8 @@ asmlinkage long compat_sys_kexec_load(unsigned long entry,
63875 unsigned long flags)
63876 {
63877 struct compat_kexec_segment in;
63878 - struct kexec_segment out, __user *ksegments;
63879 + struct kexec_segment out;
63880 + struct kexec_segment __user *ksegments;
63881 unsigned long i, result;
63882
63883 /* Don't allow clients that don't understand the native
63884 diff --git a/kernel/kmod.c b/kernel/kmod.c
63885 index a4bea97..7a1ae9a 100644
63886 --- a/kernel/kmod.c
63887 +++ b/kernel/kmod.c
63888 @@ -73,13 +73,12 @@ char modprobe_path[KMOD_PATH_LEN] = "/sbin/modprobe";
63889 * If module auto-loading support is disabled then this function
63890 * becomes a no-operation.
63891 */
63892 -int __request_module(bool wait, const char *fmt, ...)
63893 +static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
63894 {
63895 - va_list args;
63896 char module_name[MODULE_NAME_LEN];
63897 unsigned int max_modprobes;
63898 int ret;
63899 - char *argv[] = { modprobe_path, "-q", "--", module_name, NULL };
63900 + char *argv[] = { modprobe_path, "-q", "--", module_name, module_param, NULL };
63901 static char *envp[] = { "HOME=/",
63902 "TERM=linux",
63903 "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
63904 @@ -88,9 +87,7 @@ int __request_module(bool wait, const char *fmt, ...)
63905 #define MAX_KMOD_CONCURRENT 50 /* Completely arbitrary value - KAO */
63906 static int kmod_loop_msg;
63907
63908 - va_start(args, fmt);
63909 - ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
63910 - va_end(args);
63911 + ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
63912 if (ret >= MODULE_NAME_LEN)
63913 return -ENAMETOOLONG;
63914
63915 @@ -98,6 +95,20 @@ int __request_module(bool wait, const char *fmt, ...)
63916 if (ret)
63917 return ret;
63918
63919 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
63920 + if (!current_uid()) {
63921 + /* hack to workaround consolekit/udisks stupidity */
63922 + read_lock(&tasklist_lock);
63923 + if (!strcmp(current->comm, "mount") &&
63924 + current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
63925 + read_unlock(&tasklist_lock);
63926 + printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
63927 + return -EPERM;
63928 + }
63929 + read_unlock(&tasklist_lock);
63930 + }
63931 +#endif
63932 +
63933 /* If modprobe needs a service that is in a module, we get a recursive
63934 * loop. Limit the number of running kmod threads to max_threads/2 or
63935 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
63936 @@ -133,6 +144,47 @@ int __request_module(bool wait, const char *fmt, ...)
63937 atomic_dec(&kmod_concurrent);
63938 return ret;
63939 }
63940 +
63941 +int ___request_module(bool wait, char *module_param, const char *fmt, ...)
63942 +{
63943 + va_list args;
63944 + int ret;
63945 +
63946 + va_start(args, fmt);
63947 + ret = ____request_module(wait, module_param, fmt, args);
63948 + va_end(args);
63949 +
63950 + return ret;
63951 +}
63952 +
63953 +int __request_module(bool wait, const char *fmt, ...)
63954 +{
63955 + va_list args;
63956 + int ret;
63957 +
63958 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
63959 + if (current_uid()) {
63960 + char module_param[MODULE_NAME_LEN];
63961 +
63962 + memset(module_param, 0, sizeof(module_param));
63963 +
63964 + snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", current_uid());
63965 +
63966 + va_start(args, fmt);
63967 + ret = ____request_module(wait, module_param, fmt, args);
63968 + va_end(args);
63969 +
63970 + return ret;
63971 + }
63972 +#endif
63973 +
63974 + va_start(args, fmt);
63975 + ret = ____request_module(wait, NULL, fmt, args);
63976 + va_end(args);
63977 +
63978 + return ret;
63979 +}
63980 +
63981 EXPORT_SYMBOL(__request_module);
63982 #endif /* CONFIG_MODULES */
63983
63984 @@ -222,7 +274,7 @@ static int wait_for_helper(void *data)
63985 *
63986 * Thus the __user pointer cast is valid here.
63987 */
63988 - sys_wait4(pid, (int __user *)&ret, 0, NULL);
63989 + sys_wait4(pid, (int __force_user *)&ret, 0, NULL);
63990
63991 /*
63992 * If ret is 0, either ____call_usermodehelper failed and the
63993 diff --git a/kernel/kprobes.c b/kernel/kprobes.c
63994 index 52fd049..3def6a8 100644
63995 --- a/kernel/kprobes.c
63996 +++ b/kernel/kprobes.c
63997 @@ -185,7 +185,7 @@ static kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c)
63998 * kernel image and loaded module images reside. This is required
63999 * so x86_64 can correctly handle the %rip-relative fixups.
64000 */
64001 - kip->insns = module_alloc(PAGE_SIZE);
64002 + kip->insns = module_alloc_exec(PAGE_SIZE);
64003 if (!kip->insns) {
64004 kfree(kip);
64005 return NULL;
64006 @@ -225,7 +225,7 @@ static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
64007 */
64008 if (!list_is_singular(&kip->list)) {
64009 list_del(&kip->list);
64010 - module_free(NULL, kip->insns);
64011 + module_free_exec(NULL, kip->insns);
64012 kfree(kip);
64013 }
64014 return 1;
64015 @@ -1949,7 +1949,7 @@ static int __init init_kprobes(void)
64016 {
64017 int i, err = 0;
64018 unsigned long offset = 0, size = 0;
64019 - char *modname, namebuf[128];
64020 + char *modname, namebuf[KSYM_NAME_LEN];
64021 const char *symbol_name;
64022 void *addr;
64023 struct kprobe_blackpoint *kb;
64024 @@ -2075,7 +2075,7 @@ static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
64025 const char *sym = NULL;
64026 unsigned int i = *(loff_t *) v;
64027 unsigned long offset = 0;
64028 - char *modname, namebuf[128];
64029 + char *modname, namebuf[KSYM_NAME_LEN];
64030
64031 head = &kprobe_table[i];
64032 preempt_disable();
64033 diff --git a/kernel/lockdep.c b/kernel/lockdep.c
64034 index b2e08c9..01d8049 100644
64035 --- a/kernel/lockdep.c
64036 +++ b/kernel/lockdep.c
64037 @@ -592,6 +592,10 @@ static int static_obj(void *obj)
64038 end = (unsigned long) &_end,
64039 addr = (unsigned long) obj;
64040
64041 +#ifdef CONFIG_PAX_KERNEXEC
64042 + start = ktla_ktva(start);
64043 +#endif
64044 +
64045 /*
64046 * static variable?
64047 */
64048 @@ -731,6 +735,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
64049 if (!static_obj(lock->key)) {
64050 debug_locks_off();
64051 printk("INFO: trying to register non-static key.\n");
64052 + printk("lock:%pS key:%pS.\n", lock, lock->key);
64053 printk("the code is fine but needs lockdep annotation.\n");
64054 printk("turning off the locking correctness validator.\n");
64055 dump_stack();
64056 @@ -3042,7 +3047,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
64057 if (!class)
64058 return 0;
64059 }
64060 - atomic_inc((atomic_t *)&class->ops);
64061 + atomic_inc_unchecked((atomic_unchecked_t *)&class->ops);
64062 if (very_verbose(class)) {
64063 printk("\nacquire class [%p] %s", class->key, class->name);
64064 if (class->name_version > 1)
64065 diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
64066 index 91c32a0..b2c71c5 100644
64067 --- a/kernel/lockdep_proc.c
64068 +++ b/kernel/lockdep_proc.c
64069 @@ -39,7 +39,7 @@ static void l_stop(struct seq_file *m, void *v)
64070
64071 static void print_name(struct seq_file *m, struct lock_class *class)
64072 {
64073 - char str[128];
64074 + char str[KSYM_NAME_LEN];
64075 const char *name = class->name;
64076
64077 if (!name) {
64078 diff --git a/kernel/module.c b/kernel/module.c
64079 index 178333c..04e3408 100644
64080 --- a/kernel/module.c
64081 +++ b/kernel/module.c
64082 @@ -58,6 +58,7 @@
64083 #include <linux/jump_label.h>
64084 #include <linux/pfn.h>
64085 #include <linux/bsearch.h>
64086 +#include <linux/grsecurity.h>
64087
64088 #define CREATE_TRACE_POINTS
64089 #include <trace/events/module.h>
64090 @@ -119,7 +120,8 @@ static BLOCKING_NOTIFIER_HEAD(module_notify_list);
64091
64092 /* Bounds of module allocation, for speeding __module_address.
64093 * Protected by module_mutex. */
64094 -static unsigned long module_addr_min = -1UL, module_addr_max = 0;
64095 +static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
64096 +static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
64097
64098 int register_module_notifier(struct notifier_block * nb)
64099 {
64100 @@ -284,7 +286,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
64101 return true;
64102
64103 list_for_each_entry_rcu(mod, &modules, list) {
64104 - struct symsearch arr[] = {
64105 + struct symsearch modarr[] = {
64106 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
64107 NOT_GPL_ONLY, false },
64108 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
64109 @@ -306,7 +308,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
64110 #endif
64111 };
64112
64113 - if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
64114 + if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
64115 return true;
64116 }
64117 return false;
64118 @@ -438,7 +440,7 @@ static inline void __percpu *mod_percpu(struct module *mod)
64119 static int percpu_modalloc(struct module *mod,
64120 unsigned long size, unsigned long align)
64121 {
64122 - if (align > PAGE_SIZE) {
64123 + if (align-1 >= PAGE_SIZE) {
64124 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
64125 mod->name, align, PAGE_SIZE);
64126 align = PAGE_SIZE;
64127 @@ -1183,7 +1185,7 @@ resolve_symbol_wait(struct module *mod,
64128 */
64129 #ifdef CONFIG_SYSFS
64130
64131 -#ifdef CONFIG_KALLSYMS
64132 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
64133 static inline bool sect_empty(const Elf_Shdr *sect)
64134 {
64135 return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
64136 @@ -1649,21 +1651,21 @@ static void set_section_ro_nx(void *base,
64137
64138 static void unset_module_core_ro_nx(struct module *mod)
64139 {
64140 - set_page_attributes(mod->module_core + mod->core_text_size,
64141 - mod->module_core + mod->core_size,
64142 + set_page_attributes(mod->module_core_rw,
64143 + mod->module_core_rw + mod->core_size_rw,
64144 set_memory_x);
64145 - set_page_attributes(mod->module_core,
64146 - mod->module_core + mod->core_ro_size,
64147 + set_page_attributes(mod->module_core_rx,
64148 + mod->module_core_rx + mod->core_size_rx,
64149 set_memory_rw);
64150 }
64151
64152 static void unset_module_init_ro_nx(struct module *mod)
64153 {
64154 - set_page_attributes(mod->module_init + mod->init_text_size,
64155 - mod->module_init + mod->init_size,
64156 + set_page_attributes(mod->module_init_rw,
64157 + mod->module_init_rw + mod->init_size_rw,
64158 set_memory_x);
64159 - set_page_attributes(mod->module_init,
64160 - mod->module_init + mod->init_ro_size,
64161 + set_page_attributes(mod->module_init_rx,
64162 + mod->module_init_rx + mod->init_size_rx,
64163 set_memory_rw);
64164 }
64165
64166 @@ -1674,14 +1676,14 @@ void set_all_modules_text_rw(void)
64167
64168 mutex_lock(&module_mutex);
64169 list_for_each_entry_rcu(mod, &modules, list) {
64170 - if ((mod->module_core) && (mod->core_text_size)) {
64171 - set_page_attributes(mod->module_core,
64172 - mod->module_core + mod->core_text_size,
64173 + if ((mod->module_core_rx) && (mod->core_size_rx)) {
64174 + set_page_attributes(mod->module_core_rx,
64175 + mod->module_core_rx + mod->core_size_rx,
64176 set_memory_rw);
64177 }
64178 - if ((mod->module_init) && (mod->init_text_size)) {
64179 - set_page_attributes(mod->module_init,
64180 - mod->module_init + mod->init_text_size,
64181 + if ((mod->module_init_rx) && (mod->init_size_rx)) {
64182 + set_page_attributes(mod->module_init_rx,
64183 + mod->module_init_rx + mod->init_size_rx,
64184 set_memory_rw);
64185 }
64186 }
64187 @@ -1695,14 +1697,14 @@ void set_all_modules_text_ro(void)
64188
64189 mutex_lock(&module_mutex);
64190 list_for_each_entry_rcu(mod, &modules, list) {
64191 - if ((mod->module_core) && (mod->core_text_size)) {
64192 - set_page_attributes(mod->module_core,
64193 - mod->module_core + mod->core_text_size,
64194 + if ((mod->module_core_rx) && (mod->core_size_rx)) {
64195 + set_page_attributes(mod->module_core_rx,
64196 + mod->module_core_rx + mod->core_size_rx,
64197 set_memory_ro);
64198 }
64199 - if ((mod->module_init) && (mod->init_text_size)) {
64200 - set_page_attributes(mod->module_init,
64201 - mod->module_init + mod->init_text_size,
64202 + if ((mod->module_init_rx) && (mod->init_size_rx)) {
64203 + set_page_attributes(mod->module_init_rx,
64204 + mod->module_init_rx + mod->init_size_rx,
64205 set_memory_ro);
64206 }
64207 }
64208 @@ -1748,16 +1750,19 @@ static void free_module(struct module *mod)
64209
64210 /* This may be NULL, but that's OK */
64211 unset_module_init_ro_nx(mod);
64212 - module_free(mod, mod->module_init);
64213 + module_free(mod, mod->module_init_rw);
64214 + module_free_exec(mod, mod->module_init_rx);
64215 kfree(mod->args);
64216 percpu_modfree(mod);
64217
64218 /* Free lock-classes: */
64219 - lockdep_free_key_range(mod->module_core, mod->core_size);
64220 + lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
64221 + lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
64222
64223 /* Finally, free the core (containing the module structure) */
64224 unset_module_core_ro_nx(mod);
64225 - module_free(mod, mod->module_core);
64226 + module_free_exec(mod, mod->module_core_rx);
64227 + module_free(mod, mod->module_core_rw);
64228
64229 #ifdef CONFIG_MPU
64230 update_protections(current->mm);
64231 @@ -1826,10 +1831,31 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
64232 unsigned int i;
64233 int ret = 0;
64234 const struct kernel_symbol *ksym;
64235 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
64236 + int is_fs_load = 0;
64237 + int register_filesystem_found = 0;
64238 + char *p;
64239 +
64240 + p = strstr(mod->args, "grsec_modharden_fs");
64241 + if (p) {
64242 + char *endptr = p + strlen("grsec_modharden_fs");
64243 + /* copy \0 as well */
64244 + memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
64245 + is_fs_load = 1;
64246 + }
64247 +#endif
64248
64249 for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
64250 const char *name = info->strtab + sym[i].st_name;
64251
64252 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
64253 + /* it's a real shame this will never get ripped and copied
64254 + upstream! ;(
64255 + */
64256 + if (is_fs_load && !strcmp(name, "register_filesystem"))
64257 + register_filesystem_found = 1;
64258 +#endif
64259 +
64260 switch (sym[i].st_shndx) {
64261 case SHN_COMMON:
64262 /* We compiled with -fno-common. These are not
64263 @@ -1850,7 +1876,9 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
64264 ksym = resolve_symbol_wait(mod, info, name);
64265 /* Ok if resolved. */
64266 if (ksym && !IS_ERR(ksym)) {
64267 + pax_open_kernel();
64268 sym[i].st_value = ksym->value;
64269 + pax_close_kernel();
64270 break;
64271 }
64272
64273 @@ -1869,11 +1897,20 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
64274 secbase = (unsigned long)mod_percpu(mod);
64275 else
64276 secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
64277 + pax_open_kernel();
64278 sym[i].st_value += secbase;
64279 + pax_close_kernel();
64280 break;
64281 }
64282 }
64283
64284 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
64285 + if (is_fs_load && !register_filesystem_found) {
64286 + printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
64287 + ret = -EPERM;
64288 + }
64289 +#endif
64290 +
64291 return ret;
64292 }
64293
64294 @@ -1977,22 +2014,12 @@ static void layout_sections(struct module *mod, struct load_info *info)
64295 || s->sh_entsize != ~0UL
64296 || strstarts(sname, ".init"))
64297 continue;
64298 - s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
64299 + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
64300 + s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
64301 + else
64302 + s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
64303 DEBUGP("\t%s\n", name);
64304 }
64305 - switch (m) {
64306 - case 0: /* executable */
64307 - mod->core_size = debug_align(mod->core_size);
64308 - mod->core_text_size = mod->core_size;
64309 - break;
64310 - case 1: /* RO: text and ro-data */
64311 - mod->core_size = debug_align(mod->core_size);
64312 - mod->core_ro_size = mod->core_size;
64313 - break;
64314 - case 3: /* whole core */
64315 - mod->core_size = debug_align(mod->core_size);
64316 - break;
64317 - }
64318 }
64319
64320 DEBUGP("Init section allocation order:\n");
64321 @@ -2006,23 +2033,13 @@ static void layout_sections(struct module *mod, struct load_info *info)
64322 || s->sh_entsize != ~0UL
64323 || !strstarts(sname, ".init"))
64324 continue;
64325 - s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
64326 - | INIT_OFFSET_MASK);
64327 + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
64328 + s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
64329 + else
64330 + s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
64331 + s->sh_entsize |= INIT_OFFSET_MASK;
64332 DEBUGP("\t%s\n", sname);
64333 }
64334 - switch (m) {
64335 - case 0: /* executable */
64336 - mod->init_size = debug_align(mod->init_size);
64337 - mod->init_text_size = mod->init_size;
64338 - break;
64339 - case 1: /* RO: text and ro-data */
64340 - mod->init_size = debug_align(mod->init_size);
64341 - mod->init_ro_size = mod->init_size;
64342 - break;
64343 - case 3: /* whole init */
64344 - mod->init_size = debug_align(mod->init_size);
64345 - break;
64346 - }
64347 }
64348 }
64349
64350 @@ -2187,7 +2204,7 @@ static void layout_symtab(struct module *mod, struct load_info *info)
64351
64352 /* Put symbol section at end of init part of module. */
64353 symsect->sh_flags |= SHF_ALLOC;
64354 - symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
64355 + symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
64356 info->index.sym) | INIT_OFFSET_MASK;
64357 DEBUGP("\t%s\n", info->secstrings + symsect->sh_name);
64358
64359 @@ -2204,19 +2221,19 @@ static void layout_symtab(struct module *mod, struct load_info *info)
64360 }
64361
64362 /* Append room for core symbols at end of core part. */
64363 - info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
64364 - mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
64365 + info->symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
64366 + mod->core_size_rx = info->symoffs + ndst * sizeof(Elf_Sym);
64367
64368 /* Put string table section at end of init part of module. */
64369 strsect->sh_flags |= SHF_ALLOC;
64370 - strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
64371 + strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
64372 info->index.str) | INIT_OFFSET_MASK;
64373 DEBUGP("\t%s\n", info->secstrings + strsect->sh_name);
64374
64375 /* Append room for core symbols' strings at end of core part. */
64376 - info->stroffs = mod->core_size;
64377 + info->stroffs = mod->core_size_rx;
64378 __set_bit(0, info->strmap);
64379 - mod->core_size += bitmap_weight(info->strmap, strsect->sh_size);
64380 + mod->core_size_rx += bitmap_weight(info->strmap, strsect->sh_size);
64381 }
64382
64383 static void add_kallsyms(struct module *mod, const struct load_info *info)
64384 @@ -2232,11 +2249,13 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
64385 /* Make sure we get permanent strtab: don't use info->strtab. */
64386 mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
64387
64388 + pax_open_kernel();
64389 +
64390 /* Set types up while we still have access to sections. */
64391 for (i = 0; i < mod->num_symtab; i++)
64392 mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
64393
64394 - mod->core_symtab = dst = mod->module_core + info->symoffs;
64395 + mod->core_symtab = dst = mod->module_core_rx + info->symoffs;
64396 src = mod->symtab;
64397 *dst = *src;
64398 for (ndst = i = 1; i < mod->num_symtab; ++i, ++src) {
64399 @@ -2249,10 +2268,12 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
64400 }
64401 mod->core_num_syms = ndst;
64402
64403 - mod->core_strtab = s = mod->module_core + info->stroffs;
64404 + mod->core_strtab = s = mod->module_core_rx + info->stroffs;
64405 for (*s = 0, i = 1; i < info->sechdrs[info->index.str].sh_size; ++i)
64406 if (test_bit(i, info->strmap))
64407 *++s = mod->strtab[i];
64408 +
64409 + pax_close_kernel();
64410 }
64411 #else
64412 static inline void layout_symtab(struct module *mod, struct load_info *info)
64413 @@ -2286,17 +2307,33 @@ void * __weak module_alloc(unsigned long size)
64414 return size == 0 ? NULL : vmalloc_exec(size);
64415 }
64416
64417 -static void *module_alloc_update_bounds(unsigned long size)
64418 +static void *module_alloc_update_bounds_rw(unsigned long size)
64419 {
64420 void *ret = module_alloc(size);
64421
64422 if (ret) {
64423 mutex_lock(&module_mutex);
64424 /* Update module bounds. */
64425 - if ((unsigned long)ret < module_addr_min)
64426 - module_addr_min = (unsigned long)ret;
64427 - if ((unsigned long)ret + size > module_addr_max)
64428 - module_addr_max = (unsigned long)ret + size;
64429 + if ((unsigned long)ret < module_addr_min_rw)
64430 + module_addr_min_rw = (unsigned long)ret;
64431 + if ((unsigned long)ret + size > module_addr_max_rw)
64432 + module_addr_max_rw = (unsigned long)ret + size;
64433 + mutex_unlock(&module_mutex);
64434 + }
64435 + return ret;
64436 +}
64437 +
64438 +static void *module_alloc_update_bounds_rx(unsigned long size)
64439 +{
64440 + void *ret = module_alloc_exec(size);
64441 +
64442 + if (ret) {
64443 + mutex_lock(&module_mutex);
64444 + /* Update module bounds. */
64445 + if ((unsigned long)ret < module_addr_min_rx)
64446 + module_addr_min_rx = (unsigned long)ret;
64447 + if ((unsigned long)ret + size > module_addr_max_rx)
64448 + module_addr_max_rx = (unsigned long)ret + size;
64449 mutex_unlock(&module_mutex);
64450 }
64451 return ret;
64452 @@ -2474,8 +2511,14 @@ static struct module *setup_load_info(struct load_info *info)
64453 static int check_modinfo(struct module *mod, struct load_info *info)
64454 {
64455 const char *modmagic = get_modinfo(info, "vermagic");
64456 + const char *license = get_modinfo(info, "license");
64457 int err;
64458
64459 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
64460 + if (!license || !license_is_gpl_compatible(license))
64461 + return -ENOEXEC;
64462 +#endif
64463 +
64464 /* This is allowed: modprobe --force will invalidate it. */
64465 if (!modmagic) {
64466 err = try_to_force_load(mod, "bad vermagic");
64467 @@ -2498,7 +2541,7 @@ static int check_modinfo(struct module *mod, struct load_info *info)
64468 }
64469
64470 /* Set up license info based on the info section */
64471 - set_license(mod, get_modinfo(info, "license"));
64472 + set_license(mod, license);
64473
64474 return 0;
64475 }
64476 @@ -2592,7 +2635,7 @@ static int move_module(struct module *mod, struct load_info *info)
64477 void *ptr;
64478
64479 /* Do the allocs. */
64480 - ptr = module_alloc_update_bounds(mod->core_size);
64481 + ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
64482 /*
64483 * The pointer to this block is stored in the module structure
64484 * which is inside the block. Just mark it as not being a
64485 @@ -2602,23 +2645,50 @@ static int move_module(struct module *mod, struct load_info *info)
64486 if (!ptr)
64487 return -ENOMEM;
64488
64489 - memset(ptr, 0, mod->core_size);
64490 - mod->module_core = ptr;
64491 + memset(ptr, 0, mod->core_size_rw);
64492 + mod->module_core_rw = ptr;
64493
64494 - ptr = module_alloc_update_bounds(mod->init_size);
64495 + ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
64496 /*
64497 * The pointer to this block is stored in the module structure
64498 * which is inside the block. This block doesn't need to be
64499 * scanned as it contains data and code that will be freed
64500 * after the module is initialized.
64501 */
64502 - kmemleak_ignore(ptr);
64503 - if (!ptr && mod->init_size) {
64504 - module_free(mod, mod->module_core);
64505 + kmemleak_not_leak(ptr);
64506 + if (!ptr && mod->init_size_rw) {
64507 + module_free(mod, mod->module_core_rw);
64508 return -ENOMEM;
64509 }
64510 - memset(ptr, 0, mod->init_size);
64511 - mod->module_init = ptr;
64512 + memset(ptr, 0, mod->init_size_rw);
64513 + mod->module_init_rw = ptr;
64514 +
64515 + ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
64516 + kmemleak_not_leak(ptr);
64517 + if (!ptr) {
64518 + module_free(mod, mod->module_init_rw);
64519 + module_free(mod, mod->module_core_rw);
64520 + return -ENOMEM;
64521 + }
64522 +
64523 + pax_open_kernel();
64524 + memset(ptr, 0, mod->core_size_rx);
64525 + pax_close_kernel();
64526 + mod->module_core_rx = ptr;
64527 +
64528 + ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
64529 + kmemleak_not_leak(ptr);
64530 + if (!ptr && mod->init_size_rx) {
64531 + module_free_exec(mod, mod->module_core_rx);
64532 + module_free(mod, mod->module_init_rw);
64533 + module_free(mod, mod->module_core_rw);
64534 + return -ENOMEM;
64535 + }
64536 +
64537 + pax_open_kernel();
64538 + memset(ptr, 0, mod->init_size_rx);
64539 + pax_close_kernel();
64540 + mod->module_init_rx = ptr;
64541
64542 /* Transfer each section which specifies SHF_ALLOC */
64543 DEBUGP("final section addresses:\n");
64544 @@ -2629,16 +2699,45 @@ static int move_module(struct module *mod, struct load_info *info)
64545 if (!(shdr->sh_flags & SHF_ALLOC))
64546 continue;
64547
64548 - if (shdr->sh_entsize & INIT_OFFSET_MASK)
64549 - dest = mod->module_init
64550 - + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
64551 - else
64552 - dest = mod->module_core + shdr->sh_entsize;
64553 + if (shdr->sh_entsize & INIT_OFFSET_MASK) {
64554 + if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
64555 + dest = mod->module_init_rw
64556 + + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
64557 + else
64558 + dest = mod->module_init_rx
64559 + + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
64560 + } else {
64561 + if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
64562 + dest = mod->module_core_rw + shdr->sh_entsize;
64563 + else
64564 + dest = mod->module_core_rx + shdr->sh_entsize;
64565 + }
64566 +
64567 + if (shdr->sh_type != SHT_NOBITS) {
64568 +
64569 +#ifdef CONFIG_PAX_KERNEXEC
64570 +#ifdef CONFIG_X86_64
64571 + if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR))
64572 + set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT);
64573 +#endif
64574 + if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) {
64575 + pax_open_kernel();
64576 + memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
64577 + pax_close_kernel();
64578 + } else
64579 +#endif
64580
64581 - if (shdr->sh_type != SHT_NOBITS)
64582 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
64583 + }
64584 /* Update sh_addr to point to copy in image. */
64585 - shdr->sh_addr = (unsigned long)dest;
64586 +
64587 +#ifdef CONFIG_PAX_KERNEXEC
64588 + if (shdr->sh_flags & SHF_EXECINSTR)
64589 + shdr->sh_addr = ktva_ktla((unsigned long)dest);
64590 + else
64591 +#endif
64592 +
64593 + shdr->sh_addr = (unsigned long)dest;
64594 DEBUGP("\t0x%lx %s\n",
64595 shdr->sh_addr, info->secstrings + shdr->sh_name);
64596 }
64597 @@ -2689,12 +2788,12 @@ static void flush_module_icache(const struct module *mod)
64598 * Do it before processing of module parameters, so the module
64599 * can provide parameter accessor functions of its own.
64600 */
64601 - if (mod->module_init)
64602 - flush_icache_range((unsigned long)mod->module_init,
64603 - (unsigned long)mod->module_init
64604 - + mod->init_size);
64605 - flush_icache_range((unsigned long)mod->module_core,
64606 - (unsigned long)mod->module_core + mod->core_size);
64607 + if (mod->module_init_rx)
64608 + flush_icache_range((unsigned long)mod->module_init_rx,
64609 + (unsigned long)mod->module_init_rx
64610 + + mod->init_size_rx);
64611 + flush_icache_range((unsigned long)mod->module_core_rx,
64612 + (unsigned long)mod->module_core_rx + mod->core_size_rx);
64613
64614 set_fs(old_fs);
64615 }
64616 @@ -2774,8 +2873,10 @@ static void module_deallocate(struct module *mod, struct load_info *info)
64617 {
64618 kfree(info->strmap);
64619 percpu_modfree(mod);
64620 - module_free(mod, mod->module_init);
64621 - module_free(mod, mod->module_core);
64622 + module_free_exec(mod, mod->module_init_rx);
64623 + module_free_exec(mod, mod->module_core_rx);
64624 + module_free(mod, mod->module_init_rw);
64625 + module_free(mod, mod->module_core_rw);
64626 }
64627
64628 int __weak module_finalize(const Elf_Ehdr *hdr,
64629 @@ -2839,9 +2940,38 @@ static struct module *load_module(void __user *umod,
64630 if (err)
64631 goto free_unload;
64632
64633 + /* Now copy in args */
64634 + mod->args = strndup_user(uargs, ~0UL >> 1);
64635 + if (IS_ERR(mod->args)) {
64636 + err = PTR_ERR(mod->args);
64637 + goto free_unload;
64638 + }
64639 +
64640 /* Set up MODINFO_ATTR fields */
64641 setup_modinfo(mod, &info);
64642
64643 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
64644 + {
64645 + char *p, *p2;
64646 +
64647 + if (strstr(mod->args, "grsec_modharden_netdev")) {
64648 + printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
64649 + err = -EPERM;
64650 + goto free_modinfo;
64651 + } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
64652 + p += strlen("grsec_modharden_normal");
64653 + p2 = strstr(p, "_");
64654 + if (p2) {
64655 + *p2 = '\0';
64656 + printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
64657 + *p2 = '_';
64658 + }
64659 + err = -EPERM;
64660 + goto free_modinfo;
64661 + }
64662 + }
64663 +#endif
64664 +
64665 /* Fix up syms, so that st_value is a pointer to location. */
64666 err = simplify_symbols(mod, &info);
64667 if (err < 0)
64668 @@ -2857,13 +2987,6 @@ static struct module *load_module(void __user *umod,
64669
64670 flush_module_icache(mod);
64671
64672 - /* Now copy in args */
64673 - mod->args = strndup_user(uargs, ~0UL >> 1);
64674 - if (IS_ERR(mod->args)) {
64675 - err = PTR_ERR(mod->args);
64676 - goto free_arch_cleanup;
64677 - }
64678 -
64679 /* Mark state as coming so strong_try_module_get() ignores us. */
64680 mod->state = MODULE_STATE_COMING;
64681
64682 @@ -2921,11 +3044,10 @@ static struct module *load_module(void __user *umod,
64683 unlock:
64684 mutex_unlock(&module_mutex);
64685 synchronize_sched();
64686 - kfree(mod->args);
64687 - free_arch_cleanup:
64688 module_arch_cleanup(mod);
64689 free_modinfo:
64690 free_modinfo(mod);
64691 + kfree(mod->args);
64692 free_unload:
64693 module_unload_free(mod);
64694 free_module:
64695 @@ -2966,16 +3088,16 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
64696 MODULE_STATE_COMING, mod);
64697
64698 /* Set RO and NX regions for core */
64699 - set_section_ro_nx(mod->module_core,
64700 - mod->core_text_size,
64701 - mod->core_ro_size,
64702 - mod->core_size);
64703 + set_section_ro_nx(mod->module_core_rx,
64704 + mod->core_size_rx,
64705 + mod->core_size_rx,
64706 + mod->core_size_rx);
64707
64708 /* Set RO and NX regions for init */
64709 - set_section_ro_nx(mod->module_init,
64710 - mod->init_text_size,
64711 - mod->init_ro_size,
64712 - mod->init_size);
64713 + set_section_ro_nx(mod->module_init_rx,
64714 + mod->init_size_rx,
64715 + mod->init_size_rx,
64716 + mod->init_size_rx);
64717
64718 do_mod_ctors(mod);
64719 /* Start the module */
64720 @@ -3021,11 +3143,12 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
64721 mod->strtab = mod->core_strtab;
64722 #endif
64723 unset_module_init_ro_nx(mod);
64724 - module_free(mod, mod->module_init);
64725 - mod->module_init = NULL;
64726 - mod->init_size = 0;
64727 - mod->init_ro_size = 0;
64728 - mod->init_text_size = 0;
64729 + module_free(mod, mod->module_init_rw);
64730 + module_free_exec(mod, mod->module_init_rx);
64731 + mod->module_init_rw = NULL;
64732 + mod->module_init_rx = NULL;
64733 + mod->init_size_rw = 0;
64734 + mod->init_size_rx = 0;
64735 mutex_unlock(&module_mutex);
64736
64737 return 0;
64738 @@ -3056,10 +3179,16 @@ static const char *get_ksymbol(struct module *mod,
64739 unsigned long nextval;
64740
64741 /* At worse, next value is at end of module */
64742 - if (within_module_init(addr, mod))
64743 - nextval = (unsigned long)mod->module_init+mod->init_text_size;
64744 + if (within_module_init_rx(addr, mod))
64745 + nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
64746 + else if (within_module_init_rw(addr, mod))
64747 + nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
64748 + else if (within_module_core_rx(addr, mod))
64749 + nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
64750 + else if (within_module_core_rw(addr, mod))
64751 + nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
64752 else
64753 - nextval = (unsigned long)mod->module_core+mod->core_text_size;
64754 + return NULL;
64755
64756 /* Scan for closest preceding symbol, and next symbol. (ELF
64757 starts real symbols at 1). */
64758 @@ -3307,7 +3436,7 @@ static int m_show(struct seq_file *m, void *p)
64759 char buf[8];
64760
64761 seq_printf(m, "%s %u",
64762 - mod->name, mod->init_size + mod->core_size);
64763 + mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
64764 print_unload_info(m, mod);
64765
64766 /* Informative for users. */
64767 @@ -3316,7 +3445,7 @@ static int m_show(struct seq_file *m, void *p)
64768 mod->state == MODULE_STATE_COMING ? "Loading":
64769 "Live");
64770 /* Used by oprofile and other similar tools. */
64771 - seq_printf(m, " 0x%pK", mod->module_core);
64772 + seq_printf(m, " 0x%pK 0x%pK", mod->module_core_rx, mod->module_core_rw);
64773
64774 /* Taints info */
64775 if (mod->taints)
64776 @@ -3352,7 +3481,17 @@ static const struct file_operations proc_modules_operations = {
64777
64778 static int __init proc_modules_init(void)
64779 {
64780 +#ifndef CONFIG_GRKERNSEC_HIDESYM
64781 +#ifdef CONFIG_GRKERNSEC_PROC_USER
64782 + proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
64783 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
64784 + proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
64785 +#else
64786 proc_create("modules", 0, NULL, &proc_modules_operations);
64787 +#endif
64788 +#else
64789 + proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
64790 +#endif
64791 return 0;
64792 }
64793 module_init(proc_modules_init);
64794 @@ -3411,12 +3550,12 @@ struct module *__module_address(unsigned long addr)
64795 {
64796 struct module *mod;
64797
64798 - if (addr < module_addr_min || addr > module_addr_max)
64799 + if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
64800 + (addr < module_addr_min_rw || addr > module_addr_max_rw))
64801 return NULL;
64802
64803 list_for_each_entry_rcu(mod, &modules, list)
64804 - if (within_module_core(addr, mod)
64805 - || within_module_init(addr, mod))
64806 + if (within_module_init(addr, mod) || within_module_core(addr, mod))
64807 return mod;
64808 return NULL;
64809 }
64810 @@ -3450,11 +3589,20 @@ bool is_module_text_address(unsigned long addr)
64811 */
64812 struct module *__module_text_address(unsigned long addr)
64813 {
64814 - struct module *mod = __module_address(addr);
64815 + struct module *mod;
64816 +
64817 +#ifdef CONFIG_X86_32
64818 + addr = ktla_ktva(addr);
64819 +#endif
64820 +
64821 + if (addr < module_addr_min_rx || addr > module_addr_max_rx)
64822 + return NULL;
64823 +
64824 + mod = __module_address(addr);
64825 +
64826 if (mod) {
64827 /* Make sure it's within the text section. */
64828 - if (!within(addr, mod->module_init, mod->init_text_size)
64829 - && !within(addr, mod->module_core, mod->core_text_size))
64830 + if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
64831 mod = NULL;
64832 }
64833 return mod;
64834 diff --git a/kernel/mutex-debug.c b/kernel/mutex-debug.c
64835 index 7e3443f..b2a1e6b 100644
64836 --- a/kernel/mutex-debug.c
64837 +++ b/kernel/mutex-debug.c
64838 @@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter)
64839 }
64840
64841 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
64842 - struct thread_info *ti)
64843 + struct task_struct *task)
64844 {
64845 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
64846
64847 /* Mark the current thread as blocked on the lock: */
64848 - ti->task->blocked_on = waiter;
64849 + task->blocked_on = waiter;
64850 }
64851
64852 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
64853 - struct thread_info *ti)
64854 + struct task_struct *task)
64855 {
64856 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
64857 - DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
64858 - DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
64859 - ti->task->blocked_on = NULL;
64860 + DEBUG_LOCKS_WARN_ON(waiter->task != task);
64861 + DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
64862 + task->blocked_on = NULL;
64863
64864 list_del_init(&waiter->list);
64865 waiter->task = NULL;
64866 diff --git a/kernel/mutex-debug.h b/kernel/mutex-debug.h
64867 index 0799fd3..d06ae3b 100644
64868 --- a/kernel/mutex-debug.h
64869 +++ b/kernel/mutex-debug.h
64870 @@ -20,9 +20,9 @@ extern void debug_mutex_wake_waiter(struct mutex *lock,
64871 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
64872 extern void debug_mutex_add_waiter(struct mutex *lock,
64873 struct mutex_waiter *waiter,
64874 - struct thread_info *ti);
64875 + struct task_struct *task);
64876 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
64877 - struct thread_info *ti);
64878 + struct task_struct *task);
64879 extern void debug_mutex_unlock(struct mutex *lock);
64880 extern void debug_mutex_init(struct mutex *lock, const char *name,
64881 struct lock_class_key *key);
64882 diff --git a/kernel/mutex.c b/kernel/mutex.c
64883 index 89096dd..f91ebc5 100644
64884 --- a/kernel/mutex.c
64885 +++ b/kernel/mutex.c
64886 @@ -198,7 +198,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
64887 spin_lock_mutex(&lock->wait_lock, flags);
64888
64889 debug_mutex_lock_common(lock, &waiter);
64890 - debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
64891 + debug_mutex_add_waiter(lock, &waiter, task);
64892
64893 /* add waiting tasks to the end of the waitqueue (FIFO): */
64894 list_add_tail(&waiter.list, &lock->wait_list);
64895 @@ -227,8 +227,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
64896 * TASK_UNINTERRUPTIBLE case.)
64897 */
64898 if (unlikely(signal_pending_state(state, task))) {
64899 - mutex_remove_waiter(lock, &waiter,
64900 - task_thread_info(task));
64901 + mutex_remove_waiter(lock, &waiter, task);
64902 mutex_release(&lock->dep_map, 1, ip);
64903 spin_unlock_mutex(&lock->wait_lock, flags);
64904
64905 @@ -249,7 +248,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
64906 done:
64907 lock_acquired(&lock->dep_map, ip);
64908 /* got the lock - rejoice! */
64909 - mutex_remove_waiter(lock, &waiter, current_thread_info());
64910 + mutex_remove_waiter(lock, &waiter, task);
64911 mutex_set_owner(lock);
64912
64913 /* set it to 0 if there are no waiters left: */
64914 diff --git a/kernel/padata.c b/kernel/padata.c
64915 index b452599..5d68f4e 100644
64916 --- a/kernel/padata.c
64917 +++ b/kernel/padata.c
64918 @@ -132,10 +132,10 @@ int padata_do_parallel(struct padata_instance *pinst,
64919 padata->pd = pd;
64920 padata->cb_cpu = cb_cpu;
64921
64922 - if (unlikely(atomic_read(&pd->seq_nr) == pd->max_seq_nr))
64923 - atomic_set(&pd->seq_nr, -1);
64924 + if (unlikely(atomic_read_unchecked(&pd->seq_nr) == pd->max_seq_nr))
64925 + atomic_set_unchecked(&pd->seq_nr, -1);
64926
64927 - padata->seq_nr = atomic_inc_return(&pd->seq_nr);
64928 + padata->seq_nr = atomic_inc_return_unchecked(&pd->seq_nr);
64929
64930 target_cpu = padata_cpu_hash(padata);
64931 queue = per_cpu_ptr(pd->pqueue, target_cpu);
64932 @@ -444,7 +444,7 @@ static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst,
64933 padata_init_pqueues(pd);
64934 padata_init_squeues(pd);
64935 setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
64936 - atomic_set(&pd->seq_nr, -1);
64937 + atomic_set_unchecked(&pd->seq_nr, -1);
64938 atomic_set(&pd->reorder_objects, 0);
64939 atomic_set(&pd->refcnt, 0);
64940 pd->pinst = pinst;
64941 diff --git a/kernel/panic.c b/kernel/panic.c
64942 index b2659360..5972a0f 100644
64943 --- a/kernel/panic.c
64944 +++ b/kernel/panic.c
64945 @@ -78,7 +78,11 @@ NORET_TYPE void panic(const char * fmt, ...)
64946 va_end(args);
64947 printk(KERN_EMERG "Kernel panic - not syncing: %s\n",buf);
64948 #ifdef CONFIG_DEBUG_BUGVERBOSE
64949 - dump_stack();
64950 + /*
64951 + * Avoid nested stack-dumping if a panic occurs during oops processing
64952 + */
64953 + if (!oops_in_progress)
64954 + dump_stack();
64955 #endif
64956
64957 /*
64958 @@ -373,7 +377,7 @@ static void warn_slowpath_common(const char *file, int line, void *caller,
64959 const char *board;
64960
64961 printk(KERN_WARNING "------------[ cut here ]------------\n");
64962 - printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
64963 + printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
64964 board = dmi_get_system_info(DMI_PRODUCT_NAME);
64965 if (board)
64966 printk(KERN_WARNING "Hardware name: %s\n", board);
64967 @@ -428,7 +432,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
64968 */
64969 void __stack_chk_fail(void)
64970 {
64971 - panic("stack-protector: Kernel stack is corrupted in: %p\n",
64972 + dump_stack();
64973 + panic("stack-protector: Kernel stack is corrupted in: %pA\n",
64974 __builtin_return_address(0));
64975 }
64976 EXPORT_SYMBOL(__stack_chk_fail);
64977 diff --git a/kernel/pid.c b/kernel/pid.c
64978 index fa5f722..0c93e57 100644
64979 --- a/kernel/pid.c
64980 +++ b/kernel/pid.c
64981 @@ -33,6 +33,7 @@
64982 #include <linux/rculist.h>
64983 #include <linux/bootmem.h>
64984 #include <linux/hash.h>
64985 +#include <linux/security.h>
64986 #include <linux/pid_namespace.h>
64987 #include <linux/init_task.h>
64988 #include <linux/syscalls.h>
64989 @@ -45,7 +46,7 @@ struct pid init_struct_pid = INIT_STRUCT_PID;
64990
64991 int pid_max = PID_MAX_DEFAULT;
64992
64993 -#define RESERVED_PIDS 300
64994 +#define RESERVED_PIDS 500
64995
64996 int pid_max_min = RESERVED_PIDS + 1;
64997 int pid_max_max = PID_MAX_LIMIT;
64998 @@ -418,10 +419,18 @@ EXPORT_SYMBOL(pid_task);
64999 */
65000 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
65001 {
65002 + struct task_struct *task;
65003 +
65004 rcu_lockdep_assert(rcu_read_lock_held(),
65005 "find_task_by_pid_ns() needs rcu_read_lock()"
65006 " protection");
65007 - return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
65008 +
65009 + task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
65010 +
65011 + if (gr_pid_is_chrooted(task))
65012 + return NULL;
65013 +
65014 + return task;
65015 }
65016
65017 struct task_struct *find_task_by_vpid(pid_t vnr)
65018 @@ -429,6 +438,14 @@ struct task_struct *find_task_by_vpid(pid_t vnr)
65019 return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns);
65020 }
65021
65022 +struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
65023 +{
65024 + rcu_lockdep_assert(rcu_read_lock_held(),
65025 + "find_task_by_pid_ns() needs rcu_read_lock()"
65026 + " protection");
65027 + return pid_task(find_pid_ns(vnr, current->nsproxy->pid_ns), PIDTYPE_PID);
65028 +}
65029 +
65030 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
65031 {
65032 struct pid *pid;
65033 diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
65034 index e7cb76d..75eceb3 100644
65035 --- a/kernel/posix-cpu-timers.c
65036 +++ b/kernel/posix-cpu-timers.c
65037 @@ -6,6 +6,7 @@
65038 #include <linux/posix-timers.h>
65039 #include <linux/errno.h>
65040 #include <linux/math64.h>
65041 +#include <linux/security.h>
65042 #include <asm/uaccess.h>
65043 #include <linux/kernel_stat.h>
65044 #include <trace/events/timer.h>
65045 @@ -1606,14 +1607,14 @@ struct k_clock clock_posix_cpu = {
65046
65047 static __init int init_posix_cpu_timers(void)
65048 {
65049 - struct k_clock process = {
65050 + static struct k_clock process = {
65051 .clock_getres = process_cpu_clock_getres,
65052 .clock_get = process_cpu_clock_get,
65053 .timer_create = process_cpu_timer_create,
65054 .nsleep = process_cpu_nsleep,
65055 .nsleep_restart = process_cpu_nsleep_restart,
65056 };
65057 - struct k_clock thread = {
65058 + static struct k_clock thread = {
65059 .clock_getres = thread_cpu_clock_getres,
65060 .clock_get = thread_cpu_clock_get,
65061 .timer_create = thread_cpu_timer_create,
65062 diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
65063 index 69185ae..cc2847a 100644
65064 --- a/kernel/posix-timers.c
65065 +++ b/kernel/posix-timers.c
65066 @@ -43,6 +43,7 @@
65067 #include <linux/idr.h>
65068 #include <linux/posix-clock.h>
65069 #include <linux/posix-timers.h>
65070 +#include <linux/grsecurity.h>
65071 #include <linux/syscalls.h>
65072 #include <linux/wait.h>
65073 #include <linux/workqueue.h>
65074 @@ -129,7 +130,7 @@ static DEFINE_SPINLOCK(idr_lock);
65075 * which we beg off on and pass to do_sys_settimeofday().
65076 */
65077
65078 -static struct k_clock posix_clocks[MAX_CLOCKS];
65079 +static struct k_clock *posix_clocks[MAX_CLOCKS];
65080
65081 /*
65082 * These ones are defined below.
65083 @@ -227,7 +228,7 @@ static int posix_get_boottime(const clockid_t which_clock, struct timespec *tp)
65084 */
65085 static __init int init_posix_timers(void)
65086 {
65087 - struct k_clock clock_realtime = {
65088 + static struct k_clock clock_realtime = {
65089 .clock_getres = hrtimer_get_res,
65090 .clock_get = posix_clock_realtime_get,
65091 .clock_set = posix_clock_realtime_set,
65092 @@ -239,7 +240,7 @@ static __init int init_posix_timers(void)
65093 .timer_get = common_timer_get,
65094 .timer_del = common_timer_del,
65095 };
65096 - struct k_clock clock_monotonic = {
65097 + static struct k_clock clock_monotonic = {
65098 .clock_getres = hrtimer_get_res,
65099 .clock_get = posix_ktime_get_ts,
65100 .nsleep = common_nsleep,
65101 @@ -249,19 +250,19 @@ static __init int init_posix_timers(void)
65102 .timer_get = common_timer_get,
65103 .timer_del = common_timer_del,
65104 };
65105 - struct k_clock clock_monotonic_raw = {
65106 + static struct k_clock clock_monotonic_raw = {
65107 .clock_getres = hrtimer_get_res,
65108 .clock_get = posix_get_monotonic_raw,
65109 };
65110 - struct k_clock clock_realtime_coarse = {
65111 + static struct k_clock clock_realtime_coarse = {
65112 .clock_getres = posix_get_coarse_res,
65113 .clock_get = posix_get_realtime_coarse,
65114 };
65115 - struct k_clock clock_monotonic_coarse = {
65116 + static struct k_clock clock_monotonic_coarse = {
65117 .clock_getres = posix_get_coarse_res,
65118 .clock_get = posix_get_monotonic_coarse,
65119 };
65120 - struct k_clock clock_boottime = {
65121 + static struct k_clock clock_boottime = {
65122 .clock_getres = hrtimer_get_res,
65123 .clock_get = posix_get_boottime,
65124 .nsleep = common_nsleep,
65125 @@ -473,7 +474,7 @@ void posix_timers_register_clock(const clockid_t clock_id,
65126 return;
65127 }
65128
65129 - posix_clocks[clock_id] = *new_clock;
65130 + posix_clocks[clock_id] = new_clock;
65131 }
65132 EXPORT_SYMBOL_GPL(posix_timers_register_clock);
65133
65134 @@ -519,9 +520,9 @@ static struct k_clock *clockid_to_kclock(const clockid_t id)
65135 return (id & CLOCKFD_MASK) == CLOCKFD ?
65136 &clock_posix_dynamic : &clock_posix_cpu;
65137
65138 - if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
65139 + if (id >= MAX_CLOCKS || !posix_clocks[id] || !posix_clocks[id]->clock_getres)
65140 return NULL;
65141 - return &posix_clocks[id];
65142 + return posix_clocks[id];
65143 }
65144
65145 static int common_timer_create(struct k_itimer *new_timer)
65146 @@ -959,6 +960,13 @@ SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
65147 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
65148 return -EFAULT;
65149
65150 + /* only the CLOCK_REALTIME clock can be set, all other clocks
65151 + have their clock_set fptr set to a nosettime dummy function
65152 + CLOCK_REALTIME has a NULL clock_set fptr which causes it to
65153 + call common_clock_set, which calls do_sys_settimeofday, which
65154 + we hook
65155 + */
65156 +
65157 return kc->clock_set(which_clock, &new_tp);
65158 }
65159
65160 diff --git a/kernel/power/poweroff.c b/kernel/power/poweroff.c
65161 index d523593..68197a4 100644
65162 --- a/kernel/power/poweroff.c
65163 +++ b/kernel/power/poweroff.c
65164 @@ -37,7 +37,7 @@ static struct sysrq_key_op sysrq_poweroff_op = {
65165 .enable_mask = SYSRQ_ENABLE_BOOT,
65166 };
65167
65168 -static int pm_sysrq_init(void)
65169 +static int __init pm_sysrq_init(void)
65170 {
65171 register_sysrq_key('o', &sysrq_poweroff_op);
65172 return 0;
65173 diff --git a/kernel/power/process.c b/kernel/power/process.c
65174 index addbbe5..f9e32e0 100644
65175 --- a/kernel/power/process.c
65176 +++ b/kernel/power/process.c
65177 @@ -41,6 +41,7 @@ static int try_to_freeze_tasks(bool sig_only)
65178 u64 elapsed_csecs64;
65179 unsigned int elapsed_csecs;
65180 bool wakeup = false;
65181 + bool timedout = false;
65182
65183 do_gettimeofday(&start);
65184
65185 @@ -51,6 +52,8 @@ static int try_to_freeze_tasks(bool sig_only)
65186
65187 while (true) {
65188 todo = 0;
65189 + if (time_after(jiffies, end_time))
65190 + timedout = true;
65191 read_lock(&tasklist_lock);
65192 do_each_thread(g, p) {
65193 if (frozen(p) || !freezable(p))
65194 @@ -71,9 +74,13 @@ static int try_to_freeze_tasks(bool sig_only)
65195 * try_to_stop() after schedule() in ptrace/signal
65196 * stop sees TIF_FREEZE.
65197 */
65198 - if (!task_is_stopped_or_traced(p) &&
65199 - !freezer_should_skip(p))
65200 + if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) {
65201 todo++;
65202 + if (timedout) {
65203 + printk(KERN_ERR "Task refusing to freeze:\n");
65204 + sched_show_task(p);
65205 + }
65206 + }
65207 } while_each_thread(g, p);
65208 read_unlock(&tasklist_lock);
65209
65210 @@ -82,7 +89,7 @@ static int try_to_freeze_tasks(bool sig_only)
65211 todo += wq_busy;
65212 }
65213
65214 - if (!todo || time_after(jiffies, end_time))
65215 + if (!todo || timedout)
65216 break;
65217
65218 if (pm_wakeup_pending()) {
65219 diff --git a/kernel/printk.c b/kernel/printk.c
65220 index 7982a0a..2095fdc 100644
65221 --- a/kernel/printk.c
65222 +++ b/kernel/printk.c
65223 @@ -313,6 +313,11 @@ static int check_syslog_permissions(int type, bool from_file)
65224 if (from_file && type != SYSLOG_ACTION_OPEN)
65225 return 0;
65226
65227 +#ifdef CONFIG_GRKERNSEC_DMESG
65228 + if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN))
65229 + return -EPERM;
65230 +#endif
65231 +
65232 if (syslog_action_restricted(type)) {
65233 if (capable(CAP_SYSLOG))
65234 return 0;
65235 diff --git a/kernel/profile.c b/kernel/profile.c
65236 index 76b8e77..a2930e8 100644
65237 --- a/kernel/profile.c
65238 +++ b/kernel/profile.c
65239 @@ -39,7 +39,7 @@ struct profile_hit {
65240 /* Oprofile timer tick hook */
65241 static int (*timer_hook)(struct pt_regs *) __read_mostly;
65242
65243 -static atomic_t *prof_buffer;
65244 +static atomic_unchecked_t *prof_buffer;
65245 static unsigned long prof_len, prof_shift;
65246
65247 int prof_on __read_mostly;
65248 @@ -281,7 +281,7 @@ static void profile_flip_buffers(void)
65249 hits[i].pc = 0;
65250 continue;
65251 }
65252 - atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
65253 + atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
65254 hits[i].hits = hits[i].pc = 0;
65255 }
65256 }
65257 @@ -342,9 +342,9 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
65258 * Add the current hit(s) and flush the write-queue out
65259 * to the global buffer:
65260 */
65261 - atomic_add(nr_hits, &prof_buffer[pc]);
65262 + atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
65263 for (i = 0; i < NR_PROFILE_HIT; ++i) {
65264 - atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
65265 + atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
65266 hits[i].pc = hits[i].hits = 0;
65267 }
65268 out:
65269 @@ -419,7 +419,7 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
65270 {
65271 unsigned long pc;
65272 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
65273 - atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
65274 + atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
65275 }
65276 #endif /* !CONFIG_SMP */
65277
65278 @@ -517,7 +517,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
65279 return -EFAULT;
65280 buf++; p++; count--; read++;
65281 }
65282 - pnt = (char *)prof_buffer + p - sizeof(atomic_t);
65283 + pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
65284 if (copy_to_user(buf, (void *)pnt, count))
65285 return -EFAULT;
65286 read += count;
65287 @@ -548,7 +548,7 @@ static ssize_t write_profile(struct file *file, const char __user *buf,
65288 }
65289 #endif
65290 profile_discard_flip_buffers();
65291 - memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
65292 + memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
65293 return count;
65294 }
65295
65296 diff --git a/kernel/ptrace.c b/kernel/ptrace.c
65297 index 78ab24a..332c915 100644
65298 --- a/kernel/ptrace.c
65299 +++ b/kernel/ptrace.c
65300 @@ -172,7 +172,8 @@ int ptrace_check_attach(struct task_struct *child, bool ignore_state)
65301 return ret;
65302 }
65303
65304 -int __ptrace_may_access(struct task_struct *task, unsigned int mode)
65305 +static int __ptrace_may_access(struct task_struct *task, unsigned int mode,
65306 + unsigned int log)
65307 {
65308 const struct cred *cred = current_cred(), *tcred;
65309
65310 @@ -198,7 +199,8 @@ int __ptrace_may_access(struct task_struct *task, unsigned int mode)
65311 cred->gid == tcred->sgid &&
65312 cred->gid == tcred->gid))
65313 goto ok;
65314 - if (ns_capable(tcred->user->user_ns, CAP_SYS_PTRACE))
65315 + if ((!log && ns_capable_nolog(tcred->user->user_ns, CAP_SYS_PTRACE)) ||
65316 + (log && ns_capable(tcred->user->user_ns, CAP_SYS_PTRACE)))
65317 goto ok;
65318 rcu_read_unlock();
65319 return -EPERM;
65320 @@ -207,7 +209,9 @@ ok:
65321 smp_rmb();
65322 if (task->mm)
65323 dumpable = get_dumpable(task->mm);
65324 - if (!dumpable && !task_ns_capable(task, CAP_SYS_PTRACE))
65325 + if (!dumpable &&
65326 + ((!log && !task_ns_capable_nolog(task, CAP_SYS_PTRACE)) ||
65327 + (log && !task_ns_capable(task, CAP_SYS_PTRACE))))
65328 return -EPERM;
65329
65330 return security_ptrace_access_check(task, mode);
65331 @@ -217,7 +221,21 @@ bool ptrace_may_access(struct task_struct *task, unsigned int mode)
65332 {
65333 int err;
65334 task_lock(task);
65335 - err = __ptrace_may_access(task, mode);
65336 + err = __ptrace_may_access(task, mode, 0);
65337 + task_unlock(task);
65338 + return !err;
65339 +}
65340 +
65341 +bool ptrace_may_access_nolock(struct task_struct *task, unsigned int mode)
65342 +{
65343 + return __ptrace_may_access(task, mode, 0);
65344 +}
65345 +
65346 +bool ptrace_may_access_log(struct task_struct *task, unsigned int mode)
65347 +{
65348 + int err;
65349 + task_lock(task);
65350 + err = __ptrace_may_access(task, mode, 1);
65351 task_unlock(task);
65352 return !err;
65353 }
65354 @@ -262,7 +280,7 @@ static int ptrace_attach(struct task_struct *task, long request,
65355 goto out;
65356
65357 task_lock(task);
65358 - retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
65359 + retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH, 1);
65360 task_unlock(task);
65361 if (retval)
65362 goto unlock_creds;
65363 @@ -277,7 +295,7 @@ static int ptrace_attach(struct task_struct *task, long request,
65364 task->ptrace = PT_PTRACED;
65365 if (seize)
65366 task->ptrace |= PT_SEIZED;
65367 - if (task_ns_capable(task, CAP_SYS_PTRACE))
65368 + if (task_ns_capable_nolog(task, CAP_SYS_PTRACE))
65369 task->ptrace |= PT_PTRACE_CAP;
65370
65371 __ptrace_link(task, current);
65372 @@ -483,7 +501,7 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
65373 break;
65374 return -EIO;
65375 }
65376 - if (copy_to_user(dst, buf, retval))
65377 + if (retval > sizeof(buf) || copy_to_user(dst, buf, retval))
65378 return -EFAULT;
65379 copied += retval;
65380 src += retval;
65381 @@ -680,7 +698,7 @@ int ptrace_request(struct task_struct *child, long request,
65382 bool seized = child->ptrace & PT_SEIZED;
65383 int ret = -EIO;
65384 siginfo_t siginfo, *si;
65385 - void __user *datavp = (void __user *) data;
65386 + void __user *datavp = (__force void __user *) data;
65387 unsigned long __user *datalp = datavp;
65388 unsigned long flags;
65389
65390 @@ -882,14 +900,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
65391 goto out;
65392 }
65393
65394 + if (gr_handle_ptrace(child, request)) {
65395 + ret = -EPERM;
65396 + goto out_put_task_struct;
65397 + }
65398 +
65399 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
65400 ret = ptrace_attach(child, request, data);
65401 /*
65402 * Some architectures need to do book-keeping after
65403 * a ptrace attach.
65404 */
65405 - if (!ret)
65406 + if (!ret) {
65407 arch_ptrace_attach(child);
65408 + gr_audit_ptrace(child);
65409 + }
65410 goto out_put_task_struct;
65411 }
65412
65413 @@ -915,7 +940,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
65414 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
65415 if (copied != sizeof(tmp))
65416 return -EIO;
65417 - return put_user(tmp, (unsigned long __user *)data);
65418 + return put_user(tmp, (__force unsigned long __user *)data);
65419 }
65420
65421 int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
65422 @@ -1025,14 +1050,21 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
65423 goto out;
65424 }
65425
65426 + if (gr_handle_ptrace(child, request)) {
65427 + ret = -EPERM;
65428 + goto out_put_task_struct;
65429 + }
65430 +
65431 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
65432 ret = ptrace_attach(child, request, data);
65433 /*
65434 * Some architectures need to do book-keeping after
65435 * a ptrace attach.
65436 */
65437 - if (!ret)
65438 + if (!ret) {
65439 arch_ptrace_attach(child);
65440 + gr_audit_ptrace(child);
65441 + }
65442 goto out_put_task_struct;
65443 }
65444
65445 diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
65446 index 764825c..3aa6ac4 100644
65447 --- a/kernel/rcutorture.c
65448 +++ b/kernel/rcutorture.c
65449 @@ -138,12 +138,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) =
65450 { 0 };
65451 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
65452 { 0 };
65453 -static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
65454 -static atomic_t n_rcu_torture_alloc;
65455 -static atomic_t n_rcu_torture_alloc_fail;
65456 -static atomic_t n_rcu_torture_free;
65457 -static atomic_t n_rcu_torture_mberror;
65458 -static atomic_t n_rcu_torture_error;
65459 +static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
65460 +static atomic_unchecked_t n_rcu_torture_alloc;
65461 +static atomic_unchecked_t n_rcu_torture_alloc_fail;
65462 +static atomic_unchecked_t n_rcu_torture_free;
65463 +static atomic_unchecked_t n_rcu_torture_mberror;
65464 +static atomic_unchecked_t n_rcu_torture_error;
65465 static long n_rcu_torture_boost_ktrerror;
65466 static long n_rcu_torture_boost_rterror;
65467 static long n_rcu_torture_boost_failure;
65468 @@ -223,11 +223,11 @@ rcu_torture_alloc(void)
65469
65470 spin_lock_bh(&rcu_torture_lock);
65471 if (list_empty(&rcu_torture_freelist)) {
65472 - atomic_inc(&n_rcu_torture_alloc_fail);
65473 + atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
65474 spin_unlock_bh(&rcu_torture_lock);
65475 return NULL;
65476 }
65477 - atomic_inc(&n_rcu_torture_alloc);
65478 + atomic_inc_unchecked(&n_rcu_torture_alloc);
65479 p = rcu_torture_freelist.next;
65480 list_del_init(p);
65481 spin_unlock_bh(&rcu_torture_lock);
65482 @@ -240,7 +240,7 @@ rcu_torture_alloc(void)
65483 static void
65484 rcu_torture_free(struct rcu_torture *p)
65485 {
65486 - atomic_inc(&n_rcu_torture_free);
65487 + atomic_inc_unchecked(&n_rcu_torture_free);
65488 spin_lock_bh(&rcu_torture_lock);
65489 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
65490 spin_unlock_bh(&rcu_torture_lock);
65491 @@ -360,7 +360,7 @@ rcu_torture_cb(struct rcu_head *p)
65492 i = rp->rtort_pipe_count;
65493 if (i > RCU_TORTURE_PIPE_LEN)
65494 i = RCU_TORTURE_PIPE_LEN;
65495 - atomic_inc(&rcu_torture_wcount[i]);
65496 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
65497 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
65498 rp->rtort_mbtest = 0;
65499 rcu_torture_free(rp);
65500 @@ -407,7 +407,7 @@ static void rcu_sync_torture_deferred_free(struct rcu_torture *p)
65501 i = rp->rtort_pipe_count;
65502 if (i > RCU_TORTURE_PIPE_LEN)
65503 i = RCU_TORTURE_PIPE_LEN;
65504 - atomic_inc(&rcu_torture_wcount[i]);
65505 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
65506 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
65507 rp->rtort_mbtest = 0;
65508 list_del(&rp->rtort_free);
65509 @@ -872,7 +872,7 @@ rcu_torture_writer(void *arg)
65510 i = old_rp->rtort_pipe_count;
65511 if (i > RCU_TORTURE_PIPE_LEN)
65512 i = RCU_TORTURE_PIPE_LEN;
65513 - atomic_inc(&rcu_torture_wcount[i]);
65514 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
65515 old_rp->rtort_pipe_count++;
65516 cur_ops->deferred_free(old_rp);
65517 }
65518 @@ -940,7 +940,7 @@ static void rcu_torture_timer(unsigned long unused)
65519 return;
65520 }
65521 if (p->rtort_mbtest == 0)
65522 - atomic_inc(&n_rcu_torture_mberror);
65523 + atomic_inc_unchecked(&n_rcu_torture_mberror);
65524 spin_lock(&rand_lock);
65525 cur_ops->read_delay(&rand);
65526 n_rcu_torture_timers++;
65527 @@ -1001,7 +1001,7 @@ rcu_torture_reader(void *arg)
65528 continue;
65529 }
65530 if (p->rtort_mbtest == 0)
65531 - atomic_inc(&n_rcu_torture_mberror);
65532 + atomic_inc_unchecked(&n_rcu_torture_mberror);
65533 cur_ops->read_delay(&rand);
65534 preempt_disable();
65535 pipe_count = p->rtort_pipe_count;
65536 @@ -1060,16 +1060,16 @@ rcu_torture_printk(char *page)
65537 rcu_torture_current,
65538 rcu_torture_current_version,
65539 list_empty(&rcu_torture_freelist),
65540 - atomic_read(&n_rcu_torture_alloc),
65541 - atomic_read(&n_rcu_torture_alloc_fail),
65542 - atomic_read(&n_rcu_torture_free),
65543 - atomic_read(&n_rcu_torture_mberror),
65544 + atomic_read_unchecked(&n_rcu_torture_alloc),
65545 + atomic_read_unchecked(&n_rcu_torture_alloc_fail),
65546 + atomic_read_unchecked(&n_rcu_torture_free),
65547 + atomic_read_unchecked(&n_rcu_torture_mberror),
65548 n_rcu_torture_boost_ktrerror,
65549 n_rcu_torture_boost_rterror,
65550 n_rcu_torture_boost_failure,
65551 n_rcu_torture_boosts,
65552 n_rcu_torture_timers);
65553 - if (atomic_read(&n_rcu_torture_mberror) != 0 ||
65554 + if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0 ||
65555 n_rcu_torture_boost_ktrerror != 0 ||
65556 n_rcu_torture_boost_rterror != 0 ||
65557 n_rcu_torture_boost_failure != 0)
65558 @@ -1077,7 +1077,7 @@ rcu_torture_printk(char *page)
65559 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
65560 if (i > 1) {
65561 cnt += sprintf(&page[cnt], "!!! ");
65562 - atomic_inc(&n_rcu_torture_error);
65563 + atomic_inc_unchecked(&n_rcu_torture_error);
65564 WARN_ON_ONCE(1);
65565 }
65566 cnt += sprintf(&page[cnt], "Reader Pipe: ");
65567 @@ -1091,7 +1091,7 @@ rcu_torture_printk(char *page)
65568 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
65569 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
65570 cnt += sprintf(&page[cnt], " %d",
65571 - atomic_read(&rcu_torture_wcount[i]));
65572 + atomic_read_unchecked(&rcu_torture_wcount[i]));
65573 }
65574 cnt += sprintf(&page[cnt], "\n");
65575 if (cur_ops->stats)
65576 @@ -1401,7 +1401,7 @@ rcu_torture_cleanup(void)
65577
65578 if (cur_ops->cleanup)
65579 cur_ops->cleanup();
65580 - if (atomic_read(&n_rcu_torture_error))
65581 + if (atomic_read_unchecked(&n_rcu_torture_error))
65582 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
65583 else
65584 rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
65585 @@ -1465,17 +1465,17 @@ rcu_torture_init(void)
65586
65587 rcu_torture_current = NULL;
65588 rcu_torture_current_version = 0;
65589 - atomic_set(&n_rcu_torture_alloc, 0);
65590 - atomic_set(&n_rcu_torture_alloc_fail, 0);
65591 - atomic_set(&n_rcu_torture_free, 0);
65592 - atomic_set(&n_rcu_torture_mberror, 0);
65593 - atomic_set(&n_rcu_torture_error, 0);
65594 + atomic_set_unchecked(&n_rcu_torture_alloc, 0);
65595 + atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
65596 + atomic_set_unchecked(&n_rcu_torture_free, 0);
65597 + atomic_set_unchecked(&n_rcu_torture_mberror, 0);
65598 + atomic_set_unchecked(&n_rcu_torture_error, 0);
65599 n_rcu_torture_boost_ktrerror = 0;
65600 n_rcu_torture_boost_rterror = 0;
65601 n_rcu_torture_boost_failure = 0;
65602 n_rcu_torture_boosts = 0;
65603 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
65604 - atomic_set(&rcu_torture_wcount[i], 0);
65605 + atomic_set_unchecked(&rcu_torture_wcount[i], 0);
65606 for_each_possible_cpu(cpu) {
65607 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
65608 per_cpu(rcu_torture_count, cpu)[i] = 0;
65609 diff --git a/kernel/rcutree.c b/kernel/rcutree.c
65610 index 6b76d81..7afc1b3 100644
65611 --- a/kernel/rcutree.c
65612 +++ b/kernel/rcutree.c
65613 @@ -367,9 +367,9 @@ void rcu_enter_nohz(void)
65614 trace_rcu_dyntick("Start");
65615 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
65616 smp_mb__before_atomic_inc(); /* See above. */
65617 - atomic_inc(&rdtp->dynticks);
65618 + atomic_inc_unchecked(&rdtp->dynticks);
65619 smp_mb__after_atomic_inc(); /* Force ordering with next sojourn. */
65620 - WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
65621 + WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
65622 local_irq_restore(flags);
65623 }
65624
65625 @@ -391,10 +391,10 @@ void rcu_exit_nohz(void)
65626 return;
65627 }
65628 smp_mb__before_atomic_inc(); /* Force ordering w/previous sojourn. */
65629 - atomic_inc(&rdtp->dynticks);
65630 + atomic_inc_unchecked(&rdtp->dynticks);
65631 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
65632 smp_mb__after_atomic_inc(); /* See above. */
65633 - WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
65634 + WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
65635 trace_rcu_dyntick("End");
65636 local_irq_restore(flags);
65637 }
65638 @@ -411,14 +411,14 @@ void rcu_nmi_enter(void)
65639 struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
65640
65641 if (rdtp->dynticks_nmi_nesting == 0 &&
65642 - (atomic_read(&rdtp->dynticks) & 0x1))
65643 + (atomic_read_unchecked(&rdtp->dynticks) & 0x1))
65644 return;
65645 rdtp->dynticks_nmi_nesting++;
65646 smp_mb__before_atomic_inc(); /* Force delay from prior write. */
65647 - atomic_inc(&rdtp->dynticks);
65648 + atomic_inc_unchecked(&rdtp->dynticks);
65649 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
65650 smp_mb__after_atomic_inc(); /* See above. */
65651 - WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
65652 + WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
65653 }
65654
65655 /**
65656 @@ -437,9 +437,9 @@ void rcu_nmi_exit(void)
65657 return;
65658 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
65659 smp_mb__before_atomic_inc(); /* See above. */
65660 - atomic_inc(&rdtp->dynticks);
65661 + atomic_inc_unchecked(&rdtp->dynticks);
65662 smp_mb__after_atomic_inc(); /* Force delay to next write. */
65663 - WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
65664 + WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
65665 }
65666
65667 /**
65668 @@ -474,7 +474,7 @@ void rcu_irq_exit(void)
65669 */
65670 static int dyntick_save_progress_counter(struct rcu_data *rdp)
65671 {
65672 - rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
65673 + rdp->dynticks_snap = atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
65674 return 0;
65675 }
65676
65677 @@ -489,7 +489,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
65678 unsigned int curr;
65679 unsigned int snap;
65680
65681 - curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks);
65682 + curr = (unsigned int)atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
65683 snap = (unsigned int)rdp->dynticks_snap;
65684
65685 /*
65686 @@ -1552,7 +1552,7 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
65687 /*
65688 * Do RCU core processing for the current CPU.
65689 */
65690 -static void rcu_process_callbacks(struct softirq_action *unused)
65691 +static void rcu_process_callbacks(void)
65692 {
65693 trace_rcu_utilization("Start RCU core");
65694 __rcu_process_callbacks(&rcu_sched_state,
65695 diff --git a/kernel/rcutree.h b/kernel/rcutree.h
65696 index 849ce9e..74bc9de 100644
65697 --- a/kernel/rcutree.h
65698 +++ b/kernel/rcutree.h
65699 @@ -86,7 +86,7 @@
65700 struct rcu_dynticks {
65701 int dynticks_nesting; /* Track irq/process nesting level. */
65702 int dynticks_nmi_nesting; /* Track NMI nesting level. */
65703 - atomic_t dynticks; /* Even value for dynticks-idle, else odd. */
65704 + atomic_unchecked_t dynticks; /* Even value for dynticks-idle, else odd. */
65705 };
65706
65707 /* RCU's kthread states for tracing. */
65708 diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
65709 index 4b9b9f8..2326053 100644
65710 --- a/kernel/rcutree_plugin.h
65711 +++ b/kernel/rcutree_plugin.h
65712 @@ -842,7 +842,7 @@ void synchronize_rcu_expedited(void)
65713
65714 /* Clean up and exit. */
65715 smp_mb(); /* ensure expedited GP seen before counter increment. */
65716 - ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
65717 + ACCESS_ONCE_RW(sync_rcu_preempt_exp_count)++;
65718 unlock_mb_ret:
65719 mutex_unlock(&sync_rcu_preempt_exp_mutex);
65720 mb_ret:
65721 @@ -1815,8 +1815,8 @@ EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
65722
65723 #else /* #ifndef CONFIG_SMP */
65724
65725 -static atomic_t sync_sched_expedited_started = ATOMIC_INIT(0);
65726 -static atomic_t sync_sched_expedited_done = ATOMIC_INIT(0);
65727 +static atomic_unchecked_t sync_sched_expedited_started = ATOMIC_INIT(0);
65728 +static atomic_unchecked_t sync_sched_expedited_done = ATOMIC_INIT(0);
65729
65730 static int synchronize_sched_expedited_cpu_stop(void *data)
65731 {
65732 @@ -1871,7 +1871,7 @@ void synchronize_sched_expedited(void)
65733 int firstsnap, s, snap, trycount = 0;
65734
65735 /* Note that atomic_inc_return() implies full memory barrier. */
65736 - firstsnap = snap = atomic_inc_return(&sync_sched_expedited_started);
65737 + firstsnap = snap = atomic_inc_return_unchecked(&sync_sched_expedited_started);
65738 get_online_cpus();
65739
65740 /*
65741 @@ -1892,7 +1892,7 @@ void synchronize_sched_expedited(void)
65742 }
65743
65744 /* Check to see if someone else did our work for us. */
65745 - s = atomic_read(&sync_sched_expedited_done);
65746 + s = atomic_read_unchecked(&sync_sched_expedited_done);
65747 if (UINT_CMP_GE((unsigned)s, (unsigned)firstsnap)) {
65748 smp_mb(); /* ensure test happens before caller kfree */
65749 return;
65750 @@ -1907,7 +1907,7 @@ void synchronize_sched_expedited(void)
65751 * grace period works for us.
65752 */
65753 get_online_cpus();
65754 - snap = atomic_read(&sync_sched_expedited_started) - 1;
65755 + snap = atomic_read_unchecked(&sync_sched_expedited_started) - 1;
65756 smp_mb(); /* ensure read is before try_stop_cpus(). */
65757 }
65758
65759 @@ -1918,12 +1918,12 @@ void synchronize_sched_expedited(void)
65760 * than we did beat us to the punch.
65761 */
65762 do {
65763 - s = atomic_read(&sync_sched_expedited_done);
65764 + s = atomic_read_unchecked(&sync_sched_expedited_done);
65765 if (UINT_CMP_GE((unsigned)s, (unsigned)snap)) {
65766 smp_mb(); /* ensure test happens before caller kfree */
65767 break;
65768 }
65769 - } while (atomic_cmpxchg(&sync_sched_expedited_done, s, snap) != s);
65770 + } while (atomic_cmpxchg_unchecked(&sync_sched_expedited_done, s, snap) != s);
65771
65772 put_online_cpus();
65773 }
65774 @@ -1985,7 +1985,7 @@ int rcu_needs_cpu(int cpu)
65775 for_each_online_cpu(thatcpu) {
65776 if (thatcpu == cpu)
65777 continue;
65778 - snap = atomic_add_return(0, &per_cpu(rcu_dynticks,
65779 + snap = atomic_add_return_unchecked(0, &per_cpu(rcu_dynticks,
65780 thatcpu).dynticks);
65781 smp_mb(); /* Order sampling of snap with end of grace period. */
65782 if ((snap & 0x1) != 0) {
65783 diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c
65784 index 9feffa4..54058df 100644
65785 --- a/kernel/rcutree_trace.c
65786 +++ b/kernel/rcutree_trace.c
65787 @@ -69,7 +69,7 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
65788 rdp->qs_pending);
65789 #ifdef CONFIG_NO_HZ
65790 seq_printf(m, " dt=%d/%d/%d df=%lu",
65791 - atomic_read(&rdp->dynticks->dynticks),
65792 + atomic_read_unchecked(&rdp->dynticks->dynticks),
65793 rdp->dynticks->dynticks_nesting,
65794 rdp->dynticks->dynticks_nmi_nesting,
65795 rdp->dynticks_fqs);
65796 @@ -143,7 +143,7 @@ static void print_one_rcu_data_csv(struct seq_file *m, struct rcu_data *rdp)
65797 rdp->qs_pending);
65798 #ifdef CONFIG_NO_HZ
65799 seq_printf(m, ",%d,%d,%d,%lu",
65800 - atomic_read(&rdp->dynticks->dynticks),
65801 + atomic_read_unchecked(&rdp->dynticks->dynticks),
65802 rdp->dynticks->dynticks_nesting,
65803 rdp->dynticks->dynticks_nmi_nesting,
65804 rdp->dynticks_fqs);
65805 diff --git a/kernel/resource.c b/kernel/resource.c
65806 index 7640b3a..5879283 100644
65807 --- a/kernel/resource.c
65808 +++ b/kernel/resource.c
65809 @@ -141,8 +141,18 @@ static const struct file_operations proc_iomem_operations = {
65810
65811 static int __init ioresources_init(void)
65812 {
65813 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
65814 +#ifdef CONFIG_GRKERNSEC_PROC_USER
65815 + proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
65816 + proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
65817 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
65818 + proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
65819 + proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
65820 +#endif
65821 +#else
65822 proc_create("ioports", 0, NULL, &proc_ioports_operations);
65823 proc_create("iomem", 0, NULL, &proc_iomem_operations);
65824 +#endif
65825 return 0;
65826 }
65827 __initcall(ioresources_init);
65828 diff --git a/kernel/rtmutex-tester.c b/kernel/rtmutex-tester.c
65829 index 3d9f31c..7fefc9e 100644
65830 --- a/kernel/rtmutex-tester.c
65831 +++ b/kernel/rtmutex-tester.c
65832 @@ -20,7 +20,7 @@
65833 #define MAX_RT_TEST_MUTEXES 8
65834
65835 static spinlock_t rttest_lock;
65836 -static atomic_t rttest_event;
65837 +static atomic_unchecked_t rttest_event;
65838
65839 struct test_thread_data {
65840 int opcode;
65841 @@ -61,7 +61,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
65842
65843 case RTTEST_LOCKCONT:
65844 td->mutexes[td->opdata] = 1;
65845 - td->event = atomic_add_return(1, &rttest_event);
65846 + td->event = atomic_add_return_unchecked(1, &rttest_event);
65847 return 0;
65848
65849 case RTTEST_RESET:
65850 @@ -74,7 +74,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
65851 return 0;
65852
65853 case RTTEST_RESETEVENT:
65854 - atomic_set(&rttest_event, 0);
65855 + atomic_set_unchecked(&rttest_event, 0);
65856 return 0;
65857
65858 default:
65859 @@ -91,9 +91,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
65860 return ret;
65861
65862 td->mutexes[id] = 1;
65863 - td->event = atomic_add_return(1, &rttest_event);
65864 + td->event = atomic_add_return_unchecked(1, &rttest_event);
65865 rt_mutex_lock(&mutexes[id]);
65866 - td->event = atomic_add_return(1, &rttest_event);
65867 + td->event = atomic_add_return_unchecked(1, &rttest_event);
65868 td->mutexes[id] = 4;
65869 return 0;
65870
65871 @@ -104,9 +104,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
65872 return ret;
65873
65874 td->mutexes[id] = 1;
65875 - td->event = atomic_add_return(1, &rttest_event);
65876 + td->event = atomic_add_return_unchecked(1, &rttest_event);
65877 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
65878 - td->event = atomic_add_return(1, &rttest_event);
65879 + td->event = atomic_add_return_unchecked(1, &rttest_event);
65880 td->mutexes[id] = ret ? 0 : 4;
65881 return ret ? -EINTR : 0;
65882
65883 @@ -115,9 +115,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
65884 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
65885 return ret;
65886
65887 - td->event = atomic_add_return(1, &rttest_event);
65888 + td->event = atomic_add_return_unchecked(1, &rttest_event);
65889 rt_mutex_unlock(&mutexes[id]);
65890 - td->event = atomic_add_return(1, &rttest_event);
65891 + td->event = atomic_add_return_unchecked(1, &rttest_event);
65892 td->mutexes[id] = 0;
65893 return 0;
65894
65895 @@ -164,7 +164,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
65896 break;
65897
65898 td->mutexes[dat] = 2;
65899 - td->event = atomic_add_return(1, &rttest_event);
65900 + td->event = atomic_add_return_unchecked(1, &rttest_event);
65901 break;
65902
65903 default:
65904 @@ -184,7 +184,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
65905 return;
65906
65907 td->mutexes[dat] = 3;
65908 - td->event = atomic_add_return(1, &rttest_event);
65909 + td->event = atomic_add_return_unchecked(1, &rttest_event);
65910 break;
65911
65912 case RTTEST_LOCKNOWAIT:
65913 @@ -196,7 +196,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
65914 return;
65915
65916 td->mutexes[dat] = 1;
65917 - td->event = atomic_add_return(1, &rttest_event);
65918 + td->event = atomic_add_return_unchecked(1, &rttest_event);
65919 return;
65920
65921 default:
65922 diff --git a/kernel/sched.c b/kernel/sched.c
65923 index d6b149c..896cbb8 100644
65924 --- a/kernel/sched.c
65925 +++ b/kernel/sched.c
65926 @@ -4389,6 +4389,19 @@ pick_next_task(struct rq *rq)
65927 BUG(); /* the idle class will always have a runnable task */
65928 }
65929
65930 +#ifdef CONFIG_GRKERNSEC_SETXID
65931 +extern void gr_delayed_cred_worker(void);
65932 +static inline void gr_cred_schedule(void)
65933 +{
65934 + if (unlikely(current->delayed_cred))
65935 + gr_delayed_cred_worker();
65936 +}
65937 +#else
65938 +static inline void gr_cred_schedule(void)
65939 +{
65940 +}
65941 +#endif
65942 +
65943 /*
65944 * __schedule() is the main scheduler function.
65945 */
65946 @@ -4408,6 +4421,8 @@ need_resched:
65947
65948 schedule_debug(prev);
65949
65950 + gr_cred_schedule();
65951 +
65952 if (sched_feat(HRTICK))
65953 hrtick_clear(rq);
65954
65955 @@ -5098,6 +5113,8 @@ int can_nice(const struct task_struct *p, const int nice)
65956 /* convert nice value [19,-20] to rlimit style value [1,40] */
65957 int nice_rlim = 20 - nice;
65958
65959 + gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
65960 +
65961 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
65962 capable(CAP_SYS_NICE));
65963 }
65964 @@ -5131,7 +5148,8 @@ SYSCALL_DEFINE1(nice, int, increment)
65965 if (nice > 19)
65966 nice = 19;
65967
65968 - if (increment < 0 && !can_nice(current, nice))
65969 + if (increment < 0 && (!can_nice(current, nice) ||
65970 + gr_handle_chroot_nice()))
65971 return -EPERM;
65972
65973 retval = security_task_setnice(current, nice);
65974 @@ -5288,6 +5306,7 @@ recheck:
65975 unsigned long rlim_rtprio =
65976 task_rlimit(p, RLIMIT_RTPRIO);
65977
65978 + gr_learn_resource(p, RLIMIT_RTPRIO, param->sched_priority, 1);
65979 /* can't set/change the rt policy */
65980 if (policy != p->policy && !rlim_rtprio)
65981 return -EPERM;
65982 diff --git a/kernel/sched_autogroup.c b/kernel/sched_autogroup.c
65983 index 429242f..d7cca82 100644
65984 --- a/kernel/sched_autogroup.c
65985 +++ b/kernel/sched_autogroup.c
65986 @@ -7,7 +7,7 @@
65987
65988 unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
65989 static struct autogroup autogroup_default;
65990 -static atomic_t autogroup_seq_nr;
65991 +static atomic_unchecked_t autogroup_seq_nr;
65992
65993 static void __init autogroup_init(struct task_struct *init_task)
65994 {
65995 @@ -78,7 +78,7 @@ static inline struct autogroup *autogroup_create(void)
65996
65997 kref_init(&ag->kref);
65998 init_rwsem(&ag->lock);
65999 - ag->id = atomic_inc_return(&autogroup_seq_nr);
66000 + ag->id = atomic_inc_return_unchecked(&autogroup_seq_nr);
66001 ag->tg = tg;
66002 #ifdef CONFIG_RT_GROUP_SCHED
66003 /*
66004 diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
66005 index 8a39fa3..34f3dbc 100644
66006 --- a/kernel/sched_fair.c
66007 +++ b/kernel/sched_fair.c
66008 @@ -4801,7 +4801,7 @@ static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { }
66009 * run_rebalance_domains is triggered when needed from the scheduler tick.
66010 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
66011 */
66012 -static void run_rebalance_domains(struct softirq_action *h)
66013 +static void run_rebalance_domains(void)
66014 {
66015 int this_cpu = smp_processor_id();
66016 struct rq *this_rq = cpu_rq(this_cpu);
66017 diff --git a/kernel/signal.c b/kernel/signal.c
66018 index 2065515..aed2987 100644
66019 --- a/kernel/signal.c
66020 +++ b/kernel/signal.c
66021 @@ -45,12 +45,12 @@ static struct kmem_cache *sigqueue_cachep;
66022
66023 int print_fatal_signals __read_mostly;
66024
66025 -static void __user *sig_handler(struct task_struct *t, int sig)
66026 +static __sighandler_t sig_handler(struct task_struct *t, int sig)
66027 {
66028 return t->sighand->action[sig - 1].sa.sa_handler;
66029 }
66030
66031 -static int sig_handler_ignored(void __user *handler, int sig)
66032 +static int sig_handler_ignored(__sighandler_t handler, int sig)
66033 {
66034 /* Is it explicitly or implicitly ignored? */
66035 return handler == SIG_IGN ||
66036 @@ -60,7 +60,7 @@ static int sig_handler_ignored(void __user *handler, int sig)
66037 static int sig_task_ignored(struct task_struct *t, int sig,
66038 int from_ancestor_ns)
66039 {
66040 - void __user *handler;
66041 + __sighandler_t handler;
66042
66043 handler = sig_handler(t, sig);
66044
66045 @@ -364,6 +364,9 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
66046 atomic_inc(&user->sigpending);
66047 rcu_read_unlock();
66048
66049 + if (!override_rlimit)
66050 + gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
66051 +
66052 if (override_rlimit ||
66053 atomic_read(&user->sigpending) <=
66054 task_rlimit(t, RLIMIT_SIGPENDING)) {
66055 @@ -488,7 +491,7 @@ flush_signal_handlers(struct task_struct *t, int force_default)
66056
66057 int unhandled_signal(struct task_struct *tsk, int sig)
66058 {
66059 - void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
66060 + __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
66061 if (is_global_init(tsk))
66062 return 1;
66063 if (handler != SIG_IGN && handler != SIG_DFL)
66064 @@ -815,6 +818,13 @@ static int check_kill_permission(int sig, struct siginfo *info,
66065 }
66066 }
66067
66068 + /* allow glibc communication via tgkill to other threads in our
66069 + thread group */
66070 + if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
66071 + sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
66072 + && gr_handle_signal(t, sig))
66073 + return -EPERM;
66074 +
66075 return security_task_kill(t, info, sig, 0);
66076 }
66077
66078 @@ -1165,7 +1175,7 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
66079 return send_signal(sig, info, p, 1);
66080 }
66081
66082 -static int
66083 +int
66084 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
66085 {
66086 return send_signal(sig, info, t, 0);
66087 @@ -1202,6 +1212,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
66088 unsigned long int flags;
66089 int ret, blocked, ignored;
66090 struct k_sigaction *action;
66091 + int is_unhandled = 0;
66092
66093 spin_lock_irqsave(&t->sighand->siglock, flags);
66094 action = &t->sighand->action[sig-1];
66095 @@ -1216,9 +1227,18 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
66096 }
66097 if (action->sa.sa_handler == SIG_DFL)
66098 t->signal->flags &= ~SIGNAL_UNKILLABLE;
66099 + if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
66100 + is_unhandled = 1;
66101 ret = specific_send_sig_info(sig, info, t);
66102 spin_unlock_irqrestore(&t->sighand->siglock, flags);
66103
66104 + /* only deal with unhandled signals, java etc trigger SIGSEGV during
66105 + normal operation */
66106 + if (is_unhandled) {
66107 + gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
66108 + gr_handle_crash(t, sig);
66109 + }
66110 +
66111 return ret;
66112 }
66113
66114 @@ -1285,8 +1305,11 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
66115 ret = check_kill_permission(sig, info, p);
66116 rcu_read_unlock();
66117
66118 - if (!ret && sig)
66119 + if (!ret && sig) {
66120 ret = do_send_sig_info(sig, info, p, true);
66121 + if (!ret)
66122 + gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
66123 + }
66124
66125 return ret;
66126 }
66127 @@ -2754,7 +2777,15 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
66128 int error = -ESRCH;
66129
66130 rcu_read_lock();
66131 - p = find_task_by_vpid(pid);
66132 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
66133 + /* allow glibc communication via tgkill to other threads in our
66134 + thread group */
66135 + if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
66136 + sig == (SIGRTMIN+1) && tgid == info->si_pid)
66137 + p = find_task_by_vpid_unrestricted(pid);
66138 + else
66139 +#endif
66140 + p = find_task_by_vpid(pid);
66141 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
66142 error = check_kill_permission(sig, info, p);
66143 /*
66144 diff --git a/kernel/smp.c b/kernel/smp.c
66145 index db197d6..17aef0b 100644
66146 --- a/kernel/smp.c
66147 +++ b/kernel/smp.c
66148 @@ -580,22 +580,22 @@ int smp_call_function(smp_call_func_t func, void *info, int wait)
66149 }
66150 EXPORT_SYMBOL(smp_call_function);
66151
66152 -void ipi_call_lock(void)
66153 +void ipi_call_lock(void) __acquires(call_function.lock)
66154 {
66155 raw_spin_lock(&call_function.lock);
66156 }
66157
66158 -void ipi_call_unlock(void)
66159 +void ipi_call_unlock(void) __releases(call_function.lock)
66160 {
66161 raw_spin_unlock(&call_function.lock);
66162 }
66163
66164 -void ipi_call_lock_irq(void)
66165 +void ipi_call_lock_irq(void) __acquires(call_function.lock)
66166 {
66167 raw_spin_lock_irq(&call_function.lock);
66168 }
66169
66170 -void ipi_call_unlock_irq(void)
66171 +void ipi_call_unlock_irq(void) __releases(call_function.lock)
66172 {
66173 raw_spin_unlock_irq(&call_function.lock);
66174 }
66175 diff --git a/kernel/softirq.c b/kernel/softirq.c
66176 index 2c71d91..1021f81 100644
66177 --- a/kernel/softirq.c
66178 +++ b/kernel/softirq.c
66179 @@ -56,7 +56,7 @@ static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp
66180
66181 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
66182
66183 -char *softirq_to_name[NR_SOFTIRQS] = {
66184 +const char * const softirq_to_name[NR_SOFTIRQS] = {
66185 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
66186 "TASKLET", "SCHED", "HRTIMER", "RCU"
66187 };
66188 @@ -235,7 +235,7 @@ restart:
66189 kstat_incr_softirqs_this_cpu(vec_nr);
66190
66191 trace_softirq_entry(vec_nr);
66192 - h->action(h);
66193 + h->action();
66194 trace_softirq_exit(vec_nr);
66195 if (unlikely(prev_count != preempt_count())) {
66196 printk(KERN_ERR "huh, entered softirq %u %s %p"
66197 @@ -385,9 +385,11 @@ void raise_softirq(unsigned int nr)
66198 local_irq_restore(flags);
66199 }
66200
66201 -void open_softirq(int nr, void (*action)(struct softirq_action *))
66202 +void open_softirq(int nr, void (*action)(void))
66203 {
66204 - softirq_vec[nr].action = action;
66205 + pax_open_kernel();
66206 + *(void **)&softirq_vec[nr].action = action;
66207 + pax_close_kernel();
66208 }
66209
66210 /*
66211 @@ -441,7 +443,7 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
66212
66213 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
66214
66215 -static void tasklet_action(struct softirq_action *a)
66216 +static void tasklet_action(void)
66217 {
66218 struct tasklet_struct *list;
66219
66220 @@ -476,7 +478,7 @@ static void tasklet_action(struct softirq_action *a)
66221 }
66222 }
66223
66224 -static void tasklet_hi_action(struct softirq_action *a)
66225 +static void tasklet_hi_action(void)
66226 {
66227 struct tasklet_struct *list;
66228
66229 diff --git a/kernel/sys.c b/kernel/sys.c
66230 index 481611f..0754d86 100644
66231 --- a/kernel/sys.c
66232 +++ b/kernel/sys.c
66233 @@ -158,6 +158,12 @@ static int set_one_prio(struct task_struct *p, int niceval, int error)
66234 error = -EACCES;
66235 goto out;
66236 }
66237 +
66238 + if (gr_handle_chroot_setpriority(p, niceval)) {
66239 + error = -EACCES;
66240 + goto out;
66241 + }
66242 +
66243 no_nice = security_task_setnice(p, niceval);
66244 if (no_nice) {
66245 error = no_nice;
66246 @@ -572,6 +578,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
66247 goto error;
66248 }
66249
66250 + if (gr_check_group_change(new->gid, new->egid, -1))
66251 + goto error;
66252 +
66253 if (rgid != (gid_t) -1 ||
66254 (egid != (gid_t) -1 && egid != old->gid))
66255 new->sgid = new->egid;
66256 @@ -601,6 +610,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
66257 old = current_cred();
66258
66259 retval = -EPERM;
66260 +
66261 + if (gr_check_group_change(gid, gid, gid))
66262 + goto error;
66263 +
66264 if (nsown_capable(CAP_SETGID))
66265 new->gid = new->egid = new->sgid = new->fsgid = gid;
66266 else if (gid == old->gid || gid == old->sgid)
66267 @@ -618,7 +631,7 @@ error:
66268 /*
66269 * change the user struct in a credentials set to match the new UID
66270 */
66271 -static int set_user(struct cred *new)
66272 +int set_user(struct cred *new)
66273 {
66274 struct user_struct *new_user;
66275
66276 @@ -688,6 +701,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
66277 goto error;
66278 }
66279
66280 + if (gr_check_user_change(new->uid, new->euid, -1))
66281 + goto error;
66282 +
66283 if (new->uid != old->uid) {
66284 retval = set_user(new);
66285 if (retval < 0)
66286 @@ -732,6 +748,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
66287 old = current_cred();
66288
66289 retval = -EPERM;
66290 +
66291 + if (gr_check_crash_uid(uid))
66292 + goto error;
66293 + if (gr_check_user_change(uid, uid, uid))
66294 + goto error;
66295 +
66296 if (nsown_capable(CAP_SETUID)) {
66297 new->suid = new->uid = uid;
66298 if (uid != old->uid) {
66299 @@ -786,6 +808,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
66300 goto error;
66301 }
66302
66303 + if (gr_check_user_change(ruid, euid, -1))
66304 + goto error;
66305 +
66306 if (ruid != (uid_t) -1) {
66307 new->uid = ruid;
66308 if (ruid != old->uid) {
66309 @@ -850,6 +875,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
66310 goto error;
66311 }
66312
66313 + if (gr_check_group_change(rgid, egid, -1))
66314 + goto error;
66315 +
66316 if (rgid != (gid_t) -1)
66317 new->gid = rgid;
66318 if (egid != (gid_t) -1)
66319 @@ -896,6 +924,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
66320 old = current_cred();
66321 old_fsuid = old->fsuid;
66322
66323 + if (gr_check_user_change(-1, -1, uid))
66324 + goto error;
66325 +
66326 if (uid == old->uid || uid == old->euid ||
66327 uid == old->suid || uid == old->fsuid ||
66328 nsown_capable(CAP_SETUID)) {
66329 @@ -906,6 +937,7 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
66330 }
66331 }
66332
66333 +error:
66334 abort_creds(new);
66335 return old_fsuid;
66336
66337 @@ -932,12 +964,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
66338 if (gid == old->gid || gid == old->egid ||
66339 gid == old->sgid || gid == old->fsgid ||
66340 nsown_capable(CAP_SETGID)) {
66341 + if (gr_check_group_change(-1, -1, gid))
66342 + goto error;
66343 +
66344 if (gid != old_fsgid) {
66345 new->fsgid = gid;
66346 goto change_okay;
66347 }
66348 }
66349
66350 +error:
66351 abort_creds(new);
66352 return old_fsgid;
66353
66354 @@ -1189,7 +1225,10 @@ static int override_release(char __user *release, int len)
66355 }
66356 v = ((LINUX_VERSION_CODE >> 8) & 0xff) + 40;
66357 snprintf(buf, len, "2.6.%u%s", v, rest);
66358 - ret = copy_to_user(release, buf, len);
66359 + if (len > sizeof(buf))
66360 + ret = -EFAULT;
66361 + else
66362 + ret = copy_to_user(release, buf, len);
66363 }
66364 return ret;
66365 }
66366 @@ -1243,19 +1282,19 @@ SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
66367 return -EFAULT;
66368
66369 down_read(&uts_sem);
66370 - error = __copy_to_user(&name->sysname, &utsname()->sysname,
66371 + error = __copy_to_user(name->sysname, &utsname()->sysname,
66372 __OLD_UTS_LEN);
66373 error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
66374 - error |= __copy_to_user(&name->nodename, &utsname()->nodename,
66375 + error |= __copy_to_user(name->nodename, &utsname()->nodename,
66376 __OLD_UTS_LEN);
66377 error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
66378 - error |= __copy_to_user(&name->release, &utsname()->release,
66379 + error |= __copy_to_user(name->release, &utsname()->release,
66380 __OLD_UTS_LEN);
66381 error |= __put_user(0, name->release + __OLD_UTS_LEN);
66382 - error |= __copy_to_user(&name->version, &utsname()->version,
66383 + error |= __copy_to_user(name->version, &utsname()->version,
66384 __OLD_UTS_LEN);
66385 error |= __put_user(0, name->version + __OLD_UTS_LEN);
66386 - error |= __copy_to_user(&name->machine, &utsname()->machine,
66387 + error |= __copy_to_user(name->machine, &utsname()->machine,
66388 __OLD_UTS_LEN);
66389 error |= __put_user(0, name->machine + __OLD_UTS_LEN);
66390 up_read(&uts_sem);
66391 @@ -1720,7 +1759,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
66392 error = get_dumpable(me->mm);
66393 break;
66394 case PR_SET_DUMPABLE:
66395 - if (arg2 < 0 || arg2 > 1) {
66396 + if (arg2 > 1) {
66397 error = -EINVAL;
66398 break;
66399 }
66400 diff --git a/kernel/sysctl.c b/kernel/sysctl.c
66401 index ae27196..7506d69 100644
66402 --- a/kernel/sysctl.c
66403 +++ b/kernel/sysctl.c
66404 @@ -86,6 +86,13 @@
66405
66406
66407 #if defined(CONFIG_SYSCTL)
66408 +#include <linux/grsecurity.h>
66409 +#include <linux/grinternal.h>
66410 +
66411 +extern __u32 gr_handle_sysctl(const ctl_table *table, const int op);
66412 +extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
66413 + const int op);
66414 +extern int gr_handle_chroot_sysctl(const int op);
66415
66416 /* External variables not in a header file. */
66417 extern int sysctl_overcommit_memory;
66418 @@ -191,6 +198,7 @@ static int sysrq_sysctl_handler(ctl_table *table, int write,
66419 }
66420
66421 #endif
66422 +extern struct ctl_table grsecurity_table[];
66423
66424 static struct ctl_table root_table[];
66425 static struct ctl_table_root sysctl_table_root;
66426 @@ -220,6 +228,20 @@ extern struct ctl_table epoll_table[];
66427 int sysctl_legacy_va_layout;
66428 #endif
66429
66430 +#ifdef CONFIG_PAX_SOFTMODE
66431 +static ctl_table pax_table[] = {
66432 + {
66433 + .procname = "softmode",
66434 + .data = &pax_softmode,
66435 + .maxlen = sizeof(unsigned int),
66436 + .mode = 0600,
66437 + .proc_handler = &proc_dointvec,
66438 + },
66439 +
66440 + { }
66441 +};
66442 +#endif
66443 +
66444 /* The default sysctl tables: */
66445
66446 static struct ctl_table root_table[] = {
66447 @@ -266,6 +288,22 @@ static int max_extfrag_threshold = 1000;
66448 #endif
66449
66450 static struct ctl_table kern_table[] = {
66451 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
66452 + {
66453 + .procname = "grsecurity",
66454 + .mode = 0500,
66455 + .child = grsecurity_table,
66456 + },
66457 +#endif
66458 +
66459 +#ifdef CONFIG_PAX_SOFTMODE
66460 + {
66461 + .procname = "pax",
66462 + .mode = 0500,
66463 + .child = pax_table,
66464 + },
66465 +#endif
66466 +
66467 {
66468 .procname = "sched_child_runs_first",
66469 .data = &sysctl_sched_child_runs_first,
66470 @@ -550,7 +588,7 @@ static struct ctl_table kern_table[] = {
66471 .data = &modprobe_path,
66472 .maxlen = KMOD_PATH_LEN,
66473 .mode = 0644,
66474 - .proc_handler = proc_dostring,
66475 + .proc_handler = proc_dostring_modpriv,
66476 },
66477 {
66478 .procname = "modules_disabled",
66479 @@ -717,16 +755,20 @@ static struct ctl_table kern_table[] = {
66480 .extra1 = &zero,
66481 .extra2 = &one,
66482 },
66483 +#endif
66484 {
66485 .procname = "kptr_restrict",
66486 .data = &kptr_restrict,
66487 .maxlen = sizeof(int),
66488 .mode = 0644,
66489 .proc_handler = proc_dmesg_restrict,
66490 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66491 + .extra1 = &two,
66492 +#else
66493 .extra1 = &zero,
66494 +#endif
66495 .extra2 = &two,
66496 },
66497 -#endif
66498 {
66499 .procname = "ngroups_max",
66500 .data = &ngroups_max,
66501 @@ -1216,6 +1258,13 @@ static struct ctl_table vm_table[] = {
66502 .proc_handler = proc_dointvec_minmax,
66503 .extra1 = &zero,
66504 },
66505 + {
66506 + .procname = "heap_stack_gap",
66507 + .data = &sysctl_heap_stack_gap,
66508 + .maxlen = sizeof(sysctl_heap_stack_gap),
66509 + .mode = 0644,
66510 + .proc_handler = proc_doulongvec_minmax,
66511 + },
66512 #else
66513 {
66514 .procname = "nr_trim_pages",
66515 @@ -1720,6 +1769,17 @@ static int test_perm(int mode, int op)
66516 int sysctl_perm(struct ctl_table_root *root, struct ctl_table *table, int op)
66517 {
66518 int mode;
66519 + int error;
66520 +
66521 + if (table->parent != NULL && table->parent->procname != NULL &&
66522 + table->procname != NULL &&
66523 + gr_handle_sysctl_mod(table->parent->procname, table->procname, op))
66524 + return -EACCES;
66525 + if (gr_handle_chroot_sysctl(op))
66526 + return -EACCES;
66527 + error = gr_handle_sysctl(table, op);
66528 + if (error)
66529 + return error;
66530
66531 if (root->permissions)
66532 mode = root->permissions(root, current->nsproxy, table);
66533 @@ -2124,6 +2184,16 @@ int proc_dostring(struct ctl_table *table, int write,
66534 buffer, lenp, ppos);
66535 }
66536
66537 +int proc_dostring_modpriv(struct ctl_table *table, int write,
66538 + void __user *buffer, size_t *lenp, loff_t *ppos)
66539 +{
66540 + if (write && !capable(CAP_SYS_MODULE))
66541 + return -EPERM;
66542 +
66543 + return _proc_do_string(table->data, table->maxlen, write,
66544 + buffer, lenp, ppos);
66545 +}
66546 +
66547 static size_t proc_skip_spaces(char **buf)
66548 {
66549 size_t ret;
66550 @@ -2229,6 +2299,8 @@ static int proc_put_long(void __user **buf, size_t *size, unsigned long val,
66551 len = strlen(tmp);
66552 if (len > *size)
66553 len = *size;
66554 + if (len > sizeof(tmp))
66555 + len = sizeof(tmp);
66556 if (copy_to_user(*buf, tmp, len))
66557 return -EFAULT;
66558 *size -= len;
66559 @@ -2545,8 +2617,11 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int
66560 *i = val;
66561 } else {
66562 val = convdiv * (*i) / convmul;
66563 - if (!first)
66564 + if (!first) {
66565 err = proc_put_char(&buffer, &left, '\t');
66566 + if (err)
66567 + break;
66568 + }
66569 err = proc_put_long(&buffer, &left, val, false);
66570 if (err)
66571 break;
66572 @@ -2941,6 +3016,12 @@ int proc_dostring(struct ctl_table *table, int write,
66573 return -ENOSYS;
66574 }
66575
66576 +int proc_dostring_modpriv(struct ctl_table *table, int write,
66577 + void __user *buffer, size_t *lenp, loff_t *ppos)
66578 +{
66579 + return -ENOSYS;
66580 +}
66581 +
66582 int proc_dointvec(struct ctl_table *table, int write,
66583 void __user *buffer, size_t *lenp, loff_t *ppos)
66584 {
66585 @@ -2997,6 +3078,7 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
66586 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
66587 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
66588 EXPORT_SYMBOL(proc_dostring);
66589 +EXPORT_SYMBOL(proc_dostring_modpriv);
66590 EXPORT_SYMBOL(proc_doulongvec_minmax);
66591 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
66592 EXPORT_SYMBOL(register_sysctl_table);
66593 diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c
66594 index a650694..aaeeb20 100644
66595 --- a/kernel/sysctl_binary.c
66596 +++ b/kernel/sysctl_binary.c
66597 @@ -989,7 +989,7 @@ static ssize_t bin_intvec(struct file *file,
66598 int i;
66599
66600 set_fs(KERNEL_DS);
66601 - result = vfs_read(file, buffer, BUFSZ - 1, &pos);
66602 + result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
66603 set_fs(old_fs);
66604 if (result < 0)
66605 goto out_kfree;
66606 @@ -1034,7 +1034,7 @@ static ssize_t bin_intvec(struct file *file,
66607 }
66608
66609 set_fs(KERNEL_DS);
66610 - result = vfs_write(file, buffer, str - buffer, &pos);
66611 + result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
66612 set_fs(old_fs);
66613 if (result < 0)
66614 goto out_kfree;
66615 @@ -1067,7 +1067,7 @@ static ssize_t bin_ulongvec(struct file *file,
66616 int i;
66617
66618 set_fs(KERNEL_DS);
66619 - result = vfs_read(file, buffer, BUFSZ - 1, &pos);
66620 + result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
66621 set_fs(old_fs);
66622 if (result < 0)
66623 goto out_kfree;
66624 @@ -1112,7 +1112,7 @@ static ssize_t bin_ulongvec(struct file *file,
66625 }
66626
66627 set_fs(KERNEL_DS);
66628 - result = vfs_write(file, buffer, str - buffer, &pos);
66629 + result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
66630 set_fs(old_fs);
66631 if (result < 0)
66632 goto out_kfree;
66633 @@ -1138,7 +1138,7 @@ static ssize_t bin_uuid(struct file *file,
66634 int i;
66635
66636 set_fs(KERNEL_DS);
66637 - result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
66638 + result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
66639 set_fs(old_fs);
66640 if (result < 0)
66641 goto out;
66642 @@ -1185,7 +1185,7 @@ static ssize_t bin_dn_node_address(struct file *file,
66643 __le16 dnaddr;
66644
66645 set_fs(KERNEL_DS);
66646 - result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
66647 + result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
66648 set_fs(old_fs);
66649 if (result < 0)
66650 goto out;
66651 @@ -1233,7 +1233,7 @@ static ssize_t bin_dn_node_address(struct file *file,
66652 le16_to_cpu(dnaddr) & 0x3ff);
66653
66654 set_fs(KERNEL_DS);
66655 - result = vfs_write(file, buf, len, &pos);
66656 + result = vfs_write(file, (const char __force_user *)buf, len, &pos);
66657 set_fs(old_fs);
66658 if (result < 0)
66659 goto out;
66660 diff --git a/kernel/sysctl_check.c b/kernel/sysctl_check.c
66661 index 362da65..ab8ef8c 100644
66662 --- a/kernel/sysctl_check.c
66663 +++ b/kernel/sysctl_check.c
66664 @@ -129,6 +129,7 @@ int sysctl_check_table(struct nsproxy *namespaces, struct ctl_table *table)
66665 set_fail(&fail, table, "Directory with extra2");
66666 } else {
66667 if ((table->proc_handler == proc_dostring) ||
66668 + (table->proc_handler == proc_dostring_modpriv) ||
66669 (table->proc_handler == proc_dointvec) ||
66670 (table->proc_handler == proc_dointvec_minmax) ||
66671 (table->proc_handler == proc_dointvec_jiffies) ||
66672 diff --git a/kernel/taskstats.c b/kernel/taskstats.c
66673 index e660464..c8b9e67 100644
66674 --- a/kernel/taskstats.c
66675 +++ b/kernel/taskstats.c
66676 @@ -27,9 +27,12 @@
66677 #include <linux/cgroup.h>
66678 #include <linux/fs.h>
66679 #include <linux/file.h>
66680 +#include <linux/grsecurity.h>
66681 #include <net/genetlink.h>
66682 #include <linux/atomic.h>
66683
66684 +extern int gr_is_taskstats_denied(int pid);
66685 +
66686 /*
66687 * Maximum length of a cpumask that can be specified in
66688 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
66689 @@ -556,6 +559,9 @@ err:
66690
66691 static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
66692 {
66693 + if (gr_is_taskstats_denied(current->pid))
66694 + return -EACCES;
66695 +
66696 if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
66697 return cmd_attr_register_cpumask(info);
66698 else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
66699 diff --git a/kernel/time.c b/kernel/time.c
66700 index 73e416d..cfc6f69 100644
66701 --- a/kernel/time.c
66702 +++ b/kernel/time.c
66703 @@ -163,6 +163,11 @@ int do_sys_settimeofday(const struct timespec *tv, const struct timezone *tz)
66704 return error;
66705
66706 if (tz) {
66707 + /* we log in do_settimeofday called below, so don't log twice
66708 + */
66709 + if (!tv)
66710 + gr_log_timechange();
66711 +
66712 /* SMP safe, global irq locking makes it work. */
66713 sys_tz = *tz;
66714 update_vsyscall_tz();
66715 diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
66716 index 8a46f5d..bbe6f9c 100644
66717 --- a/kernel/time/alarmtimer.c
66718 +++ b/kernel/time/alarmtimer.c
66719 @@ -773,7 +773,7 @@ static int __init alarmtimer_init(void)
66720 struct platform_device *pdev;
66721 int error = 0;
66722 int i;
66723 - struct k_clock alarm_clock = {
66724 + static struct k_clock alarm_clock = {
66725 .clock_getres = alarm_clock_getres,
66726 .clock_get = alarm_clock_get,
66727 .timer_create = alarm_timer_create,
66728 diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
66729 index fd4a7b1..fae5c2a 100644
66730 --- a/kernel/time/tick-broadcast.c
66731 +++ b/kernel/time/tick-broadcast.c
66732 @@ -115,7 +115,7 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
66733 * then clear the broadcast bit.
66734 */
66735 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
66736 - int cpu = smp_processor_id();
66737 + cpu = smp_processor_id();
66738
66739 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
66740 tick_broadcast_clear_oneshot(cpu);
66741 diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
66742 index 2378413..be455fd 100644
66743 --- a/kernel/time/timekeeping.c
66744 +++ b/kernel/time/timekeeping.c
66745 @@ -14,6 +14,7 @@
66746 #include <linux/init.h>
66747 #include <linux/mm.h>
66748 #include <linux/sched.h>
66749 +#include <linux/grsecurity.h>
66750 #include <linux/syscore_ops.h>
66751 #include <linux/clocksource.h>
66752 #include <linux/jiffies.h>
66753 @@ -365,6 +366,8 @@ int do_settimeofday(const struct timespec *tv)
66754 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
66755 return -EINVAL;
66756
66757 + gr_log_timechange();
66758 +
66759 write_seqlock_irqsave(&xtime_lock, flags);
66760
66761 timekeeping_forward_now();
66762 diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
66763 index 3258455..f35227d 100644
66764 --- a/kernel/time/timer_list.c
66765 +++ b/kernel/time/timer_list.c
66766 @@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
66767
66768 static void print_name_offset(struct seq_file *m, void *sym)
66769 {
66770 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66771 + SEQ_printf(m, "<%p>", NULL);
66772 +#else
66773 char symname[KSYM_NAME_LEN];
66774
66775 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
66776 SEQ_printf(m, "<%pK>", sym);
66777 else
66778 SEQ_printf(m, "%s", symname);
66779 +#endif
66780 }
66781
66782 static void
66783 @@ -112,7 +116,11 @@ next_one:
66784 static void
66785 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
66786 {
66787 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66788 + SEQ_printf(m, " .base: %p\n", NULL);
66789 +#else
66790 SEQ_printf(m, " .base: %pK\n", base);
66791 +#endif
66792 SEQ_printf(m, " .index: %d\n",
66793 base->index);
66794 SEQ_printf(m, " .resolution: %Lu nsecs\n",
66795 @@ -293,7 +301,11 @@ static int __init init_timer_list_procfs(void)
66796 {
66797 struct proc_dir_entry *pe;
66798
66799 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
66800 + pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
66801 +#else
66802 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
66803 +#endif
66804 if (!pe)
66805 return -ENOMEM;
66806 return 0;
66807 diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
66808 index 0b537f2..9e71eca 100644
66809 --- a/kernel/time/timer_stats.c
66810 +++ b/kernel/time/timer_stats.c
66811 @@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
66812 static unsigned long nr_entries;
66813 static struct entry entries[MAX_ENTRIES];
66814
66815 -static atomic_t overflow_count;
66816 +static atomic_unchecked_t overflow_count;
66817
66818 /*
66819 * The entries are in a hash-table, for fast lookup:
66820 @@ -140,7 +140,7 @@ static void reset_entries(void)
66821 nr_entries = 0;
66822 memset(entries, 0, sizeof(entries));
66823 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
66824 - atomic_set(&overflow_count, 0);
66825 + atomic_set_unchecked(&overflow_count, 0);
66826 }
66827
66828 static struct entry *alloc_entry(void)
66829 @@ -261,7 +261,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
66830 if (likely(entry))
66831 entry->count++;
66832 else
66833 - atomic_inc(&overflow_count);
66834 + atomic_inc_unchecked(&overflow_count);
66835
66836 out_unlock:
66837 raw_spin_unlock_irqrestore(lock, flags);
66838 @@ -269,12 +269,16 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
66839
66840 static void print_name_offset(struct seq_file *m, unsigned long addr)
66841 {
66842 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66843 + seq_printf(m, "<%p>", NULL);
66844 +#else
66845 char symname[KSYM_NAME_LEN];
66846
66847 if (lookup_symbol_name(addr, symname) < 0)
66848 seq_printf(m, "<%p>", (void *)addr);
66849 else
66850 seq_printf(m, "%s", symname);
66851 +#endif
66852 }
66853
66854 static int tstats_show(struct seq_file *m, void *v)
66855 @@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *m, void *v)
66856
66857 seq_puts(m, "Timer Stats Version: v0.2\n");
66858 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
66859 - if (atomic_read(&overflow_count))
66860 + if (atomic_read_unchecked(&overflow_count))
66861 seq_printf(m, "Overflow: %d entries\n",
66862 - atomic_read(&overflow_count));
66863 + atomic_read_unchecked(&overflow_count));
66864
66865 for (i = 0; i < nr_entries; i++) {
66866 entry = entries + i;
66867 @@ -417,7 +421,11 @@ static int __init init_tstats_procfs(void)
66868 {
66869 struct proc_dir_entry *pe;
66870
66871 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
66872 + pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
66873 +#else
66874 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
66875 +#endif
66876 if (!pe)
66877 return -ENOMEM;
66878 return 0;
66879 diff --git a/kernel/timer.c b/kernel/timer.c
66880 index 9c3c62b..441690e 100644
66881 --- a/kernel/timer.c
66882 +++ b/kernel/timer.c
66883 @@ -1304,7 +1304,7 @@ void update_process_times(int user_tick)
66884 /*
66885 * This function runs timers and the timer-tq in bottom half context.
66886 */
66887 -static void run_timer_softirq(struct softirq_action *h)
66888 +static void run_timer_softirq(void)
66889 {
66890 struct tvec_base *base = __this_cpu_read(tvec_bases);
66891
66892 diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
66893 index 16fc34a..efd8bb8 100644
66894 --- a/kernel/trace/blktrace.c
66895 +++ b/kernel/trace/blktrace.c
66896 @@ -324,7 +324,7 @@ static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
66897 struct blk_trace *bt = filp->private_data;
66898 char buf[16];
66899
66900 - snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
66901 + snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
66902
66903 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
66904 }
66905 @@ -389,7 +389,7 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
66906 return 1;
66907
66908 bt = buf->chan->private_data;
66909 - atomic_inc(&bt->dropped);
66910 + atomic_inc_unchecked(&bt->dropped);
66911 return 0;
66912 }
66913
66914 @@ -490,7 +490,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
66915
66916 bt->dir = dir;
66917 bt->dev = dev;
66918 - atomic_set(&bt->dropped, 0);
66919 + atomic_set_unchecked(&bt->dropped, 0);
66920
66921 ret = -EIO;
66922 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
66923 diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
66924 index 25b4f4d..6f4772d 100644
66925 --- a/kernel/trace/ftrace.c
66926 +++ b/kernel/trace/ftrace.c
66927 @@ -1587,12 +1587,17 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
66928 if (unlikely(ftrace_disabled))
66929 return 0;
66930
66931 + ret = ftrace_arch_code_modify_prepare();
66932 + FTRACE_WARN_ON(ret);
66933 + if (ret)
66934 + return 0;
66935 +
66936 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
66937 + FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
66938 if (ret) {
66939 ftrace_bug(ret, ip);
66940 - return 0;
66941 }
66942 - return 1;
66943 + return ret ? 0 : 1;
66944 }
66945
66946 /*
66947 @@ -2608,7 +2613,7 @@ static void ftrace_free_entry_rcu(struct rcu_head *rhp)
66948
66949 int
66950 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
66951 - void *data)
66952 + void *data)
66953 {
66954 struct ftrace_func_probe *entry;
66955 struct ftrace_page *pg;
66956 diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
66957 index f2bd275..adaf3a2 100644
66958 --- a/kernel/trace/trace.c
66959 +++ b/kernel/trace/trace.c
66960 @@ -4201,10 +4201,9 @@ static const struct file_operations tracing_dyn_info_fops = {
66961 };
66962 #endif
66963
66964 -static struct dentry *d_tracer;
66965 -
66966 struct dentry *tracing_init_dentry(void)
66967 {
66968 + static struct dentry *d_tracer;
66969 static int once;
66970
66971 if (d_tracer)
66972 @@ -4224,10 +4223,9 @@ struct dentry *tracing_init_dentry(void)
66973 return d_tracer;
66974 }
66975
66976 -static struct dentry *d_percpu;
66977 -
66978 struct dentry *tracing_dentry_percpu(void)
66979 {
66980 + static struct dentry *d_percpu;
66981 static int once;
66982 struct dentry *d_tracer;
66983
66984 diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
66985 index c212a7f..7b02394 100644
66986 --- a/kernel/trace/trace_events.c
66987 +++ b/kernel/trace/trace_events.c
66988 @@ -1299,10 +1299,6 @@ static LIST_HEAD(ftrace_module_file_list);
66989 struct ftrace_module_file_ops {
66990 struct list_head list;
66991 struct module *mod;
66992 - struct file_operations id;
66993 - struct file_operations enable;
66994 - struct file_operations format;
66995 - struct file_operations filter;
66996 };
66997
66998 static struct ftrace_module_file_ops *
66999 @@ -1323,17 +1319,12 @@ trace_create_file_ops(struct module *mod)
67000
67001 file_ops->mod = mod;
67002
67003 - file_ops->id = ftrace_event_id_fops;
67004 - file_ops->id.owner = mod;
67005 -
67006 - file_ops->enable = ftrace_enable_fops;
67007 - file_ops->enable.owner = mod;
67008 -
67009 - file_ops->filter = ftrace_event_filter_fops;
67010 - file_ops->filter.owner = mod;
67011 -
67012 - file_ops->format = ftrace_event_format_fops;
67013 - file_ops->format.owner = mod;
67014 + pax_open_kernel();
67015 + *(void **)&mod->trace_id.owner = mod;
67016 + *(void **)&mod->trace_enable.owner = mod;
67017 + *(void **)&mod->trace_filter.owner = mod;
67018 + *(void **)&mod->trace_format.owner = mod;
67019 + pax_close_kernel();
67020
67021 list_add(&file_ops->list, &ftrace_module_file_list);
67022
67023 @@ -1357,8 +1348,8 @@ static void trace_module_add_events(struct module *mod)
67024
67025 for_each_event(call, start, end) {
67026 __trace_add_event_call(*call, mod,
67027 - &file_ops->id, &file_ops->enable,
67028 - &file_ops->filter, &file_ops->format);
67029 + &mod->trace_id, &mod->trace_enable,
67030 + &mod->trace_filter, &mod->trace_format);
67031 }
67032 }
67033
67034 diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
67035 index 00d527c..7c5b1a3 100644
67036 --- a/kernel/trace/trace_kprobe.c
67037 +++ b/kernel/trace/trace_kprobe.c
67038 @@ -217,7 +217,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
67039 long ret;
67040 int maxlen = get_rloc_len(*(u32 *)dest);
67041 u8 *dst = get_rloc_data(dest);
67042 - u8 *src = addr;
67043 + const u8 __user *src = (const u8 __force_user *)addr;
67044 mm_segment_t old_fs = get_fs();
67045 if (!maxlen)
67046 return;
67047 @@ -229,7 +229,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
67048 pagefault_disable();
67049 do
67050 ret = __copy_from_user_inatomic(dst++, src++, 1);
67051 - while (dst[-1] && ret == 0 && src - (u8 *)addr < maxlen);
67052 + while (dst[-1] && ret == 0 && src - (const u8 __force_user *)addr < maxlen);
67053 dst[-1] = '\0';
67054 pagefault_enable();
67055 set_fs(old_fs);
67056 @@ -238,7 +238,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
67057 ((u8 *)get_rloc_data(dest))[0] = '\0';
67058 *(u32 *)dest = make_data_rloc(0, get_rloc_offs(*(u32 *)dest));
67059 } else
67060 - *(u32 *)dest = make_data_rloc(src - (u8 *)addr,
67061 + *(u32 *)dest = make_data_rloc(src - (const u8 __force_user *)addr,
67062 get_rloc_offs(*(u32 *)dest));
67063 }
67064 /* Return the length of string -- including null terminal byte */
67065 @@ -252,7 +252,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string_size)(struct pt_regs *regs,
67066 set_fs(KERNEL_DS);
67067 pagefault_disable();
67068 do {
67069 - ret = __copy_from_user_inatomic(&c, (u8 *)addr + len, 1);
67070 + ret = __copy_from_user_inatomic(&c, (const u8 __force_user *)addr + len, 1);
67071 len++;
67072 } while (c && ret == 0 && len < MAX_STRING_SIZE);
67073 pagefault_enable();
67074 diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
67075 index fd3c8aa..5f324a6 100644
67076 --- a/kernel/trace/trace_mmiotrace.c
67077 +++ b/kernel/trace/trace_mmiotrace.c
67078 @@ -24,7 +24,7 @@ struct header_iter {
67079 static struct trace_array *mmio_trace_array;
67080 static bool overrun_detected;
67081 static unsigned long prev_overruns;
67082 -static atomic_t dropped_count;
67083 +static atomic_unchecked_t dropped_count;
67084
67085 static void mmio_reset_data(struct trace_array *tr)
67086 {
67087 @@ -127,7 +127,7 @@ static void mmio_close(struct trace_iterator *iter)
67088
67089 static unsigned long count_overruns(struct trace_iterator *iter)
67090 {
67091 - unsigned long cnt = atomic_xchg(&dropped_count, 0);
67092 + unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
67093 unsigned long over = ring_buffer_overruns(iter->tr->buffer);
67094
67095 if (over > prev_overruns)
67096 @@ -317,7 +317,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
67097 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
67098 sizeof(*entry), 0, pc);
67099 if (!event) {
67100 - atomic_inc(&dropped_count);
67101 + atomic_inc_unchecked(&dropped_count);
67102 return;
67103 }
67104 entry = ring_buffer_event_data(event);
67105 @@ -347,7 +347,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
67106 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
67107 sizeof(*entry), 0, pc);
67108 if (!event) {
67109 - atomic_inc(&dropped_count);
67110 + atomic_inc_unchecked(&dropped_count);
67111 return;
67112 }
67113 entry = ring_buffer_event_data(event);
67114 diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
67115 index 5199930..26c73a0 100644
67116 --- a/kernel/trace/trace_output.c
67117 +++ b/kernel/trace/trace_output.c
67118 @@ -278,7 +278,7 @@ int trace_seq_path(struct trace_seq *s, struct path *path)
67119
67120 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
67121 if (!IS_ERR(p)) {
67122 - p = mangle_path(s->buffer + s->len, p, "\n");
67123 + p = mangle_path(s->buffer + s->len, p, "\n\\");
67124 if (p) {
67125 s->len = p - s->buffer;
67126 return 1;
67127 diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
67128 index 77575b3..6e623d1 100644
67129 --- a/kernel/trace/trace_stack.c
67130 +++ b/kernel/trace/trace_stack.c
67131 @@ -50,7 +50,7 @@ static inline void check_stack(void)
67132 return;
67133
67134 /* we do not handle interrupt stacks yet */
67135 - if (!object_is_on_stack(&this_size))
67136 + if (!object_starts_on_stack(&this_size))
67137 return;
67138
67139 local_irq_save(flags);
67140 diff --git a/kernel/trace/trace_workqueue.c b/kernel/trace/trace_workqueue.c
67141 index 209b379..7f76423 100644
67142 --- a/kernel/trace/trace_workqueue.c
67143 +++ b/kernel/trace/trace_workqueue.c
67144 @@ -22,7 +22,7 @@ struct cpu_workqueue_stats {
67145 int cpu;
67146 pid_t pid;
67147 /* Can be inserted from interrupt or user context, need to be atomic */
67148 - atomic_t inserted;
67149 + atomic_unchecked_t inserted;
67150 /*
67151 * Don't need to be atomic, works are serialized in a single workqueue thread
67152 * on a single CPU.
67153 @@ -60,7 +60,7 @@ probe_workqueue_insertion(void *ignore,
67154 spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
67155 list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
67156 if (node->pid == wq_thread->pid) {
67157 - atomic_inc(&node->inserted);
67158 + atomic_inc_unchecked(&node->inserted);
67159 goto found;
67160 }
67161 }
67162 @@ -210,7 +210,7 @@ static int workqueue_stat_show(struct seq_file *s, void *p)
67163 tsk = get_pid_task(pid, PIDTYPE_PID);
67164 if (tsk) {
67165 seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
67166 - atomic_read(&cws->inserted), cws->executed,
67167 + atomic_read_unchecked(&cws->inserted), cws->executed,
67168 tsk->comm);
67169 put_task_struct(tsk);
67170 }
67171 diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
67172 index 82928f5..92da771 100644
67173 --- a/lib/Kconfig.debug
67174 +++ b/lib/Kconfig.debug
67175 @@ -1103,6 +1103,7 @@ config LATENCYTOP
67176 depends on DEBUG_KERNEL
67177 depends on STACKTRACE_SUPPORT
67178 depends on PROC_FS
67179 + depends on !GRKERNSEC_HIDESYM
67180 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND
67181 select KALLSYMS
67182 select KALLSYMS_ALL
67183 diff --git a/lib/bitmap.c b/lib/bitmap.c
67184 index 0d4a127..33a06c7 100644
67185 --- a/lib/bitmap.c
67186 +++ b/lib/bitmap.c
67187 @@ -419,7 +419,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
67188 {
67189 int c, old_c, totaldigits, ndigits, nchunks, nbits;
67190 u32 chunk;
67191 - const char __user __force *ubuf = (const char __user __force *)buf;
67192 + const char __user *ubuf = (const char __force_user *)buf;
67193
67194 bitmap_zero(maskp, nmaskbits);
67195
67196 @@ -504,7 +504,7 @@ int bitmap_parse_user(const char __user *ubuf,
67197 {
67198 if (!access_ok(VERIFY_READ, ubuf, ulen))
67199 return -EFAULT;
67200 - return __bitmap_parse((const char __force *)ubuf,
67201 + return __bitmap_parse((const char __force_kernel *)ubuf,
67202 ulen, 1, maskp, nmaskbits);
67203
67204 }
67205 @@ -596,7 +596,7 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
67206 {
67207 unsigned a, b;
67208 int c, old_c, totaldigits;
67209 - const char __user __force *ubuf = (const char __user __force *)buf;
67210 + const char __user *ubuf = (const char __force_user *)buf;
67211 int exp_digit, in_range;
67212
67213 totaldigits = c = 0;
67214 @@ -696,7 +696,7 @@ int bitmap_parselist_user(const char __user *ubuf,
67215 {
67216 if (!access_ok(VERIFY_READ, ubuf, ulen))
67217 return -EFAULT;
67218 - return __bitmap_parselist((const char __force *)ubuf,
67219 + return __bitmap_parselist((const char __force_kernel *)ubuf,
67220 ulen, 1, maskp, nmaskbits);
67221 }
67222 EXPORT_SYMBOL(bitmap_parselist_user);
67223 diff --git a/lib/bug.c b/lib/bug.c
67224 index 1955209..cbbb2ad 100644
67225 --- a/lib/bug.c
67226 +++ b/lib/bug.c
67227 @@ -133,6 +133,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
67228 return BUG_TRAP_TYPE_NONE;
67229
67230 bug = find_bug(bugaddr);
67231 + if (!bug)
67232 + return BUG_TRAP_TYPE_NONE;
67233
67234 file = NULL;
67235 line = 0;
67236 diff --git a/lib/debugobjects.c b/lib/debugobjects.c
67237 index a78b7c6..2c73084 100644
67238 --- a/lib/debugobjects.c
67239 +++ b/lib/debugobjects.c
67240 @@ -284,7 +284,7 @@ static void debug_object_is_on_stack(void *addr, int onstack)
67241 if (limit > 4)
67242 return;
67243
67244 - is_on_stack = object_is_on_stack(addr);
67245 + is_on_stack = object_starts_on_stack(addr);
67246 if (is_on_stack == onstack)
67247 return;
67248
67249 diff --git a/lib/devres.c b/lib/devres.c
67250 index 7c0e953..f642b5c 100644
67251 --- a/lib/devres.c
67252 +++ b/lib/devres.c
67253 @@ -80,7 +80,7 @@ EXPORT_SYMBOL(devm_ioremap_nocache);
67254 void devm_iounmap(struct device *dev, void __iomem *addr)
67255 {
67256 WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
67257 - (void *)addr));
67258 + (void __force *)addr));
67259 iounmap(addr);
67260 }
67261 EXPORT_SYMBOL(devm_iounmap);
67262 @@ -141,7 +141,7 @@ void devm_ioport_unmap(struct device *dev, void __iomem *addr)
67263 {
67264 ioport_unmap(addr);
67265 WARN_ON(devres_destroy(dev, devm_ioport_map_release,
67266 - devm_ioport_map_match, (void *)addr));
67267 + devm_ioport_map_match, (void __force *)addr));
67268 }
67269 EXPORT_SYMBOL(devm_ioport_unmap);
67270
67271 diff --git a/lib/dma-debug.c b/lib/dma-debug.c
67272 index fea790a..ebb0e82 100644
67273 --- a/lib/dma-debug.c
67274 +++ b/lib/dma-debug.c
67275 @@ -925,7 +925,7 @@ out:
67276
67277 static void check_for_stack(struct device *dev, void *addr)
67278 {
67279 - if (object_is_on_stack(addr))
67280 + if (object_starts_on_stack(addr))
67281 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
67282 "stack [addr=%p]\n", addr);
67283 }
67284 diff --git a/lib/extable.c b/lib/extable.c
67285 index 4cac81e..63e9b8f 100644
67286 --- a/lib/extable.c
67287 +++ b/lib/extable.c
67288 @@ -13,6 +13,7 @@
67289 #include <linux/init.h>
67290 #include <linux/sort.h>
67291 #include <asm/uaccess.h>
67292 +#include <asm/pgtable.h>
67293
67294 #ifndef ARCH_HAS_SORT_EXTABLE
67295 /*
67296 @@ -36,8 +37,10 @@ static int cmp_ex(const void *a, const void *b)
67297 void sort_extable(struct exception_table_entry *start,
67298 struct exception_table_entry *finish)
67299 {
67300 + pax_open_kernel();
67301 sort(start, finish - start, sizeof(struct exception_table_entry),
67302 cmp_ex, NULL);
67303 + pax_close_kernel();
67304 }
67305
67306 #ifdef CONFIG_MODULES
67307 diff --git a/lib/inflate.c b/lib/inflate.c
67308 index 013a761..c28f3fc 100644
67309 --- a/lib/inflate.c
67310 +++ b/lib/inflate.c
67311 @@ -269,7 +269,7 @@ static void free(void *where)
67312 malloc_ptr = free_mem_ptr;
67313 }
67314 #else
67315 -#define malloc(a) kmalloc(a, GFP_KERNEL)
67316 +#define malloc(a) kmalloc((a), GFP_KERNEL)
67317 #define free(a) kfree(a)
67318 #endif
67319
67320 diff --git a/lib/is_single_threaded.c b/lib/is_single_threaded.c
67321 index bd2bea9..6b3c95e 100644
67322 --- a/lib/is_single_threaded.c
67323 +++ b/lib/is_single_threaded.c
67324 @@ -22,6 +22,9 @@ bool current_is_single_threaded(void)
67325 struct task_struct *p, *t;
67326 bool ret;
67327
67328 + if (!mm)
67329 + return true;
67330 +
67331 if (atomic_read(&task->signal->live) != 1)
67332 return false;
67333
67334 diff --git a/lib/kref.c b/lib/kref.c
67335 index 3efb882..8492f4c 100644
67336 --- a/lib/kref.c
67337 +++ b/lib/kref.c
67338 @@ -52,7 +52,7 @@ void kref_get(struct kref *kref)
67339 */
67340 int kref_put(struct kref *kref, void (*release)(struct kref *kref))
67341 {
67342 - WARN_ON(release == NULL);
67343 + BUG_ON(release == NULL);
67344 WARN_ON(release == (void (*)(struct kref *))kfree);
67345
67346 if (atomic_dec_and_test(&kref->refcount)) {
67347 diff --git a/lib/radix-tree.c b/lib/radix-tree.c
67348 index d9df745..e73c2fe 100644
67349 --- a/lib/radix-tree.c
67350 +++ b/lib/radix-tree.c
67351 @@ -80,7 +80,7 @@ struct radix_tree_preload {
67352 int nr;
67353 struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH];
67354 };
67355 -static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
67356 +static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
67357
67358 static inline void *ptr_to_indirect(void *ptr)
67359 {
67360 diff --git a/lib/vsprintf.c b/lib/vsprintf.c
67361 index 993599e..84dc70e 100644
67362 --- a/lib/vsprintf.c
67363 +++ b/lib/vsprintf.c
67364 @@ -16,6 +16,9 @@
67365 * - scnprintf and vscnprintf
67366 */
67367
67368 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67369 +#define __INCLUDED_BY_HIDESYM 1
67370 +#endif
67371 #include <stdarg.h>
67372 #include <linux/module.h>
67373 #include <linux/types.h>
67374 @@ -413,7 +416,7 @@ char *symbol_string(char *buf, char *end, void *ptr,
67375 char sym[KSYM_SYMBOL_LEN];
67376 if (ext == 'B')
67377 sprint_backtrace(sym, value);
67378 - else if (ext != 'f' && ext != 's')
67379 + else if (ext != 'f' && ext != 's' && ext != 'a')
67380 sprint_symbol(sym, value);
67381 else
67382 kallsyms_lookup(value, NULL, NULL, NULL, sym);
67383 @@ -777,7 +780,11 @@ char *uuid_string(char *buf, char *end, const u8 *addr,
67384 return string(buf, end, uuid, spec);
67385 }
67386
67387 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67388 +int kptr_restrict __read_mostly = 2;
67389 +#else
67390 int kptr_restrict __read_mostly;
67391 +#endif
67392
67393 /*
67394 * Show a '%p' thing. A kernel extension is that the '%p' is followed
67395 @@ -791,6 +798,8 @@ int kptr_restrict __read_mostly;
67396 * - 'S' For symbolic direct pointers with offset
67397 * - 's' For symbolic direct pointers without offset
67398 * - 'B' For backtraced symbolic direct pointers with offset
67399 + * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
67400 + * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM
67401 * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
67402 * - 'r' For raw struct resource, e.g., [mem 0x0-0x1f flags 0x201]
67403 * - 'M' For a 6-byte MAC address, it prints the address in the
67404 @@ -835,12 +844,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
67405 {
67406 if (!ptr && *fmt != 'K') {
67407 /*
67408 - * Print (null) with the same width as a pointer so it makes
67409 + * Print (nil) with the same width as a pointer so it makes
67410 * tabular output look nice.
67411 */
67412 if (spec.field_width == -1)
67413 spec.field_width = 2 * sizeof(void *);
67414 - return string(buf, end, "(null)", spec);
67415 + return string(buf, end, "(nil)", spec);
67416 }
67417
67418 switch (*fmt) {
67419 @@ -850,6 +859,13 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
67420 /* Fallthrough */
67421 case 'S':
67422 case 's':
67423 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67424 + break;
67425 +#else
67426 + return symbol_string(buf, end, ptr, spec, *fmt);
67427 +#endif
67428 + case 'A':
67429 + case 'a':
67430 case 'B':
67431 return symbol_string(buf, end, ptr, spec, *fmt);
67432 case 'R':
67433 @@ -1608,11 +1624,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
67434 typeof(type) value; \
67435 if (sizeof(type) == 8) { \
67436 args = PTR_ALIGN(args, sizeof(u32)); \
67437 - *(u32 *)&value = *(u32 *)args; \
67438 - *((u32 *)&value + 1) = *(u32 *)(args + 4); \
67439 + *(u32 *)&value = *(const u32 *)args; \
67440 + *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
67441 } else { \
67442 args = PTR_ALIGN(args, sizeof(type)); \
67443 - value = *(typeof(type) *)args; \
67444 + value = *(const typeof(type) *)args; \
67445 } \
67446 args += sizeof(type); \
67447 value; \
67448 @@ -1675,7 +1691,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
67449 case FORMAT_TYPE_STR: {
67450 const char *str_arg = args;
67451 args += strlen(str_arg) + 1;
67452 - str = string(str, end, (char *)str_arg, spec);
67453 + str = string(str, end, str_arg, spec);
67454 break;
67455 }
67456
67457 diff --git a/localversion-grsec b/localversion-grsec
67458 new file mode 100644
67459 index 0000000..7cd6065
67460 --- /dev/null
67461 +++ b/localversion-grsec
67462 @@ -0,0 +1 @@
67463 +-grsec
67464 diff --git a/mm/Kconfig b/mm/Kconfig
67465 index 011b110..b492af2 100644
67466 --- a/mm/Kconfig
67467 +++ b/mm/Kconfig
67468 @@ -241,10 +241,10 @@ config KSM
67469 root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
67470
67471 config DEFAULT_MMAP_MIN_ADDR
67472 - int "Low address space to protect from user allocation"
67473 + int "Low address space to protect from user allocation"
67474 depends on MMU
67475 - default 4096
67476 - help
67477 + default 65536
67478 + help
67479 This is the portion of low virtual memory which should be protected
67480 from userspace allocation. Keeping a user from writing to low pages
67481 can help reduce the impact of kernel NULL pointer bugs.
67482 diff --git a/mm/filemap.c b/mm/filemap.c
67483 index 90286a4..f441caa 100644
67484 --- a/mm/filemap.c
67485 +++ b/mm/filemap.c
67486 @@ -1770,7 +1770,7 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
67487 struct address_space *mapping = file->f_mapping;
67488
67489 if (!mapping->a_ops->readpage)
67490 - return -ENOEXEC;
67491 + return -ENODEV;
67492 file_accessed(file);
67493 vma->vm_ops = &generic_file_vm_ops;
67494 vma->vm_flags |= VM_CAN_NONLINEAR;
67495 @@ -2176,6 +2176,7 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i
67496 *pos = i_size_read(inode);
67497
67498 if (limit != RLIM_INFINITY) {
67499 + gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
67500 if (*pos >= limit) {
67501 send_sig(SIGXFSZ, current, 0);
67502 return -EFBIG;
67503 diff --git a/mm/fremap.c b/mm/fremap.c
67504 index 9ed4fd4..c42648d 100644
67505 --- a/mm/fremap.c
67506 +++ b/mm/fremap.c
67507 @@ -155,6 +155,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
67508 retry:
67509 vma = find_vma(mm, start);
67510
67511 +#ifdef CONFIG_PAX_SEGMEXEC
67512 + if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
67513 + goto out;
67514 +#endif
67515 +
67516 /*
67517 * Make sure the vma is shared, that it supports prefaulting,
67518 * and that the remapped range is valid and fully within
67519 diff --git a/mm/highmem.c b/mm/highmem.c
67520 index 57d82c6..e9e0552 100644
67521 --- a/mm/highmem.c
67522 +++ b/mm/highmem.c
67523 @@ -125,9 +125,10 @@ static void flush_all_zero_pkmaps(void)
67524 * So no dangers, even with speculative execution.
67525 */
67526 page = pte_page(pkmap_page_table[i]);
67527 + pax_open_kernel();
67528 pte_clear(&init_mm, (unsigned long)page_address(page),
67529 &pkmap_page_table[i]);
67530 -
67531 + pax_close_kernel();
67532 set_page_address(page, NULL);
67533 need_flush = 1;
67534 }
67535 @@ -186,9 +187,11 @@ start:
67536 }
67537 }
67538 vaddr = PKMAP_ADDR(last_pkmap_nr);
67539 +
67540 + pax_open_kernel();
67541 set_pte_at(&init_mm, vaddr,
67542 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
67543 -
67544 + pax_close_kernel();
67545 pkmap_count[last_pkmap_nr] = 1;
67546 set_page_address(page, (void *)vaddr);
67547
67548 diff --git a/mm/huge_memory.c b/mm/huge_memory.c
67549 index 36b3d98..584cb54 100644
67550 --- a/mm/huge_memory.c
67551 +++ b/mm/huge_memory.c
67552 @@ -703,7 +703,7 @@ out:
67553 * run pte_offset_map on the pmd, if an huge pmd could
67554 * materialize from under us from a different thread.
67555 */
67556 - if (unlikely(__pte_alloc(mm, vma, pmd, address)))
67557 + if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
67558 return VM_FAULT_OOM;
67559 /* if an huge pmd materialized from under us just retry later */
67560 if (unlikely(pmd_trans_huge(*pmd)))
67561 diff --git a/mm/hugetlb.c b/mm/hugetlb.c
67562 index 2316840..b418671 100644
67563 --- a/mm/hugetlb.c
67564 +++ b/mm/hugetlb.c
67565 @@ -2347,6 +2347,27 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
67566 return 1;
67567 }
67568
67569 +#ifdef CONFIG_PAX_SEGMEXEC
67570 +static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
67571 +{
67572 + struct mm_struct *mm = vma->vm_mm;
67573 + struct vm_area_struct *vma_m;
67574 + unsigned long address_m;
67575 + pte_t *ptep_m;
67576 +
67577 + vma_m = pax_find_mirror_vma(vma);
67578 + if (!vma_m)
67579 + return;
67580 +
67581 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
67582 + address_m = address + SEGMEXEC_TASK_SIZE;
67583 + ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
67584 + get_page(page_m);
67585 + hugepage_add_anon_rmap(page_m, vma_m, address_m);
67586 + set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
67587 +}
67588 +#endif
67589 +
67590 /*
67591 * Hugetlb_cow() should be called with page lock of the original hugepage held.
67592 */
67593 @@ -2450,6 +2471,11 @@ retry_avoidcopy:
67594 make_huge_pte(vma, new_page, 1));
67595 page_remove_rmap(old_page);
67596 hugepage_add_new_anon_rmap(new_page, vma, address);
67597 +
67598 +#ifdef CONFIG_PAX_SEGMEXEC
67599 + pax_mirror_huge_pte(vma, address, new_page);
67600 +#endif
67601 +
67602 /* Make the old page be freed below */
67603 new_page = old_page;
67604 mmu_notifier_invalidate_range_end(mm,
67605 @@ -2601,6 +2627,10 @@ retry:
67606 && (vma->vm_flags & VM_SHARED)));
67607 set_huge_pte_at(mm, address, ptep, new_pte);
67608
67609 +#ifdef CONFIG_PAX_SEGMEXEC
67610 + pax_mirror_huge_pte(vma, address, page);
67611 +#endif
67612 +
67613 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
67614 /* Optimization, do the COW without a second fault */
67615 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
67616 @@ -2630,6 +2660,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
67617 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
67618 struct hstate *h = hstate_vma(vma);
67619
67620 +#ifdef CONFIG_PAX_SEGMEXEC
67621 + struct vm_area_struct *vma_m;
67622 +#endif
67623 +
67624 ptep = huge_pte_offset(mm, address);
67625 if (ptep) {
67626 entry = huge_ptep_get(ptep);
67627 @@ -2641,6 +2675,26 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
67628 VM_FAULT_SET_HINDEX(h - hstates);
67629 }
67630
67631 +#ifdef CONFIG_PAX_SEGMEXEC
67632 + vma_m = pax_find_mirror_vma(vma);
67633 + if (vma_m) {
67634 + unsigned long address_m;
67635 +
67636 + if (vma->vm_start > vma_m->vm_start) {
67637 + address_m = address;
67638 + address -= SEGMEXEC_TASK_SIZE;
67639 + vma = vma_m;
67640 + h = hstate_vma(vma);
67641 + } else
67642 + address_m = address + SEGMEXEC_TASK_SIZE;
67643 +
67644 + if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
67645 + return VM_FAULT_OOM;
67646 + address_m &= HPAGE_MASK;
67647 + unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
67648 + }
67649 +#endif
67650 +
67651 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
67652 if (!ptep)
67653 return VM_FAULT_OOM;
67654 diff --git a/mm/internal.h b/mm/internal.h
67655 index 2189af4..f2ca332 100644
67656 --- a/mm/internal.h
67657 +++ b/mm/internal.h
67658 @@ -95,6 +95,7 @@ extern void putback_lru_page(struct page *page);
67659 * in mm/page_alloc.c
67660 */
67661 extern void __free_pages_bootmem(struct page *page, unsigned int order);
67662 +extern void free_compound_page(struct page *page);
67663 extern void prep_compound_page(struct page *page, unsigned long order);
67664 #ifdef CONFIG_MEMORY_FAILURE
67665 extern bool is_free_buddy_page(struct page *page);
67666 diff --git a/mm/kmemleak.c b/mm/kmemleak.c
67667 index f3b2a00..61da94d 100644
67668 --- a/mm/kmemleak.c
67669 +++ b/mm/kmemleak.c
67670 @@ -357,7 +357,7 @@ static void print_unreferenced(struct seq_file *seq,
67671
67672 for (i = 0; i < object->trace_len; i++) {
67673 void *ptr = (void *)object->trace[i];
67674 - seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
67675 + seq_printf(seq, " [<%p>] %pA\n", ptr, ptr);
67676 }
67677 }
67678
67679 diff --git a/mm/maccess.c b/mm/maccess.c
67680 index d53adf9..03a24bf 100644
67681 --- a/mm/maccess.c
67682 +++ b/mm/maccess.c
67683 @@ -26,7 +26,7 @@ long __probe_kernel_read(void *dst, const void *src, size_t size)
67684 set_fs(KERNEL_DS);
67685 pagefault_disable();
67686 ret = __copy_from_user_inatomic(dst,
67687 - (__force const void __user *)src, size);
67688 + (const void __force_user *)src, size);
67689 pagefault_enable();
67690 set_fs(old_fs);
67691
67692 @@ -53,7 +53,7 @@ long __probe_kernel_write(void *dst, const void *src, size_t size)
67693
67694 set_fs(KERNEL_DS);
67695 pagefault_disable();
67696 - ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
67697 + ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
67698 pagefault_enable();
67699 set_fs(old_fs);
67700
67701 diff --git a/mm/madvise.c b/mm/madvise.c
67702 index 74bf193..feb6fd3 100644
67703 --- a/mm/madvise.c
67704 +++ b/mm/madvise.c
67705 @@ -45,6 +45,10 @@ static long madvise_behavior(struct vm_area_struct * vma,
67706 pgoff_t pgoff;
67707 unsigned long new_flags = vma->vm_flags;
67708
67709 +#ifdef CONFIG_PAX_SEGMEXEC
67710 + struct vm_area_struct *vma_m;
67711 +#endif
67712 +
67713 switch (behavior) {
67714 case MADV_NORMAL:
67715 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
67716 @@ -110,6 +114,13 @@ success:
67717 /*
67718 * vm_flags is protected by the mmap_sem held in write mode.
67719 */
67720 +
67721 +#ifdef CONFIG_PAX_SEGMEXEC
67722 + vma_m = pax_find_mirror_vma(vma);
67723 + if (vma_m)
67724 + vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
67725 +#endif
67726 +
67727 vma->vm_flags = new_flags;
67728
67729 out:
67730 @@ -168,6 +179,11 @@ static long madvise_dontneed(struct vm_area_struct * vma,
67731 struct vm_area_struct ** prev,
67732 unsigned long start, unsigned long end)
67733 {
67734 +
67735 +#ifdef CONFIG_PAX_SEGMEXEC
67736 + struct vm_area_struct *vma_m;
67737 +#endif
67738 +
67739 *prev = vma;
67740 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
67741 return -EINVAL;
67742 @@ -180,6 +196,21 @@ static long madvise_dontneed(struct vm_area_struct * vma,
67743 zap_page_range(vma, start, end - start, &details);
67744 } else
67745 zap_page_range(vma, start, end - start, NULL);
67746 +
67747 +#ifdef CONFIG_PAX_SEGMEXEC
67748 + vma_m = pax_find_mirror_vma(vma);
67749 + if (vma_m) {
67750 + if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
67751 + struct zap_details details = {
67752 + .nonlinear_vma = vma_m,
67753 + .last_index = ULONG_MAX,
67754 + };
67755 + zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
67756 + } else
67757 + zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
67758 + }
67759 +#endif
67760 +
67761 return 0;
67762 }
67763
67764 @@ -376,6 +407,16 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
67765 if (end < start)
67766 goto out;
67767
67768 +#ifdef CONFIG_PAX_SEGMEXEC
67769 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
67770 + if (end > SEGMEXEC_TASK_SIZE)
67771 + goto out;
67772 + } else
67773 +#endif
67774 +
67775 + if (end > TASK_SIZE)
67776 + goto out;
67777 +
67778 error = 0;
67779 if (end == start)
67780 goto out;
67781 diff --git a/mm/memory-failure.c b/mm/memory-failure.c
67782 index 06d3479..0778eef 100644
67783 --- a/mm/memory-failure.c
67784 +++ b/mm/memory-failure.c
67785 @@ -61,7 +61,7 @@ int sysctl_memory_failure_early_kill __read_mostly = 0;
67786
67787 int sysctl_memory_failure_recovery __read_mostly = 1;
67788
67789 -atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
67790 +atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
67791
67792 #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
67793
67794 @@ -202,7 +202,7 @@ static int kill_proc_ao(struct task_struct *t, unsigned long addr, int trapno,
67795 si.si_signo = SIGBUS;
67796 si.si_errno = 0;
67797 si.si_code = BUS_MCEERR_AO;
67798 - si.si_addr = (void *)addr;
67799 + si.si_addr = (void __user *)addr;
67800 #ifdef __ARCH_SI_TRAPNO
67801 si.si_trapno = trapno;
67802 #endif
67803 @@ -1010,7 +1010,7 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
67804 }
67805
67806 nr_pages = 1 << compound_trans_order(hpage);
67807 - atomic_long_add(nr_pages, &mce_bad_pages);
67808 + atomic_long_add_unchecked(nr_pages, &mce_bad_pages);
67809
67810 /*
67811 * We need/can do nothing about count=0 pages.
67812 @@ -1040,7 +1040,7 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
67813 if (!PageHWPoison(hpage)
67814 || (hwpoison_filter(p) && TestClearPageHWPoison(p))
67815 || (p != hpage && TestSetPageHWPoison(hpage))) {
67816 - atomic_long_sub(nr_pages, &mce_bad_pages);
67817 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
67818 return 0;
67819 }
67820 set_page_hwpoison_huge_page(hpage);
67821 @@ -1098,7 +1098,7 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
67822 }
67823 if (hwpoison_filter(p)) {
67824 if (TestClearPageHWPoison(p))
67825 - atomic_long_sub(nr_pages, &mce_bad_pages);
67826 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
67827 unlock_page(hpage);
67828 put_page(hpage);
67829 return 0;
67830 @@ -1315,7 +1315,7 @@ int unpoison_memory(unsigned long pfn)
67831 return 0;
67832 }
67833 if (TestClearPageHWPoison(p))
67834 - atomic_long_sub(nr_pages, &mce_bad_pages);
67835 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
67836 pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
67837 return 0;
67838 }
67839 @@ -1329,7 +1329,7 @@ int unpoison_memory(unsigned long pfn)
67840 */
67841 if (TestClearPageHWPoison(page)) {
67842 pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
67843 - atomic_long_sub(nr_pages, &mce_bad_pages);
67844 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
67845 freeit = 1;
67846 if (PageHuge(page))
67847 clear_page_hwpoison_huge_page(page);
67848 @@ -1442,7 +1442,7 @@ static int soft_offline_huge_page(struct page *page, int flags)
67849 }
67850 done:
67851 if (!PageHWPoison(hpage))
67852 - atomic_long_add(1 << compound_trans_order(hpage), &mce_bad_pages);
67853 + atomic_long_add_unchecked(1 << compound_trans_order(hpage), &mce_bad_pages);
67854 set_page_hwpoison_huge_page(hpage);
67855 dequeue_hwpoisoned_huge_page(hpage);
67856 /* keep elevated page count for bad page */
67857 @@ -1573,7 +1573,7 @@ int soft_offline_page(struct page *page, int flags)
67858 return ret;
67859
67860 done:
67861 - atomic_long_add(1, &mce_bad_pages);
67862 + atomic_long_add_unchecked(1, &mce_bad_pages);
67863 SetPageHWPoison(page);
67864 /* keep elevated page count for bad page */
67865 return ret;
67866 diff --git a/mm/memory.c b/mm/memory.c
67867 index 829d437..3d3926a 100644
67868 --- a/mm/memory.c
67869 +++ b/mm/memory.c
67870 @@ -457,8 +457,12 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
67871 return;
67872
67873 pmd = pmd_offset(pud, start);
67874 +
67875 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
67876 pud_clear(pud);
67877 pmd_free_tlb(tlb, pmd, start);
67878 +#endif
67879 +
67880 }
67881
67882 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
67883 @@ -489,9 +493,12 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
67884 if (end - 1 > ceiling - 1)
67885 return;
67886
67887 +#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
67888 pud = pud_offset(pgd, start);
67889 pgd_clear(pgd);
67890 pud_free_tlb(tlb, pud, start);
67891 +#endif
67892 +
67893 }
67894
67895 /*
67896 @@ -1566,12 +1573,6 @@ no_page_table:
67897 return page;
67898 }
67899
67900 -static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
67901 -{
67902 - return stack_guard_page_start(vma, addr) ||
67903 - stack_guard_page_end(vma, addr+PAGE_SIZE);
67904 -}
67905 -
67906 /**
67907 * __get_user_pages() - pin user pages in memory
67908 * @tsk: task_struct of target task
67909 @@ -1644,10 +1645,10 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
67910 (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
67911 i = 0;
67912
67913 - do {
67914 + while (nr_pages) {
67915 struct vm_area_struct *vma;
67916
67917 - vma = find_extend_vma(mm, start);
67918 + vma = find_vma(mm, start);
67919 if (!vma && in_gate_area(mm, start)) {
67920 unsigned long pg = start & PAGE_MASK;
67921 pgd_t *pgd;
67922 @@ -1695,7 +1696,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
67923 goto next_page;
67924 }
67925
67926 - if (!vma ||
67927 + if (!vma || start < vma->vm_start ||
67928 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
67929 !(vm_flags & vma->vm_flags))
67930 return i ? : -EFAULT;
67931 @@ -1722,11 +1723,6 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
67932 int ret;
67933 unsigned int fault_flags = 0;
67934
67935 - /* For mlock, just skip the stack guard page. */
67936 - if (foll_flags & FOLL_MLOCK) {
67937 - if (stack_guard_page(vma, start))
67938 - goto next_page;
67939 - }
67940 if (foll_flags & FOLL_WRITE)
67941 fault_flags |= FAULT_FLAG_WRITE;
67942 if (nonblocking)
67943 @@ -1800,7 +1796,7 @@ next_page:
67944 start += PAGE_SIZE;
67945 nr_pages--;
67946 } while (nr_pages && start < vma->vm_end);
67947 - } while (nr_pages);
67948 + }
67949 return i;
67950 }
67951 EXPORT_SYMBOL(__get_user_pages);
67952 @@ -2007,6 +2003,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
67953 page_add_file_rmap(page);
67954 set_pte_at(mm, addr, pte, mk_pte(page, prot));
67955
67956 +#ifdef CONFIG_PAX_SEGMEXEC
67957 + pax_mirror_file_pte(vma, addr, page, ptl);
67958 +#endif
67959 +
67960 retval = 0;
67961 pte_unmap_unlock(pte, ptl);
67962 return retval;
67963 @@ -2041,10 +2041,22 @@ out:
67964 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
67965 struct page *page)
67966 {
67967 +
67968 +#ifdef CONFIG_PAX_SEGMEXEC
67969 + struct vm_area_struct *vma_m;
67970 +#endif
67971 +
67972 if (addr < vma->vm_start || addr >= vma->vm_end)
67973 return -EFAULT;
67974 if (!page_count(page))
67975 return -EINVAL;
67976 +
67977 +#ifdef CONFIG_PAX_SEGMEXEC
67978 + vma_m = pax_find_mirror_vma(vma);
67979 + if (vma_m)
67980 + vma_m->vm_flags |= VM_INSERTPAGE;
67981 +#endif
67982 +
67983 vma->vm_flags |= VM_INSERTPAGE;
67984 return insert_page(vma, addr, page, vma->vm_page_prot);
67985 }
67986 @@ -2130,6 +2142,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
67987 unsigned long pfn)
67988 {
67989 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
67990 + BUG_ON(vma->vm_mirror);
67991
67992 if (addr < vma->vm_start || addr >= vma->vm_end)
67993 return -EFAULT;
67994 @@ -2445,6 +2458,186 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
67995 copy_user_highpage(dst, src, va, vma);
67996 }
67997
67998 +#ifdef CONFIG_PAX_SEGMEXEC
67999 +static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
68000 +{
68001 + struct mm_struct *mm = vma->vm_mm;
68002 + spinlock_t *ptl;
68003 + pte_t *pte, entry;
68004 +
68005 + pte = pte_offset_map_lock(mm, pmd, address, &ptl);
68006 + entry = *pte;
68007 + if (!pte_present(entry)) {
68008 + if (!pte_none(entry)) {
68009 + BUG_ON(pte_file(entry));
68010 + free_swap_and_cache(pte_to_swp_entry(entry));
68011 + pte_clear_not_present_full(mm, address, pte, 0);
68012 + }
68013 + } else {
68014 + struct page *page;
68015 +
68016 + flush_cache_page(vma, address, pte_pfn(entry));
68017 + entry = ptep_clear_flush(vma, address, pte);
68018 + BUG_ON(pte_dirty(entry));
68019 + page = vm_normal_page(vma, address, entry);
68020 + if (page) {
68021 + update_hiwater_rss(mm);
68022 + if (PageAnon(page))
68023 + dec_mm_counter_fast(mm, MM_ANONPAGES);
68024 + else
68025 + dec_mm_counter_fast(mm, MM_FILEPAGES);
68026 + page_remove_rmap(page);
68027 + page_cache_release(page);
68028 + }
68029 + }
68030 + pte_unmap_unlock(pte, ptl);
68031 +}
68032 +
68033 +/* PaX: if vma is mirrored, synchronize the mirror's PTE
68034 + *
68035 + * the ptl of the lower mapped page is held on entry and is not released on exit
68036 + * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
68037 + */
68038 +static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
68039 +{
68040 + struct mm_struct *mm = vma->vm_mm;
68041 + unsigned long address_m;
68042 + spinlock_t *ptl_m;
68043 + struct vm_area_struct *vma_m;
68044 + pmd_t *pmd_m;
68045 + pte_t *pte_m, entry_m;
68046 +
68047 + BUG_ON(!page_m || !PageAnon(page_m));
68048 +
68049 + vma_m = pax_find_mirror_vma(vma);
68050 + if (!vma_m)
68051 + return;
68052 +
68053 + BUG_ON(!PageLocked(page_m));
68054 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
68055 + address_m = address + SEGMEXEC_TASK_SIZE;
68056 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
68057 + pte_m = pte_offset_map(pmd_m, address_m);
68058 + ptl_m = pte_lockptr(mm, pmd_m);
68059 + if (ptl != ptl_m) {
68060 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
68061 + if (!pte_none(*pte_m))
68062 + goto out;
68063 + }
68064 +
68065 + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
68066 + page_cache_get(page_m);
68067 + page_add_anon_rmap(page_m, vma_m, address_m);
68068 + inc_mm_counter_fast(mm, MM_ANONPAGES);
68069 + set_pte_at(mm, address_m, pte_m, entry_m);
68070 + update_mmu_cache(vma_m, address_m, entry_m);
68071 +out:
68072 + if (ptl != ptl_m)
68073 + spin_unlock(ptl_m);
68074 + pte_unmap(pte_m);
68075 + unlock_page(page_m);
68076 +}
68077 +
68078 +void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
68079 +{
68080 + struct mm_struct *mm = vma->vm_mm;
68081 + unsigned long address_m;
68082 + spinlock_t *ptl_m;
68083 + struct vm_area_struct *vma_m;
68084 + pmd_t *pmd_m;
68085 + pte_t *pte_m, entry_m;
68086 +
68087 + BUG_ON(!page_m || PageAnon(page_m));
68088 +
68089 + vma_m = pax_find_mirror_vma(vma);
68090 + if (!vma_m)
68091 + return;
68092 +
68093 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
68094 + address_m = address + SEGMEXEC_TASK_SIZE;
68095 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
68096 + pte_m = pte_offset_map(pmd_m, address_m);
68097 + ptl_m = pte_lockptr(mm, pmd_m);
68098 + if (ptl != ptl_m) {
68099 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
68100 + if (!pte_none(*pte_m))
68101 + goto out;
68102 + }
68103 +
68104 + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
68105 + page_cache_get(page_m);
68106 + page_add_file_rmap(page_m);
68107 + inc_mm_counter_fast(mm, MM_FILEPAGES);
68108 + set_pte_at(mm, address_m, pte_m, entry_m);
68109 + update_mmu_cache(vma_m, address_m, entry_m);
68110 +out:
68111 + if (ptl != ptl_m)
68112 + spin_unlock(ptl_m);
68113 + pte_unmap(pte_m);
68114 +}
68115 +
68116 +static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
68117 +{
68118 + struct mm_struct *mm = vma->vm_mm;
68119 + unsigned long address_m;
68120 + spinlock_t *ptl_m;
68121 + struct vm_area_struct *vma_m;
68122 + pmd_t *pmd_m;
68123 + pte_t *pte_m, entry_m;
68124 +
68125 + vma_m = pax_find_mirror_vma(vma);
68126 + if (!vma_m)
68127 + return;
68128 +
68129 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
68130 + address_m = address + SEGMEXEC_TASK_SIZE;
68131 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
68132 + pte_m = pte_offset_map(pmd_m, address_m);
68133 + ptl_m = pte_lockptr(mm, pmd_m);
68134 + if (ptl != ptl_m) {
68135 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
68136 + if (!pte_none(*pte_m))
68137 + goto out;
68138 + }
68139 +
68140 + entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
68141 + set_pte_at(mm, address_m, pte_m, entry_m);
68142 +out:
68143 + if (ptl != ptl_m)
68144 + spin_unlock(ptl_m);
68145 + pte_unmap(pte_m);
68146 +}
68147 +
68148 +static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
68149 +{
68150 + struct page *page_m;
68151 + pte_t entry;
68152 +
68153 + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
68154 + goto out;
68155 +
68156 + entry = *pte;
68157 + page_m = vm_normal_page(vma, address, entry);
68158 + if (!page_m)
68159 + pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
68160 + else if (PageAnon(page_m)) {
68161 + if (pax_find_mirror_vma(vma)) {
68162 + pte_unmap_unlock(pte, ptl);
68163 + lock_page(page_m);
68164 + pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
68165 + if (pte_same(entry, *pte))
68166 + pax_mirror_anon_pte(vma, address, page_m, ptl);
68167 + else
68168 + unlock_page(page_m);
68169 + }
68170 + } else
68171 + pax_mirror_file_pte(vma, address, page_m, ptl);
68172 +
68173 +out:
68174 + pte_unmap_unlock(pte, ptl);
68175 +}
68176 +#endif
68177 +
68178 /*
68179 * This routine handles present pages, when users try to write
68180 * to a shared page. It is done by copying the page to a new address
68181 @@ -2656,6 +2849,12 @@ gotten:
68182 */
68183 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
68184 if (likely(pte_same(*page_table, orig_pte))) {
68185 +
68186 +#ifdef CONFIG_PAX_SEGMEXEC
68187 + if (pax_find_mirror_vma(vma))
68188 + BUG_ON(!trylock_page(new_page));
68189 +#endif
68190 +
68191 if (old_page) {
68192 if (!PageAnon(old_page)) {
68193 dec_mm_counter_fast(mm, MM_FILEPAGES);
68194 @@ -2707,6 +2906,10 @@ gotten:
68195 page_remove_rmap(old_page);
68196 }
68197
68198 +#ifdef CONFIG_PAX_SEGMEXEC
68199 + pax_mirror_anon_pte(vma, address, new_page, ptl);
68200 +#endif
68201 +
68202 /* Free the old page.. */
68203 new_page = old_page;
68204 ret |= VM_FAULT_WRITE;
68205 @@ -2986,6 +3189,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
68206 swap_free(entry);
68207 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
68208 try_to_free_swap(page);
68209 +
68210 +#ifdef CONFIG_PAX_SEGMEXEC
68211 + if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
68212 +#endif
68213 +
68214 unlock_page(page);
68215 if (swapcache) {
68216 /*
68217 @@ -3009,6 +3217,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
68218
68219 /* No need to invalidate - it was non-present before */
68220 update_mmu_cache(vma, address, page_table);
68221 +
68222 +#ifdef CONFIG_PAX_SEGMEXEC
68223 + pax_mirror_anon_pte(vma, address, page, ptl);
68224 +#endif
68225 +
68226 unlock:
68227 pte_unmap_unlock(page_table, ptl);
68228 out:
68229 @@ -3028,40 +3241,6 @@ out_release:
68230 }
68231
68232 /*
68233 - * This is like a special single-page "expand_{down|up}wards()",
68234 - * except we must first make sure that 'address{-|+}PAGE_SIZE'
68235 - * doesn't hit another vma.
68236 - */
68237 -static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
68238 -{
68239 - address &= PAGE_MASK;
68240 - if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
68241 - struct vm_area_struct *prev = vma->vm_prev;
68242 -
68243 - /*
68244 - * Is there a mapping abutting this one below?
68245 - *
68246 - * That's only ok if it's the same stack mapping
68247 - * that has gotten split..
68248 - */
68249 - if (prev && prev->vm_end == address)
68250 - return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
68251 -
68252 - expand_downwards(vma, address - PAGE_SIZE);
68253 - }
68254 - if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
68255 - struct vm_area_struct *next = vma->vm_next;
68256 -
68257 - /* As VM_GROWSDOWN but s/below/above/ */
68258 - if (next && next->vm_start == address + PAGE_SIZE)
68259 - return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
68260 -
68261 - expand_upwards(vma, address + PAGE_SIZE);
68262 - }
68263 - return 0;
68264 -}
68265 -
68266 -/*
68267 * We enter with non-exclusive mmap_sem (to exclude vma changes,
68268 * but allow concurrent faults), and pte mapped but not yet locked.
68269 * We return with mmap_sem still held, but pte unmapped and unlocked.
68270 @@ -3070,27 +3249,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
68271 unsigned long address, pte_t *page_table, pmd_t *pmd,
68272 unsigned int flags)
68273 {
68274 - struct page *page;
68275 + struct page *page = NULL;
68276 spinlock_t *ptl;
68277 pte_t entry;
68278
68279 - pte_unmap(page_table);
68280 -
68281 - /* Check if we need to add a guard page to the stack */
68282 - if (check_stack_guard_page(vma, address) < 0)
68283 - return VM_FAULT_SIGBUS;
68284 -
68285 - /* Use the zero-page for reads */
68286 if (!(flags & FAULT_FLAG_WRITE)) {
68287 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
68288 vma->vm_page_prot));
68289 - page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
68290 + ptl = pte_lockptr(mm, pmd);
68291 + spin_lock(ptl);
68292 if (!pte_none(*page_table))
68293 goto unlock;
68294 goto setpte;
68295 }
68296
68297 /* Allocate our own private page. */
68298 + pte_unmap(page_table);
68299 +
68300 if (unlikely(anon_vma_prepare(vma)))
68301 goto oom;
68302 page = alloc_zeroed_user_highpage_movable(vma, address);
68303 @@ -3109,6 +3284,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
68304 if (!pte_none(*page_table))
68305 goto release;
68306
68307 +#ifdef CONFIG_PAX_SEGMEXEC
68308 + if (pax_find_mirror_vma(vma))
68309 + BUG_ON(!trylock_page(page));
68310 +#endif
68311 +
68312 inc_mm_counter_fast(mm, MM_ANONPAGES);
68313 page_add_new_anon_rmap(page, vma, address);
68314 setpte:
68315 @@ -3116,6 +3296,12 @@ setpte:
68316
68317 /* No need to invalidate - it was non-present before */
68318 update_mmu_cache(vma, address, page_table);
68319 +
68320 +#ifdef CONFIG_PAX_SEGMEXEC
68321 + if (page)
68322 + pax_mirror_anon_pte(vma, address, page, ptl);
68323 +#endif
68324 +
68325 unlock:
68326 pte_unmap_unlock(page_table, ptl);
68327 return 0;
68328 @@ -3259,6 +3445,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
68329 */
68330 /* Only go through if we didn't race with anybody else... */
68331 if (likely(pte_same(*page_table, orig_pte))) {
68332 +
68333 +#ifdef CONFIG_PAX_SEGMEXEC
68334 + if (anon && pax_find_mirror_vma(vma))
68335 + BUG_ON(!trylock_page(page));
68336 +#endif
68337 +
68338 flush_icache_page(vma, page);
68339 entry = mk_pte(page, vma->vm_page_prot);
68340 if (flags & FAULT_FLAG_WRITE)
68341 @@ -3278,6 +3470,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
68342
68343 /* no need to invalidate: a not-present page won't be cached */
68344 update_mmu_cache(vma, address, page_table);
68345 +
68346 +#ifdef CONFIG_PAX_SEGMEXEC
68347 + if (anon)
68348 + pax_mirror_anon_pte(vma, address, page, ptl);
68349 + else
68350 + pax_mirror_file_pte(vma, address, page, ptl);
68351 +#endif
68352 +
68353 } else {
68354 if (cow_page)
68355 mem_cgroup_uncharge_page(cow_page);
68356 @@ -3431,6 +3631,12 @@ int handle_pte_fault(struct mm_struct *mm,
68357 if (flags & FAULT_FLAG_WRITE)
68358 flush_tlb_fix_spurious_fault(vma, address);
68359 }
68360 +
68361 +#ifdef CONFIG_PAX_SEGMEXEC
68362 + pax_mirror_pte(vma, address, pte, pmd, ptl);
68363 + return 0;
68364 +#endif
68365 +
68366 unlock:
68367 pte_unmap_unlock(pte, ptl);
68368 return 0;
68369 @@ -3447,6 +3653,10 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
68370 pmd_t *pmd;
68371 pte_t *pte;
68372
68373 +#ifdef CONFIG_PAX_SEGMEXEC
68374 + struct vm_area_struct *vma_m;
68375 +#endif
68376 +
68377 __set_current_state(TASK_RUNNING);
68378
68379 count_vm_event(PGFAULT);
68380 @@ -3458,6 +3668,34 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
68381 if (unlikely(is_vm_hugetlb_page(vma)))
68382 return hugetlb_fault(mm, vma, address, flags);
68383
68384 +#ifdef CONFIG_PAX_SEGMEXEC
68385 + vma_m = pax_find_mirror_vma(vma);
68386 + if (vma_m) {
68387 + unsigned long address_m;
68388 + pgd_t *pgd_m;
68389 + pud_t *pud_m;
68390 + pmd_t *pmd_m;
68391 +
68392 + if (vma->vm_start > vma_m->vm_start) {
68393 + address_m = address;
68394 + address -= SEGMEXEC_TASK_SIZE;
68395 + vma = vma_m;
68396 + } else
68397 + address_m = address + SEGMEXEC_TASK_SIZE;
68398 +
68399 + pgd_m = pgd_offset(mm, address_m);
68400 + pud_m = pud_alloc(mm, pgd_m, address_m);
68401 + if (!pud_m)
68402 + return VM_FAULT_OOM;
68403 + pmd_m = pmd_alloc(mm, pud_m, address_m);
68404 + if (!pmd_m)
68405 + return VM_FAULT_OOM;
68406 + if (!pmd_present(*pmd_m) && __pte_alloc(mm, vma_m, pmd_m, address_m))
68407 + return VM_FAULT_OOM;
68408 + pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
68409 + }
68410 +#endif
68411 +
68412 pgd = pgd_offset(mm, address);
68413 pud = pud_alloc(mm, pgd, address);
68414 if (!pud)
68415 @@ -3487,7 +3725,7 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
68416 * run pte_offset_map on the pmd, if an huge pmd could
68417 * materialize from under us from a different thread.
68418 */
68419 - if (unlikely(pmd_none(*pmd)) && __pte_alloc(mm, vma, pmd, address))
68420 + if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
68421 return VM_FAULT_OOM;
68422 /* if an huge pmd materialized from under us just retry later */
68423 if (unlikely(pmd_trans_huge(*pmd)))
68424 @@ -3591,7 +3829,7 @@ static int __init gate_vma_init(void)
68425 gate_vma.vm_start = FIXADDR_USER_START;
68426 gate_vma.vm_end = FIXADDR_USER_END;
68427 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
68428 - gate_vma.vm_page_prot = __P101;
68429 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
68430 /*
68431 * Make sure the vDSO gets into every core dump.
68432 * Dumping its contents makes post-mortem fully interpretable later
68433 diff --git a/mm/mempolicy.c b/mm/mempolicy.c
68434 index c3fdbcb..2e8ef90 100644
68435 --- a/mm/mempolicy.c
68436 +++ b/mm/mempolicy.c
68437 @@ -640,6 +640,10 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
68438 unsigned long vmstart;
68439 unsigned long vmend;
68440
68441 +#ifdef CONFIG_PAX_SEGMEXEC
68442 + struct vm_area_struct *vma_m;
68443 +#endif
68444 +
68445 vma = find_vma_prev(mm, start, &prev);
68446 if (!vma || vma->vm_start > start)
68447 return -EFAULT;
68448 @@ -678,6 +682,16 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
68449 err = policy_vma(vma, new_pol);
68450 if (err)
68451 goto out;
68452 +
68453 +#ifdef CONFIG_PAX_SEGMEXEC
68454 + vma_m = pax_find_mirror_vma(vma);
68455 + if (vma_m) {
68456 + err = policy_vma(vma_m, new_pol);
68457 + if (err)
68458 + goto out;
68459 + }
68460 +#endif
68461 +
68462 }
68463
68464 out:
68465 @@ -1111,6 +1125,17 @@ static long do_mbind(unsigned long start, unsigned long len,
68466
68467 if (end < start)
68468 return -EINVAL;
68469 +
68470 +#ifdef CONFIG_PAX_SEGMEXEC
68471 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
68472 + if (end > SEGMEXEC_TASK_SIZE)
68473 + return -EINVAL;
68474 + } else
68475 +#endif
68476 +
68477 + if (end > TASK_SIZE)
68478 + return -EINVAL;
68479 +
68480 if (end == start)
68481 return 0;
68482
68483 @@ -1329,6 +1354,14 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
68484 if (!mm)
68485 goto out;
68486
68487 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
68488 + if (mm != current->mm &&
68489 + (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
68490 + err = -EPERM;
68491 + goto out;
68492 + }
68493 +#endif
68494 +
68495 /*
68496 * Check if this process has the right to modify the specified
68497 * process. The right exists if the process has administrative
68498 @@ -1338,8 +1371,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
68499 rcu_read_lock();
68500 tcred = __task_cred(task);
68501 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
68502 - cred->uid != tcred->suid && cred->uid != tcred->uid &&
68503 - !capable(CAP_SYS_NICE)) {
68504 + cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
68505 rcu_read_unlock();
68506 err = -EPERM;
68507 goto out;
68508 diff --git a/mm/migrate.c b/mm/migrate.c
68509 index 177aca4..ab3a744 100644
68510 --- a/mm/migrate.c
68511 +++ b/mm/migrate.c
68512 @@ -1313,6 +1313,14 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
68513 if (!mm)
68514 return -EINVAL;
68515
68516 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
68517 + if (mm != current->mm &&
68518 + (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
68519 + err = -EPERM;
68520 + goto out;
68521 + }
68522 +#endif
68523 +
68524 /*
68525 * Check if this process has the right to modify the specified
68526 * process. The right exists if the process has administrative
68527 @@ -1322,8 +1330,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
68528 rcu_read_lock();
68529 tcred = __task_cred(task);
68530 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
68531 - cred->uid != tcred->suid && cred->uid != tcred->uid &&
68532 - !capable(CAP_SYS_NICE)) {
68533 + cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
68534 rcu_read_unlock();
68535 err = -EPERM;
68536 goto out;
68537 diff --git a/mm/mlock.c b/mm/mlock.c
68538 index 4f4f53b..9511904 100644
68539 --- a/mm/mlock.c
68540 +++ b/mm/mlock.c
68541 @@ -13,6 +13,7 @@
68542 #include <linux/pagemap.h>
68543 #include <linux/mempolicy.h>
68544 #include <linux/syscalls.h>
68545 +#include <linux/security.h>
68546 #include <linux/sched.h>
68547 #include <linux/export.h>
68548 #include <linux/rmap.h>
68549 @@ -385,6 +386,9 @@ static int do_mlock(unsigned long start, size_t len, int on)
68550 return -EINVAL;
68551 if (end == start)
68552 return 0;
68553 + if (end > TASK_SIZE)
68554 + return -EINVAL;
68555 +
68556 vma = find_vma_prev(current->mm, start, &prev);
68557 if (!vma || vma->vm_start > start)
68558 return -ENOMEM;
68559 @@ -395,6 +399,11 @@ static int do_mlock(unsigned long start, size_t len, int on)
68560 for (nstart = start ; ; ) {
68561 vm_flags_t newflags;
68562
68563 +#ifdef CONFIG_PAX_SEGMEXEC
68564 + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
68565 + break;
68566 +#endif
68567 +
68568 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
68569
68570 newflags = vma->vm_flags | VM_LOCKED;
68571 @@ -500,6 +509,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
68572 lock_limit >>= PAGE_SHIFT;
68573
68574 /* check against resource limits */
68575 + gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
68576 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
68577 error = do_mlock(start, len, 1);
68578 up_write(&current->mm->mmap_sem);
68579 @@ -523,17 +533,23 @@ SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
68580 static int do_mlockall(int flags)
68581 {
68582 struct vm_area_struct * vma, * prev = NULL;
68583 - unsigned int def_flags = 0;
68584
68585 if (flags & MCL_FUTURE)
68586 - def_flags = VM_LOCKED;
68587 - current->mm->def_flags = def_flags;
68588 + current->mm->def_flags |= VM_LOCKED;
68589 + else
68590 + current->mm->def_flags &= ~VM_LOCKED;
68591 if (flags == MCL_FUTURE)
68592 goto out;
68593
68594 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
68595 vm_flags_t newflags;
68596
68597 +#ifdef CONFIG_PAX_SEGMEXEC
68598 + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
68599 + break;
68600 +#endif
68601 +
68602 + BUG_ON(vma->vm_end > TASK_SIZE);
68603 newflags = vma->vm_flags | VM_LOCKED;
68604 if (!(flags & MCL_CURRENT))
68605 newflags &= ~VM_LOCKED;
68606 @@ -566,6 +582,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
68607 lock_limit >>= PAGE_SHIFT;
68608
68609 ret = -ENOMEM;
68610 + gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
68611 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
68612 capable(CAP_IPC_LOCK))
68613 ret = do_mlockall(flags);
68614 diff --git a/mm/mmap.c b/mm/mmap.c
68615 index eae90af..51ca80b 100644
68616 --- a/mm/mmap.c
68617 +++ b/mm/mmap.c
68618 @@ -46,6 +46,16 @@
68619 #define arch_rebalance_pgtables(addr, len) (addr)
68620 #endif
68621
68622 +static inline void verify_mm_writelocked(struct mm_struct *mm)
68623 +{
68624 +#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
68625 + if (unlikely(down_read_trylock(&mm->mmap_sem))) {
68626 + up_read(&mm->mmap_sem);
68627 + BUG();
68628 + }
68629 +#endif
68630 +}
68631 +
68632 static void unmap_region(struct mm_struct *mm,
68633 struct vm_area_struct *vma, struct vm_area_struct *prev,
68634 unsigned long start, unsigned long end);
68635 @@ -71,22 +81,32 @@ static void unmap_region(struct mm_struct *mm,
68636 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
68637 *
68638 */
68639 -pgprot_t protection_map[16] = {
68640 +pgprot_t protection_map[16] __read_only = {
68641 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
68642 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
68643 };
68644
68645 -pgprot_t vm_get_page_prot(unsigned long vm_flags)
68646 +pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
68647 {
68648 - return __pgprot(pgprot_val(protection_map[vm_flags &
68649 + pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
68650 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
68651 pgprot_val(arch_vm_get_page_prot(vm_flags)));
68652 +
68653 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
68654 + if (!(__supported_pte_mask & _PAGE_NX) &&
68655 + (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
68656 + (vm_flags & (VM_READ | VM_WRITE)))
68657 + prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
68658 +#endif
68659 +
68660 + return prot;
68661 }
68662 EXPORT_SYMBOL(vm_get_page_prot);
68663
68664 int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS; /* heuristic overcommit */
68665 int sysctl_overcommit_ratio __read_mostly = 50; /* default is 50% */
68666 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
68667 +unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
68668 /*
68669 * Make sure vm_committed_as in one cacheline and not cacheline shared with
68670 * other variables. It can be updated by several CPUs frequently.
68671 @@ -228,6 +248,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
68672 struct vm_area_struct *next = vma->vm_next;
68673
68674 might_sleep();
68675 + BUG_ON(vma->vm_mirror);
68676 if (vma->vm_ops && vma->vm_ops->close)
68677 vma->vm_ops->close(vma);
68678 if (vma->vm_file) {
68679 @@ -272,6 +293,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
68680 * not page aligned -Ram Gupta
68681 */
68682 rlim = rlimit(RLIMIT_DATA);
68683 + gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
68684 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
68685 (mm->end_data - mm->start_data) > rlim)
68686 goto out;
68687 @@ -689,6 +711,12 @@ static int
68688 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
68689 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
68690 {
68691 +
68692 +#ifdef CONFIG_PAX_SEGMEXEC
68693 + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
68694 + return 0;
68695 +#endif
68696 +
68697 if (is_mergeable_vma(vma, file, vm_flags) &&
68698 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
68699 if (vma->vm_pgoff == vm_pgoff)
68700 @@ -708,6 +736,12 @@ static int
68701 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
68702 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
68703 {
68704 +
68705 +#ifdef CONFIG_PAX_SEGMEXEC
68706 + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
68707 + return 0;
68708 +#endif
68709 +
68710 if (is_mergeable_vma(vma, file, vm_flags) &&
68711 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
68712 pgoff_t vm_pglen;
68713 @@ -750,13 +784,20 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
68714 struct vm_area_struct *vma_merge(struct mm_struct *mm,
68715 struct vm_area_struct *prev, unsigned long addr,
68716 unsigned long end, unsigned long vm_flags,
68717 - struct anon_vma *anon_vma, struct file *file,
68718 + struct anon_vma *anon_vma, struct file *file,
68719 pgoff_t pgoff, struct mempolicy *policy)
68720 {
68721 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
68722 struct vm_area_struct *area, *next;
68723 int err;
68724
68725 +#ifdef CONFIG_PAX_SEGMEXEC
68726 + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
68727 + struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
68728 +
68729 + BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
68730 +#endif
68731 +
68732 /*
68733 * We later require that vma->vm_flags == vm_flags,
68734 * so this tests vma->vm_flags & VM_SPECIAL, too.
68735 @@ -772,6 +813,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
68736 if (next && next->vm_end == end) /* cases 6, 7, 8 */
68737 next = next->vm_next;
68738
68739 +#ifdef CONFIG_PAX_SEGMEXEC
68740 + if (prev)
68741 + prev_m = pax_find_mirror_vma(prev);
68742 + if (area)
68743 + area_m = pax_find_mirror_vma(area);
68744 + if (next)
68745 + next_m = pax_find_mirror_vma(next);
68746 +#endif
68747 +
68748 /*
68749 * Can it merge with the predecessor?
68750 */
68751 @@ -791,9 +841,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
68752 /* cases 1, 6 */
68753 err = vma_adjust(prev, prev->vm_start,
68754 next->vm_end, prev->vm_pgoff, NULL);
68755 - } else /* cases 2, 5, 7 */
68756 +
68757 +#ifdef CONFIG_PAX_SEGMEXEC
68758 + if (!err && prev_m)
68759 + err = vma_adjust(prev_m, prev_m->vm_start,
68760 + next_m->vm_end, prev_m->vm_pgoff, NULL);
68761 +#endif
68762 +
68763 + } else { /* cases 2, 5, 7 */
68764 err = vma_adjust(prev, prev->vm_start,
68765 end, prev->vm_pgoff, NULL);
68766 +
68767 +#ifdef CONFIG_PAX_SEGMEXEC
68768 + if (!err && prev_m)
68769 + err = vma_adjust(prev_m, prev_m->vm_start,
68770 + end_m, prev_m->vm_pgoff, NULL);
68771 +#endif
68772 +
68773 + }
68774 if (err)
68775 return NULL;
68776 khugepaged_enter_vma_merge(prev);
68777 @@ -807,12 +872,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
68778 mpol_equal(policy, vma_policy(next)) &&
68779 can_vma_merge_before(next, vm_flags,
68780 anon_vma, file, pgoff+pglen)) {
68781 - if (prev && addr < prev->vm_end) /* case 4 */
68782 + if (prev && addr < prev->vm_end) { /* case 4 */
68783 err = vma_adjust(prev, prev->vm_start,
68784 addr, prev->vm_pgoff, NULL);
68785 - else /* cases 3, 8 */
68786 +
68787 +#ifdef CONFIG_PAX_SEGMEXEC
68788 + if (!err && prev_m)
68789 + err = vma_adjust(prev_m, prev_m->vm_start,
68790 + addr_m, prev_m->vm_pgoff, NULL);
68791 +#endif
68792 +
68793 + } else { /* cases 3, 8 */
68794 err = vma_adjust(area, addr, next->vm_end,
68795 next->vm_pgoff - pglen, NULL);
68796 +
68797 +#ifdef CONFIG_PAX_SEGMEXEC
68798 + if (!err && area_m)
68799 + err = vma_adjust(area_m, addr_m, next_m->vm_end,
68800 + next_m->vm_pgoff - pglen, NULL);
68801 +#endif
68802 +
68803 + }
68804 if (err)
68805 return NULL;
68806 khugepaged_enter_vma_merge(area);
68807 @@ -921,14 +1001,11 @@ none:
68808 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
68809 struct file *file, long pages)
68810 {
68811 - const unsigned long stack_flags
68812 - = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
68813 -
68814 if (file) {
68815 mm->shared_vm += pages;
68816 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
68817 mm->exec_vm += pages;
68818 - } else if (flags & stack_flags)
68819 + } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
68820 mm->stack_vm += pages;
68821 if (flags & (VM_RESERVED|VM_IO))
68822 mm->reserved_vm += pages;
68823 @@ -955,7 +1032,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
68824 * (the exception is when the underlying filesystem is noexec
68825 * mounted, in which case we dont add PROT_EXEC.)
68826 */
68827 - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
68828 + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
68829 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
68830 prot |= PROT_EXEC;
68831
68832 @@ -981,7 +1058,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
68833 /* Obtain the address to map to. we verify (or select) it and ensure
68834 * that it represents a valid section of the address space.
68835 */
68836 - addr = get_unmapped_area(file, addr, len, pgoff, flags);
68837 + addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
68838 if (addr & ~PAGE_MASK)
68839 return addr;
68840
68841 @@ -992,6 +1069,36 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
68842 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
68843 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
68844
68845 +#ifdef CONFIG_PAX_MPROTECT
68846 + if (mm->pax_flags & MF_PAX_MPROTECT) {
68847 +#ifndef CONFIG_PAX_MPROTECT_COMPAT
68848 + if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
68849 + gr_log_rwxmmap(file);
68850 +
68851 +#ifdef CONFIG_PAX_EMUPLT
68852 + vm_flags &= ~VM_EXEC;
68853 +#else
68854 + return -EPERM;
68855 +#endif
68856 +
68857 + }
68858 +
68859 + if (!(vm_flags & VM_EXEC))
68860 + vm_flags &= ~VM_MAYEXEC;
68861 +#else
68862 + if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
68863 + vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
68864 +#endif
68865 + else
68866 + vm_flags &= ~VM_MAYWRITE;
68867 + }
68868 +#endif
68869 +
68870 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
68871 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
68872 + vm_flags &= ~VM_PAGEEXEC;
68873 +#endif
68874 +
68875 if (flags & MAP_LOCKED)
68876 if (!can_do_mlock())
68877 return -EPERM;
68878 @@ -1003,6 +1110,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
68879 locked += mm->locked_vm;
68880 lock_limit = rlimit(RLIMIT_MEMLOCK);
68881 lock_limit >>= PAGE_SHIFT;
68882 + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
68883 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
68884 return -EAGAIN;
68885 }
68886 @@ -1073,6 +1181,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
68887 if (error)
68888 return error;
68889
68890 + if (!gr_acl_handle_mmap(file, prot))
68891 + return -EACCES;
68892 +
68893 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
68894 }
68895 EXPORT_SYMBOL(do_mmap_pgoff);
68896 @@ -1153,7 +1264,7 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
68897 vm_flags_t vm_flags = vma->vm_flags;
68898
68899 /* If it was private or non-writable, the write bit is already clear */
68900 - if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
68901 + if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
68902 return 0;
68903
68904 /* The backer wishes to know when pages are first written to? */
68905 @@ -1202,14 +1313,24 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
68906 unsigned long charged = 0;
68907 struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
68908
68909 +#ifdef CONFIG_PAX_SEGMEXEC
68910 + struct vm_area_struct *vma_m = NULL;
68911 +#endif
68912 +
68913 + /*
68914 + * mm->mmap_sem is required to protect against another thread
68915 + * changing the mappings in case we sleep.
68916 + */
68917 + verify_mm_writelocked(mm);
68918 +
68919 /* Clear old maps */
68920 error = -ENOMEM;
68921 -munmap_back:
68922 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
68923 if (vma && vma->vm_start < addr + len) {
68924 if (do_munmap(mm, addr, len))
68925 return -ENOMEM;
68926 - goto munmap_back;
68927 + vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
68928 + BUG_ON(vma && vma->vm_start < addr + len);
68929 }
68930
68931 /* Check against address space limit. */
68932 @@ -1258,6 +1379,16 @@ munmap_back:
68933 goto unacct_error;
68934 }
68935
68936 +#ifdef CONFIG_PAX_SEGMEXEC
68937 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
68938 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
68939 + if (!vma_m) {
68940 + error = -ENOMEM;
68941 + goto free_vma;
68942 + }
68943 + }
68944 +#endif
68945 +
68946 vma->vm_mm = mm;
68947 vma->vm_start = addr;
68948 vma->vm_end = addr + len;
68949 @@ -1281,6 +1412,19 @@ munmap_back:
68950 error = file->f_op->mmap(file, vma);
68951 if (error)
68952 goto unmap_and_free_vma;
68953 +
68954 +#ifdef CONFIG_PAX_SEGMEXEC
68955 + if (vma_m && (vm_flags & VM_EXECUTABLE))
68956 + added_exe_file_vma(mm);
68957 +#endif
68958 +
68959 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
68960 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
68961 + vma->vm_flags |= VM_PAGEEXEC;
68962 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
68963 + }
68964 +#endif
68965 +
68966 if (vm_flags & VM_EXECUTABLE)
68967 added_exe_file_vma(mm);
68968
68969 @@ -1316,6 +1460,11 @@ munmap_back:
68970 vma_link(mm, vma, prev, rb_link, rb_parent);
68971 file = vma->vm_file;
68972
68973 +#ifdef CONFIG_PAX_SEGMEXEC
68974 + if (vma_m)
68975 + BUG_ON(pax_mirror_vma(vma_m, vma));
68976 +#endif
68977 +
68978 /* Once vma denies write, undo our temporary denial count */
68979 if (correct_wcount)
68980 atomic_inc(&inode->i_writecount);
68981 @@ -1324,6 +1473,7 @@ out:
68982
68983 mm->total_vm += len >> PAGE_SHIFT;
68984 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
68985 + track_exec_limit(mm, addr, addr + len, vm_flags);
68986 if (vm_flags & VM_LOCKED) {
68987 if (!mlock_vma_pages_range(vma, addr, addr + len))
68988 mm->locked_vm += (len >> PAGE_SHIFT);
68989 @@ -1341,6 +1491,12 @@ unmap_and_free_vma:
68990 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
68991 charged = 0;
68992 free_vma:
68993 +
68994 +#ifdef CONFIG_PAX_SEGMEXEC
68995 + if (vma_m)
68996 + kmem_cache_free(vm_area_cachep, vma_m);
68997 +#endif
68998 +
68999 kmem_cache_free(vm_area_cachep, vma);
69000 unacct_error:
69001 if (charged)
69002 @@ -1348,6 +1504,44 @@ unacct_error:
69003 return error;
69004 }
69005
69006 +bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len)
69007 +{
69008 + if (!vma) {
69009 +#ifdef CONFIG_STACK_GROWSUP
69010 + if (addr > sysctl_heap_stack_gap)
69011 + vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
69012 + else
69013 + vma = find_vma(current->mm, 0);
69014 + if (vma && (vma->vm_flags & VM_GROWSUP))
69015 + return false;
69016 +#endif
69017 + return true;
69018 + }
69019 +
69020 + if (addr + len > vma->vm_start)
69021 + return false;
69022 +
69023 + if (vma->vm_flags & VM_GROWSDOWN)
69024 + return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
69025 +#ifdef CONFIG_STACK_GROWSUP
69026 + else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
69027 + return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
69028 +#endif
69029 +
69030 + return true;
69031 +}
69032 +
69033 +unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len)
69034 +{
69035 + if (vma->vm_start < len)
69036 + return -ENOMEM;
69037 + if (!(vma->vm_flags & VM_GROWSDOWN))
69038 + return vma->vm_start - len;
69039 + if (sysctl_heap_stack_gap <= vma->vm_start - len)
69040 + return vma->vm_start - len - sysctl_heap_stack_gap;
69041 + return -ENOMEM;
69042 +}
69043 +
69044 /* Get an address range which is currently unmapped.
69045 * For shmat() with addr=0.
69046 *
69047 @@ -1374,18 +1568,23 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
69048 if (flags & MAP_FIXED)
69049 return addr;
69050
69051 +#ifdef CONFIG_PAX_RANDMMAP
69052 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
69053 +#endif
69054 +
69055 if (addr) {
69056 addr = PAGE_ALIGN(addr);
69057 - vma = find_vma(mm, addr);
69058 - if (TASK_SIZE - len >= addr &&
69059 - (!vma || addr + len <= vma->vm_start))
69060 - return addr;
69061 + if (TASK_SIZE - len >= addr) {
69062 + vma = find_vma(mm, addr);
69063 + if (check_heap_stack_gap(vma, addr, len))
69064 + return addr;
69065 + }
69066 }
69067 if (len > mm->cached_hole_size) {
69068 - start_addr = addr = mm->free_area_cache;
69069 + start_addr = addr = mm->free_area_cache;
69070 } else {
69071 - start_addr = addr = TASK_UNMAPPED_BASE;
69072 - mm->cached_hole_size = 0;
69073 + start_addr = addr = mm->mmap_base;
69074 + mm->cached_hole_size = 0;
69075 }
69076
69077 full_search:
69078 @@ -1396,34 +1595,40 @@ full_search:
69079 * Start a new search - just in case we missed
69080 * some holes.
69081 */
69082 - if (start_addr != TASK_UNMAPPED_BASE) {
69083 - addr = TASK_UNMAPPED_BASE;
69084 - start_addr = addr;
69085 + if (start_addr != mm->mmap_base) {
69086 + start_addr = addr = mm->mmap_base;
69087 mm->cached_hole_size = 0;
69088 goto full_search;
69089 }
69090 return -ENOMEM;
69091 }
69092 - if (!vma || addr + len <= vma->vm_start) {
69093 - /*
69094 - * Remember the place where we stopped the search:
69095 - */
69096 - mm->free_area_cache = addr + len;
69097 - return addr;
69098 - }
69099 + if (check_heap_stack_gap(vma, addr, len))
69100 + break;
69101 if (addr + mm->cached_hole_size < vma->vm_start)
69102 mm->cached_hole_size = vma->vm_start - addr;
69103 addr = vma->vm_end;
69104 }
69105 +
69106 + /*
69107 + * Remember the place where we stopped the search:
69108 + */
69109 + mm->free_area_cache = addr + len;
69110 + return addr;
69111 }
69112 #endif
69113
69114 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
69115 {
69116 +
69117 +#ifdef CONFIG_PAX_SEGMEXEC
69118 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
69119 + return;
69120 +#endif
69121 +
69122 /*
69123 * Is this a new hole at the lowest possible address?
69124 */
69125 - if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) {
69126 + if (addr >= mm->mmap_base && addr < mm->free_area_cache) {
69127 mm->free_area_cache = addr;
69128 mm->cached_hole_size = ~0UL;
69129 }
69130 @@ -1441,7 +1646,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
69131 {
69132 struct vm_area_struct *vma;
69133 struct mm_struct *mm = current->mm;
69134 - unsigned long addr = addr0;
69135 + unsigned long base = mm->mmap_base, addr = addr0;
69136
69137 /* requested length too big for entire address space */
69138 if (len > TASK_SIZE)
69139 @@ -1450,13 +1655,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
69140 if (flags & MAP_FIXED)
69141 return addr;
69142
69143 +#ifdef CONFIG_PAX_RANDMMAP
69144 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
69145 +#endif
69146 +
69147 /* requesting a specific address */
69148 if (addr) {
69149 addr = PAGE_ALIGN(addr);
69150 - vma = find_vma(mm, addr);
69151 - if (TASK_SIZE - len >= addr &&
69152 - (!vma || addr + len <= vma->vm_start))
69153 - return addr;
69154 + if (TASK_SIZE - len >= addr) {
69155 + vma = find_vma(mm, addr);
69156 + if (check_heap_stack_gap(vma, addr, len))
69157 + return addr;
69158 + }
69159 }
69160
69161 /* check if free_area_cache is useful for us */
69162 @@ -1471,7 +1681,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
69163 /* make sure it can fit in the remaining address space */
69164 if (addr > len) {
69165 vma = find_vma(mm, addr-len);
69166 - if (!vma || addr <= vma->vm_start)
69167 + if (check_heap_stack_gap(vma, addr - len, len))
69168 /* remember the address as a hint for next time */
69169 return (mm->free_area_cache = addr-len);
69170 }
69171 @@ -1488,7 +1698,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
69172 * return with success:
69173 */
69174 vma = find_vma(mm, addr);
69175 - if (!vma || addr+len <= vma->vm_start)
69176 + if (check_heap_stack_gap(vma, addr, len))
69177 /* remember the address as a hint for next time */
69178 return (mm->free_area_cache = addr);
69179
69180 @@ -1497,8 +1707,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
69181 mm->cached_hole_size = vma->vm_start - addr;
69182
69183 /* try just below the current vma->vm_start */
69184 - addr = vma->vm_start-len;
69185 - } while (len < vma->vm_start);
69186 + addr = skip_heap_stack_gap(vma, len);
69187 + } while (!IS_ERR_VALUE(addr));
69188
69189 bottomup:
69190 /*
69191 @@ -1507,13 +1717,21 @@ bottomup:
69192 * can happen with large stack limits and large mmap()
69193 * allocations.
69194 */
69195 + mm->mmap_base = TASK_UNMAPPED_BASE;
69196 +
69197 +#ifdef CONFIG_PAX_RANDMMAP
69198 + if (mm->pax_flags & MF_PAX_RANDMMAP)
69199 + mm->mmap_base += mm->delta_mmap;
69200 +#endif
69201 +
69202 + mm->free_area_cache = mm->mmap_base;
69203 mm->cached_hole_size = ~0UL;
69204 - mm->free_area_cache = TASK_UNMAPPED_BASE;
69205 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
69206 /*
69207 * Restore the topdown base:
69208 */
69209 - mm->free_area_cache = mm->mmap_base;
69210 + mm->mmap_base = base;
69211 + mm->free_area_cache = base;
69212 mm->cached_hole_size = ~0UL;
69213
69214 return addr;
69215 @@ -1522,6 +1740,12 @@ bottomup:
69216
69217 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
69218 {
69219 +
69220 +#ifdef CONFIG_PAX_SEGMEXEC
69221 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
69222 + return;
69223 +#endif
69224 +
69225 /*
69226 * Is this a new hole at the highest possible address?
69227 */
69228 @@ -1529,8 +1753,10 @@ void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
69229 mm->free_area_cache = addr;
69230
69231 /* dont allow allocations above current base */
69232 - if (mm->free_area_cache > mm->mmap_base)
69233 + if (mm->free_area_cache > mm->mmap_base) {
69234 mm->free_area_cache = mm->mmap_base;
69235 + mm->cached_hole_size = ~0UL;
69236 + }
69237 }
69238
69239 unsigned long
69240 @@ -1638,6 +1864,28 @@ out:
69241 return prev ? prev->vm_next : vma;
69242 }
69243
69244 +#ifdef CONFIG_PAX_SEGMEXEC
69245 +struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
69246 +{
69247 + struct vm_area_struct *vma_m;
69248 +
69249 + BUG_ON(!vma || vma->vm_start >= vma->vm_end);
69250 + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
69251 + BUG_ON(vma->vm_mirror);
69252 + return NULL;
69253 + }
69254 + BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
69255 + vma_m = vma->vm_mirror;
69256 + BUG_ON(!vma_m || vma_m->vm_mirror != vma);
69257 + BUG_ON(vma->vm_file != vma_m->vm_file);
69258 + BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
69259 + BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff);
69260 + BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root);
69261 + BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED | VM_RESERVED));
69262 + return vma_m;
69263 +}
69264 +#endif
69265 +
69266 /*
69267 * Verify that the stack growth is acceptable and
69268 * update accounting. This is shared with both the
69269 @@ -1654,6 +1902,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
69270 return -ENOMEM;
69271
69272 /* Stack limit test */
69273 + gr_learn_resource(current, RLIMIT_STACK, size, 1);
69274 if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
69275 return -ENOMEM;
69276
69277 @@ -1664,6 +1913,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
69278 locked = mm->locked_vm + grow;
69279 limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
69280 limit >>= PAGE_SHIFT;
69281 + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
69282 if (locked > limit && !capable(CAP_IPC_LOCK))
69283 return -ENOMEM;
69284 }
69285 @@ -1694,37 +1944,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
69286 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
69287 * vma is the last one with address > vma->vm_end. Have to extend vma.
69288 */
69289 +#ifndef CONFIG_IA64
69290 +static
69291 +#endif
69292 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
69293 {
69294 int error;
69295 + bool locknext;
69296
69297 if (!(vma->vm_flags & VM_GROWSUP))
69298 return -EFAULT;
69299
69300 + /* Also guard against wrapping around to address 0. */
69301 + if (address < PAGE_ALIGN(address+1))
69302 + address = PAGE_ALIGN(address+1);
69303 + else
69304 + return -ENOMEM;
69305 +
69306 /*
69307 * We must make sure the anon_vma is allocated
69308 * so that the anon_vma locking is not a noop.
69309 */
69310 if (unlikely(anon_vma_prepare(vma)))
69311 return -ENOMEM;
69312 + locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
69313 + if (locknext && anon_vma_prepare(vma->vm_next))
69314 + return -ENOMEM;
69315 vma_lock_anon_vma(vma);
69316 + if (locknext)
69317 + vma_lock_anon_vma(vma->vm_next);
69318
69319 /*
69320 * vma->vm_start/vm_end cannot change under us because the caller
69321 * is required to hold the mmap_sem in read mode. We need the
69322 - * anon_vma lock to serialize against concurrent expand_stacks.
69323 - * Also guard against wrapping around to address 0.
69324 + * anon_vma locks to serialize against concurrent expand_stacks
69325 + * and expand_upwards.
69326 */
69327 - if (address < PAGE_ALIGN(address+4))
69328 - address = PAGE_ALIGN(address+4);
69329 - else {
69330 - vma_unlock_anon_vma(vma);
69331 - return -ENOMEM;
69332 - }
69333 error = 0;
69334
69335 /* Somebody else might have raced and expanded it already */
69336 - if (address > vma->vm_end) {
69337 + if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
69338 + error = -ENOMEM;
69339 + else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
69340 unsigned long size, grow;
69341
69342 size = address - vma->vm_start;
69343 @@ -1739,6 +2000,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
69344 }
69345 }
69346 }
69347 + if (locknext)
69348 + vma_unlock_anon_vma(vma->vm_next);
69349 vma_unlock_anon_vma(vma);
69350 khugepaged_enter_vma_merge(vma);
69351 return error;
69352 @@ -1752,6 +2015,8 @@ int expand_downwards(struct vm_area_struct *vma,
69353 unsigned long address)
69354 {
69355 int error;
69356 + bool lockprev = false;
69357 + struct vm_area_struct *prev;
69358
69359 /*
69360 * We must make sure the anon_vma is allocated
69361 @@ -1765,6 +2030,15 @@ int expand_downwards(struct vm_area_struct *vma,
69362 if (error)
69363 return error;
69364
69365 + prev = vma->vm_prev;
69366 +#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
69367 + lockprev = prev && (prev->vm_flags & VM_GROWSUP);
69368 +#endif
69369 + if (lockprev && anon_vma_prepare(prev))
69370 + return -ENOMEM;
69371 + if (lockprev)
69372 + vma_lock_anon_vma(prev);
69373 +
69374 vma_lock_anon_vma(vma);
69375
69376 /*
69377 @@ -1774,9 +2048,17 @@ int expand_downwards(struct vm_area_struct *vma,
69378 */
69379
69380 /* Somebody else might have raced and expanded it already */
69381 - if (address < vma->vm_start) {
69382 + if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
69383 + error = -ENOMEM;
69384 + else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
69385 unsigned long size, grow;
69386
69387 +#ifdef CONFIG_PAX_SEGMEXEC
69388 + struct vm_area_struct *vma_m;
69389 +
69390 + vma_m = pax_find_mirror_vma(vma);
69391 +#endif
69392 +
69393 size = vma->vm_end - address;
69394 grow = (vma->vm_start - address) >> PAGE_SHIFT;
69395
69396 @@ -1786,11 +2068,22 @@ int expand_downwards(struct vm_area_struct *vma,
69397 if (!error) {
69398 vma->vm_start = address;
69399 vma->vm_pgoff -= grow;
69400 + track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
69401 +
69402 +#ifdef CONFIG_PAX_SEGMEXEC
69403 + if (vma_m) {
69404 + vma_m->vm_start -= grow << PAGE_SHIFT;
69405 + vma_m->vm_pgoff -= grow;
69406 + }
69407 +#endif
69408 +
69409 perf_event_mmap(vma);
69410 }
69411 }
69412 }
69413 vma_unlock_anon_vma(vma);
69414 + if (lockprev)
69415 + vma_unlock_anon_vma(prev);
69416 khugepaged_enter_vma_merge(vma);
69417 return error;
69418 }
69419 @@ -1860,6 +2153,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
69420 do {
69421 long nrpages = vma_pages(vma);
69422
69423 +#ifdef CONFIG_PAX_SEGMEXEC
69424 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
69425 + vma = remove_vma(vma);
69426 + continue;
69427 + }
69428 +#endif
69429 +
69430 mm->total_vm -= nrpages;
69431 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
69432 vma = remove_vma(vma);
69433 @@ -1905,6 +2205,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
69434 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
69435 vma->vm_prev = NULL;
69436 do {
69437 +
69438 +#ifdef CONFIG_PAX_SEGMEXEC
69439 + if (vma->vm_mirror) {
69440 + BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
69441 + vma->vm_mirror->vm_mirror = NULL;
69442 + vma->vm_mirror->vm_flags &= ~VM_EXEC;
69443 + vma->vm_mirror = NULL;
69444 + }
69445 +#endif
69446 +
69447 rb_erase(&vma->vm_rb, &mm->mm_rb);
69448 mm->map_count--;
69449 tail_vma = vma;
69450 @@ -1933,14 +2243,33 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
69451 struct vm_area_struct *new;
69452 int err = -ENOMEM;
69453
69454 +#ifdef CONFIG_PAX_SEGMEXEC
69455 + struct vm_area_struct *vma_m, *new_m = NULL;
69456 + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
69457 +#endif
69458 +
69459 if (is_vm_hugetlb_page(vma) && (addr &
69460 ~(huge_page_mask(hstate_vma(vma)))))
69461 return -EINVAL;
69462
69463 +#ifdef CONFIG_PAX_SEGMEXEC
69464 + vma_m = pax_find_mirror_vma(vma);
69465 +#endif
69466 +
69467 new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
69468 if (!new)
69469 goto out_err;
69470
69471 +#ifdef CONFIG_PAX_SEGMEXEC
69472 + if (vma_m) {
69473 + new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
69474 + if (!new_m) {
69475 + kmem_cache_free(vm_area_cachep, new);
69476 + goto out_err;
69477 + }
69478 + }
69479 +#endif
69480 +
69481 /* most fields are the same, copy all, and then fixup */
69482 *new = *vma;
69483
69484 @@ -1953,6 +2282,22 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
69485 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
69486 }
69487
69488 +#ifdef CONFIG_PAX_SEGMEXEC
69489 + if (vma_m) {
69490 + *new_m = *vma_m;
69491 + INIT_LIST_HEAD(&new_m->anon_vma_chain);
69492 + new_m->vm_mirror = new;
69493 + new->vm_mirror = new_m;
69494 +
69495 + if (new_below)
69496 + new_m->vm_end = addr_m;
69497 + else {
69498 + new_m->vm_start = addr_m;
69499 + new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
69500 + }
69501 + }
69502 +#endif
69503 +
69504 pol = mpol_dup(vma_policy(vma));
69505 if (IS_ERR(pol)) {
69506 err = PTR_ERR(pol);
69507 @@ -1978,6 +2323,42 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
69508 else
69509 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
69510
69511 +#ifdef CONFIG_PAX_SEGMEXEC
69512 + if (!err && vma_m) {
69513 + if (anon_vma_clone(new_m, vma_m))
69514 + goto out_free_mpol;
69515 +
69516 + mpol_get(pol);
69517 + vma_set_policy(new_m, pol);
69518 +
69519 + if (new_m->vm_file) {
69520 + get_file(new_m->vm_file);
69521 + if (vma_m->vm_flags & VM_EXECUTABLE)
69522 + added_exe_file_vma(mm);
69523 + }
69524 +
69525 + if (new_m->vm_ops && new_m->vm_ops->open)
69526 + new_m->vm_ops->open(new_m);
69527 +
69528 + if (new_below)
69529 + err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
69530 + ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
69531 + else
69532 + err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
69533 +
69534 + if (err) {
69535 + if (new_m->vm_ops && new_m->vm_ops->close)
69536 + new_m->vm_ops->close(new_m);
69537 + if (new_m->vm_file) {
69538 + if (vma_m->vm_flags & VM_EXECUTABLE)
69539 + removed_exe_file_vma(mm);
69540 + fput(new_m->vm_file);
69541 + }
69542 + mpol_put(pol);
69543 + }
69544 + }
69545 +#endif
69546 +
69547 /* Success. */
69548 if (!err)
69549 return 0;
69550 @@ -1990,10 +2371,18 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
69551 removed_exe_file_vma(mm);
69552 fput(new->vm_file);
69553 }
69554 - unlink_anon_vmas(new);
69555 out_free_mpol:
69556 mpol_put(pol);
69557 out_free_vma:
69558 +
69559 +#ifdef CONFIG_PAX_SEGMEXEC
69560 + if (new_m) {
69561 + unlink_anon_vmas(new_m);
69562 + kmem_cache_free(vm_area_cachep, new_m);
69563 + }
69564 +#endif
69565 +
69566 + unlink_anon_vmas(new);
69567 kmem_cache_free(vm_area_cachep, new);
69568 out_err:
69569 return err;
69570 @@ -2006,6 +2395,15 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
69571 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
69572 unsigned long addr, int new_below)
69573 {
69574 +
69575 +#ifdef CONFIG_PAX_SEGMEXEC
69576 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
69577 + BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
69578 + if (mm->map_count >= sysctl_max_map_count-1)
69579 + return -ENOMEM;
69580 + } else
69581 +#endif
69582 +
69583 if (mm->map_count >= sysctl_max_map_count)
69584 return -ENOMEM;
69585
69586 @@ -2017,11 +2415,30 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
69587 * work. This now handles partial unmappings.
69588 * Jeremy Fitzhardinge <jeremy@goop.org>
69589 */
69590 +#ifdef CONFIG_PAX_SEGMEXEC
69591 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
69592 {
69593 + int ret = __do_munmap(mm, start, len);
69594 + if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
69595 + return ret;
69596 +
69597 + return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
69598 +}
69599 +
69600 +int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
69601 +#else
69602 +int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
69603 +#endif
69604 +{
69605 unsigned long end;
69606 struct vm_area_struct *vma, *prev, *last;
69607
69608 + /*
69609 + * mm->mmap_sem is required to protect against another thread
69610 + * changing the mappings in case we sleep.
69611 + */
69612 + verify_mm_writelocked(mm);
69613 +
69614 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
69615 return -EINVAL;
69616
69617 @@ -2096,6 +2513,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
69618 /* Fix up all other VM information */
69619 remove_vma_list(mm, vma);
69620
69621 + track_exec_limit(mm, start, end, 0UL);
69622 +
69623 return 0;
69624 }
69625
69626 @@ -2108,22 +2527,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
69627
69628 profile_munmap(addr);
69629
69630 +#ifdef CONFIG_PAX_SEGMEXEC
69631 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
69632 + (len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-len))
69633 + return -EINVAL;
69634 +#endif
69635 +
69636 down_write(&mm->mmap_sem);
69637 ret = do_munmap(mm, addr, len);
69638 up_write(&mm->mmap_sem);
69639 return ret;
69640 }
69641
69642 -static inline void verify_mm_writelocked(struct mm_struct *mm)
69643 -{
69644 -#ifdef CONFIG_DEBUG_VM
69645 - if (unlikely(down_read_trylock(&mm->mmap_sem))) {
69646 - WARN_ON(1);
69647 - up_read(&mm->mmap_sem);
69648 - }
69649 -#endif
69650 -}
69651 -
69652 /*
69653 * this is really a simplified "do_mmap". it only handles
69654 * anonymous maps. eventually we may be able to do some
69655 @@ -2137,6 +2552,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
69656 struct rb_node ** rb_link, * rb_parent;
69657 pgoff_t pgoff = addr >> PAGE_SHIFT;
69658 int error;
69659 + unsigned long charged;
69660
69661 len = PAGE_ALIGN(len);
69662 if (!len)
69663 @@ -2148,16 +2564,30 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
69664
69665 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
69666
69667 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
69668 + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
69669 + flags &= ~VM_EXEC;
69670 +
69671 +#ifdef CONFIG_PAX_MPROTECT
69672 + if (mm->pax_flags & MF_PAX_MPROTECT)
69673 + flags &= ~VM_MAYEXEC;
69674 +#endif
69675 +
69676 + }
69677 +#endif
69678 +
69679 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
69680 if (error & ~PAGE_MASK)
69681 return error;
69682
69683 + charged = len >> PAGE_SHIFT;
69684 +
69685 /*
69686 * mlock MCL_FUTURE?
69687 */
69688 if (mm->def_flags & VM_LOCKED) {
69689 unsigned long locked, lock_limit;
69690 - locked = len >> PAGE_SHIFT;
69691 + locked = charged;
69692 locked += mm->locked_vm;
69693 lock_limit = rlimit(RLIMIT_MEMLOCK);
69694 lock_limit >>= PAGE_SHIFT;
69695 @@ -2174,22 +2604,22 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
69696 /*
69697 * Clear old maps. this also does some error checking for us
69698 */
69699 - munmap_back:
69700 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
69701 if (vma && vma->vm_start < addr + len) {
69702 if (do_munmap(mm, addr, len))
69703 return -ENOMEM;
69704 - goto munmap_back;
69705 + vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
69706 + BUG_ON(vma && vma->vm_start < addr + len);
69707 }
69708
69709 /* Check against address space limits *after* clearing old maps... */
69710 - if (!may_expand_vm(mm, len >> PAGE_SHIFT))
69711 + if (!may_expand_vm(mm, charged))
69712 return -ENOMEM;
69713
69714 if (mm->map_count > sysctl_max_map_count)
69715 return -ENOMEM;
69716
69717 - if (security_vm_enough_memory(len >> PAGE_SHIFT))
69718 + if (security_vm_enough_memory(charged))
69719 return -ENOMEM;
69720
69721 /* Can we just expand an old private anonymous mapping? */
69722 @@ -2203,7 +2633,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
69723 */
69724 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
69725 if (!vma) {
69726 - vm_unacct_memory(len >> PAGE_SHIFT);
69727 + vm_unacct_memory(charged);
69728 return -ENOMEM;
69729 }
69730
69731 @@ -2217,11 +2647,12 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
69732 vma_link(mm, vma, prev, rb_link, rb_parent);
69733 out:
69734 perf_event_mmap(vma);
69735 - mm->total_vm += len >> PAGE_SHIFT;
69736 + mm->total_vm += charged;
69737 if (flags & VM_LOCKED) {
69738 if (!mlock_vma_pages_range(vma, addr, addr + len))
69739 - mm->locked_vm += (len >> PAGE_SHIFT);
69740 + mm->locked_vm += charged;
69741 }
69742 + track_exec_limit(mm, addr, addr + len, flags);
69743 return addr;
69744 }
69745
69746 @@ -2268,8 +2699,10 @@ void exit_mmap(struct mm_struct *mm)
69747 * Walk the list again, actually closing and freeing it,
69748 * with preemption enabled, without holding any MM locks.
69749 */
69750 - while (vma)
69751 + while (vma) {
69752 + vma->vm_mirror = NULL;
69753 vma = remove_vma(vma);
69754 + }
69755
69756 BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
69757 }
69758 @@ -2283,6 +2716,13 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
69759 struct vm_area_struct * __vma, * prev;
69760 struct rb_node ** rb_link, * rb_parent;
69761
69762 +#ifdef CONFIG_PAX_SEGMEXEC
69763 + struct vm_area_struct *vma_m = NULL;
69764 +#endif
69765 +
69766 + if (security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1))
69767 + return -EPERM;
69768 +
69769 /*
69770 * The vm_pgoff of a purely anonymous vma should be irrelevant
69771 * until its first write fault, when page's anon_vma and index
69772 @@ -2305,7 +2745,22 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
69773 if ((vma->vm_flags & VM_ACCOUNT) &&
69774 security_vm_enough_memory_mm(mm, vma_pages(vma)))
69775 return -ENOMEM;
69776 +
69777 +#ifdef CONFIG_PAX_SEGMEXEC
69778 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
69779 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
69780 + if (!vma_m)
69781 + return -ENOMEM;
69782 + }
69783 +#endif
69784 +
69785 vma_link(mm, vma, prev, rb_link, rb_parent);
69786 +
69787 +#ifdef CONFIG_PAX_SEGMEXEC
69788 + if (vma_m)
69789 + BUG_ON(pax_mirror_vma(vma_m, vma));
69790 +#endif
69791 +
69792 return 0;
69793 }
69794
69795 @@ -2323,6 +2778,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
69796 struct rb_node **rb_link, *rb_parent;
69797 struct mempolicy *pol;
69798
69799 + BUG_ON(vma->vm_mirror);
69800 +
69801 /*
69802 * If anonymous vma has not yet been faulted, update new pgoff
69803 * to match new location, to increase its chance of merging.
69804 @@ -2373,6 +2830,39 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
69805 return NULL;
69806 }
69807
69808 +#ifdef CONFIG_PAX_SEGMEXEC
69809 +long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
69810 +{
69811 + struct vm_area_struct *prev_m;
69812 + struct rb_node **rb_link_m, *rb_parent_m;
69813 + struct mempolicy *pol_m;
69814 +
69815 + BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
69816 + BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
69817 + BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
69818 + *vma_m = *vma;
69819 + INIT_LIST_HEAD(&vma_m->anon_vma_chain);
69820 + if (anon_vma_clone(vma_m, vma))
69821 + return -ENOMEM;
69822 + pol_m = vma_policy(vma_m);
69823 + mpol_get(pol_m);
69824 + vma_set_policy(vma_m, pol_m);
69825 + vma_m->vm_start += SEGMEXEC_TASK_SIZE;
69826 + vma_m->vm_end += SEGMEXEC_TASK_SIZE;
69827 + vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
69828 + vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
69829 + if (vma_m->vm_file)
69830 + get_file(vma_m->vm_file);
69831 + if (vma_m->vm_ops && vma_m->vm_ops->open)
69832 + vma_m->vm_ops->open(vma_m);
69833 + find_vma_prepare(vma->vm_mm, vma_m->vm_start, &prev_m, &rb_link_m, &rb_parent_m);
69834 + vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
69835 + vma_m->vm_mirror = vma;
69836 + vma->vm_mirror = vma_m;
69837 + return 0;
69838 +}
69839 +#endif
69840 +
69841 /*
69842 * Return true if the calling process may expand its vm space by the passed
69843 * number of pages
69844 @@ -2383,7 +2873,7 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
69845 unsigned long lim;
69846
69847 lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
69848 -
69849 + gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
69850 if (cur + npages > lim)
69851 return 0;
69852 return 1;
69853 @@ -2454,6 +2944,22 @@ int install_special_mapping(struct mm_struct *mm,
69854 vma->vm_start = addr;
69855 vma->vm_end = addr + len;
69856
69857 +#ifdef CONFIG_PAX_MPROTECT
69858 + if (mm->pax_flags & MF_PAX_MPROTECT) {
69859 +#ifndef CONFIG_PAX_MPROTECT_COMPAT
69860 + if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
69861 + return -EPERM;
69862 + if (!(vm_flags & VM_EXEC))
69863 + vm_flags &= ~VM_MAYEXEC;
69864 +#else
69865 + if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
69866 + vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
69867 +#endif
69868 + else
69869 + vm_flags &= ~VM_MAYWRITE;
69870 + }
69871 +#endif
69872 +
69873 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
69874 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
69875
69876 diff --git a/mm/mprotect.c b/mm/mprotect.c
69877 index 5a688a2..27e031c 100644
69878 --- a/mm/mprotect.c
69879 +++ b/mm/mprotect.c
69880 @@ -23,10 +23,16 @@
69881 #include <linux/mmu_notifier.h>
69882 #include <linux/migrate.h>
69883 #include <linux/perf_event.h>
69884 +
69885 +#ifdef CONFIG_PAX_MPROTECT
69886 +#include <linux/elf.h>
69887 +#endif
69888 +
69889 #include <asm/uaccess.h>
69890 #include <asm/pgtable.h>
69891 #include <asm/cacheflush.h>
69892 #include <asm/tlbflush.h>
69893 +#include <asm/mmu_context.h>
69894
69895 #ifndef pgprot_modify
69896 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
69897 @@ -141,6 +147,48 @@ static void change_protection(struct vm_area_struct *vma,
69898 flush_tlb_range(vma, start, end);
69899 }
69900
69901 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
69902 +/* called while holding the mmap semaphor for writing except stack expansion */
69903 +void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
69904 +{
69905 + unsigned long oldlimit, newlimit = 0UL;
69906 +
69907 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX))
69908 + return;
69909 +
69910 + spin_lock(&mm->page_table_lock);
69911 + oldlimit = mm->context.user_cs_limit;
69912 + if ((prot & VM_EXEC) && oldlimit < end)
69913 + /* USER_CS limit moved up */
69914 + newlimit = end;
69915 + else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
69916 + /* USER_CS limit moved down */
69917 + newlimit = start;
69918 +
69919 + if (newlimit) {
69920 + mm->context.user_cs_limit = newlimit;
69921 +
69922 +#ifdef CONFIG_SMP
69923 + wmb();
69924 + cpus_clear(mm->context.cpu_user_cs_mask);
69925 + cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
69926 +#endif
69927 +
69928 + set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
69929 + }
69930 + spin_unlock(&mm->page_table_lock);
69931 + if (newlimit == end) {
69932 + struct vm_area_struct *vma = find_vma(mm, oldlimit);
69933 +
69934 + for (; vma && vma->vm_start < end; vma = vma->vm_next)
69935 + if (is_vm_hugetlb_page(vma))
69936 + hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
69937 + else
69938 + change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma));
69939 + }
69940 +}
69941 +#endif
69942 +
69943 int
69944 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
69945 unsigned long start, unsigned long end, unsigned long newflags)
69946 @@ -153,11 +201,29 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
69947 int error;
69948 int dirty_accountable = 0;
69949
69950 +#ifdef CONFIG_PAX_SEGMEXEC
69951 + struct vm_area_struct *vma_m = NULL;
69952 + unsigned long start_m, end_m;
69953 +
69954 + start_m = start + SEGMEXEC_TASK_SIZE;
69955 + end_m = end + SEGMEXEC_TASK_SIZE;
69956 +#endif
69957 +
69958 if (newflags == oldflags) {
69959 *pprev = vma;
69960 return 0;
69961 }
69962
69963 + if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
69964 + struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
69965 +
69966 + if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
69967 + return -ENOMEM;
69968 +
69969 + if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
69970 + return -ENOMEM;
69971 + }
69972 +
69973 /*
69974 * If we make a private mapping writable we increase our commit;
69975 * but (without finer accounting) cannot reduce our commit if we
69976 @@ -174,6 +240,42 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
69977 }
69978 }
69979
69980 +#ifdef CONFIG_PAX_SEGMEXEC
69981 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
69982 + if (start != vma->vm_start) {
69983 + error = split_vma(mm, vma, start, 1);
69984 + if (error)
69985 + goto fail;
69986 + BUG_ON(!*pprev || (*pprev)->vm_next == vma);
69987 + *pprev = (*pprev)->vm_next;
69988 + }
69989 +
69990 + if (end != vma->vm_end) {
69991 + error = split_vma(mm, vma, end, 0);
69992 + if (error)
69993 + goto fail;
69994 + }
69995 +
69996 + if (pax_find_mirror_vma(vma)) {
69997 + error = __do_munmap(mm, start_m, end_m - start_m);
69998 + if (error)
69999 + goto fail;
70000 + } else {
70001 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
70002 + if (!vma_m) {
70003 + error = -ENOMEM;
70004 + goto fail;
70005 + }
70006 + vma->vm_flags = newflags;
70007 + error = pax_mirror_vma(vma_m, vma);
70008 + if (error) {
70009 + vma->vm_flags = oldflags;
70010 + goto fail;
70011 + }
70012 + }
70013 + }
70014 +#endif
70015 +
70016 /*
70017 * First try to merge with previous and/or next vma.
70018 */
70019 @@ -204,9 +306,21 @@ success:
70020 * vm_flags and vm_page_prot are protected by the mmap_sem
70021 * held in write mode.
70022 */
70023 +
70024 +#ifdef CONFIG_PAX_SEGMEXEC
70025 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
70026 + pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
70027 +#endif
70028 +
70029 vma->vm_flags = newflags;
70030 +
70031 +#ifdef CONFIG_PAX_MPROTECT
70032 + if (mm->binfmt && mm->binfmt->handle_mprotect)
70033 + mm->binfmt->handle_mprotect(vma, newflags);
70034 +#endif
70035 +
70036 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
70037 - vm_get_page_prot(newflags));
70038 + vm_get_page_prot(vma->vm_flags));
70039
70040 if (vma_wants_writenotify(vma)) {
70041 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
70042 @@ -248,6 +362,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
70043 end = start + len;
70044 if (end <= start)
70045 return -ENOMEM;
70046 +
70047 +#ifdef CONFIG_PAX_SEGMEXEC
70048 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
70049 + if (end > SEGMEXEC_TASK_SIZE)
70050 + return -EINVAL;
70051 + } else
70052 +#endif
70053 +
70054 + if (end > TASK_SIZE)
70055 + return -EINVAL;
70056 +
70057 if (!arch_validate_prot(prot))
70058 return -EINVAL;
70059
70060 @@ -255,7 +380,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
70061 /*
70062 * Does the application expect PROT_READ to imply PROT_EXEC:
70063 */
70064 - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
70065 + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
70066 prot |= PROT_EXEC;
70067
70068 vm_flags = calc_vm_prot_bits(prot);
70069 @@ -287,6 +412,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
70070 if (start > vma->vm_start)
70071 prev = vma;
70072
70073 +#ifdef CONFIG_PAX_MPROTECT
70074 + if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
70075 + current->mm->binfmt->handle_mprotect(vma, vm_flags);
70076 +#endif
70077 +
70078 for (nstart = start ; ; ) {
70079 unsigned long newflags;
70080
70081 @@ -296,6 +426,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
70082
70083 /* newflags >> 4 shift VM_MAY% in place of VM_% */
70084 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
70085 + if (prot & (PROT_WRITE | PROT_EXEC))
70086 + gr_log_rwxmprotect(vma->vm_file);
70087 +
70088 + error = -EACCES;
70089 + goto out;
70090 + }
70091 +
70092 + if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
70093 error = -EACCES;
70094 goto out;
70095 }
70096 @@ -310,6 +448,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
70097 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
70098 if (error)
70099 goto out;
70100 +
70101 + track_exec_limit(current->mm, nstart, tmp, vm_flags);
70102 +
70103 nstart = tmp;
70104
70105 if (nstart < prev->vm_end)
70106 diff --git a/mm/mremap.c b/mm/mremap.c
70107 index d6959cb..18a402a 100644
70108 --- a/mm/mremap.c
70109 +++ b/mm/mremap.c
70110 @@ -106,6 +106,12 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
70111 continue;
70112 pte = ptep_get_and_clear(mm, old_addr, old_pte);
70113 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
70114 +
70115 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
70116 + if (!(__supported_pte_mask & _PAGE_NX) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
70117 + pte = pte_exprotect(pte);
70118 +#endif
70119 +
70120 set_pte_at(mm, new_addr, new_pte, pte);
70121 }
70122
70123 @@ -290,6 +296,11 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
70124 if (is_vm_hugetlb_page(vma))
70125 goto Einval;
70126
70127 +#ifdef CONFIG_PAX_SEGMEXEC
70128 + if (pax_find_mirror_vma(vma))
70129 + goto Einval;
70130 +#endif
70131 +
70132 /* We can't remap across vm area boundaries */
70133 if (old_len > vma->vm_end - addr)
70134 goto Efault;
70135 @@ -346,20 +357,25 @@ static unsigned long mremap_to(unsigned long addr,
70136 unsigned long ret = -EINVAL;
70137 unsigned long charged = 0;
70138 unsigned long map_flags;
70139 + unsigned long pax_task_size = TASK_SIZE;
70140
70141 if (new_addr & ~PAGE_MASK)
70142 goto out;
70143
70144 - if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
70145 +#ifdef CONFIG_PAX_SEGMEXEC
70146 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
70147 + pax_task_size = SEGMEXEC_TASK_SIZE;
70148 +#endif
70149 +
70150 + pax_task_size -= PAGE_SIZE;
70151 +
70152 + if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
70153 goto out;
70154
70155 /* Check if the location we're moving into overlaps the
70156 * old location at all, and fail if it does.
70157 */
70158 - if ((new_addr <= addr) && (new_addr+new_len) > addr)
70159 - goto out;
70160 -
70161 - if ((addr <= new_addr) && (addr+old_len) > new_addr)
70162 + if (addr + old_len > new_addr && new_addr + new_len > addr)
70163 goto out;
70164
70165 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
70166 @@ -431,6 +447,7 @@ unsigned long do_mremap(unsigned long addr,
70167 struct vm_area_struct *vma;
70168 unsigned long ret = -EINVAL;
70169 unsigned long charged = 0;
70170 + unsigned long pax_task_size = TASK_SIZE;
70171
70172 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
70173 goto out;
70174 @@ -449,6 +466,17 @@ unsigned long do_mremap(unsigned long addr,
70175 if (!new_len)
70176 goto out;
70177
70178 +#ifdef CONFIG_PAX_SEGMEXEC
70179 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
70180 + pax_task_size = SEGMEXEC_TASK_SIZE;
70181 +#endif
70182 +
70183 + pax_task_size -= PAGE_SIZE;
70184 +
70185 + if (new_len > pax_task_size || addr > pax_task_size-new_len ||
70186 + old_len > pax_task_size || addr > pax_task_size-old_len)
70187 + goto out;
70188 +
70189 if (flags & MREMAP_FIXED) {
70190 if (flags & MREMAP_MAYMOVE)
70191 ret = mremap_to(addr, old_len, new_addr, new_len);
70192 @@ -498,6 +526,7 @@ unsigned long do_mremap(unsigned long addr,
70193 addr + new_len);
70194 }
70195 ret = addr;
70196 + track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
70197 goto out;
70198 }
70199 }
70200 @@ -524,7 +553,13 @@ unsigned long do_mremap(unsigned long addr,
70201 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
70202 if (ret)
70203 goto out;
70204 +
70205 + map_flags = vma->vm_flags;
70206 ret = move_vma(vma, addr, old_len, new_len, new_addr);
70207 + if (!(ret & ~PAGE_MASK)) {
70208 + track_exec_limit(current->mm, addr, addr + old_len, 0UL);
70209 + track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
70210 + }
70211 }
70212 out:
70213 if (ret & ~PAGE_MASK)
70214 diff --git a/mm/nobootmem.c b/mm/nobootmem.c
70215 index 7fa41b4..6087460 100644
70216 --- a/mm/nobootmem.c
70217 +++ b/mm/nobootmem.c
70218 @@ -110,19 +110,30 @@ static void __init __free_pages_memory(unsigned long start, unsigned long end)
70219 unsigned long __init free_all_memory_core_early(int nodeid)
70220 {
70221 int i;
70222 - u64 start, end;
70223 + u64 start, end, startrange, endrange;
70224 unsigned long count = 0;
70225 - struct range *range = NULL;
70226 + struct range *range = NULL, rangerange = { 0, 0 };
70227 int nr_range;
70228
70229 nr_range = get_free_all_memory_range(&range, nodeid);
70230 + startrange = __pa(range) >> PAGE_SHIFT;
70231 + endrange = (__pa(range + nr_range) - 1) >> PAGE_SHIFT;
70232
70233 for (i = 0; i < nr_range; i++) {
70234 start = range[i].start;
70235 end = range[i].end;
70236 + if (start <= endrange && startrange < end) {
70237 + BUG_ON(rangerange.start | rangerange.end);
70238 + rangerange = range[i];
70239 + continue;
70240 + }
70241 count += end - start;
70242 __free_pages_memory(start, end);
70243 }
70244 + start = rangerange.start;
70245 + end = rangerange.end;
70246 + count += end - start;
70247 + __free_pages_memory(start, end);
70248
70249 return count;
70250 }
70251 diff --git a/mm/nommu.c b/mm/nommu.c
70252 index b982290..7d73f53 100644
70253 --- a/mm/nommu.c
70254 +++ b/mm/nommu.c
70255 @@ -62,7 +62,6 @@ int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
70256 int sysctl_overcommit_ratio = 50; /* default is 50% */
70257 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
70258 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
70259 -int heap_stack_gap = 0;
70260
70261 atomic_long_t mmap_pages_allocated;
70262
70263 @@ -825,15 +824,6 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
70264 EXPORT_SYMBOL(find_vma);
70265
70266 /*
70267 - * find a VMA
70268 - * - we don't extend stack VMAs under NOMMU conditions
70269 - */
70270 -struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
70271 -{
70272 - return find_vma(mm, addr);
70273 -}
70274 -
70275 -/*
70276 * expand a stack to a given address
70277 * - not supported under NOMMU conditions
70278 */
70279 @@ -1553,6 +1543,7 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
70280
70281 /* most fields are the same, copy all, and then fixup */
70282 *new = *vma;
70283 + INIT_LIST_HEAD(&new->anon_vma_chain);
70284 *region = *vma->vm_region;
70285 new->vm_region = region;
70286
70287 diff --git a/mm/page_alloc.c b/mm/page_alloc.c
70288 index 485be89..c059ad3 100644
70289 --- a/mm/page_alloc.c
70290 +++ b/mm/page_alloc.c
70291 @@ -341,7 +341,7 @@ out:
70292 * This usage means that zero-order pages may not be compound.
70293 */
70294
70295 -static void free_compound_page(struct page *page)
70296 +void free_compound_page(struct page *page)
70297 {
70298 __free_pages_ok(page, compound_order(page));
70299 }
70300 @@ -654,6 +654,10 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
70301 int i;
70302 int bad = 0;
70303
70304 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
70305 + unsigned long index = 1UL << order;
70306 +#endif
70307 +
70308 trace_mm_page_free_direct(page, order);
70309 kmemcheck_free_shadow(page, order);
70310
70311 @@ -669,6 +673,12 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
70312 debug_check_no_obj_freed(page_address(page),
70313 PAGE_SIZE << order);
70314 }
70315 +
70316 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
70317 + for (; index; --index)
70318 + sanitize_highpage(page + index - 1);
70319 +#endif
70320 +
70321 arch_free_page(page, order);
70322 kernel_map_pages(page, 1 << order, 0);
70323
70324 @@ -784,8 +794,10 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
70325 arch_alloc_page(page, order);
70326 kernel_map_pages(page, 1 << order, 1);
70327
70328 +#ifndef CONFIG_PAX_MEMORY_SANITIZE
70329 if (gfp_flags & __GFP_ZERO)
70330 prep_zero_page(page, order, gfp_flags);
70331 +#endif
70332
70333 if (order && (gfp_flags & __GFP_COMP))
70334 prep_compound_page(page, order);
70335 @@ -3357,7 +3369,13 @@ static int pageblock_is_reserved(unsigned long start_pfn, unsigned long end_pfn)
70336 unsigned long pfn;
70337
70338 for (pfn = start_pfn; pfn < end_pfn; pfn++) {
70339 +#ifdef CONFIG_X86_32
70340 + /* boot failures in VMware 8 on 32bit vanilla since
70341 + this change */
70342 + if (!pfn_valid(pfn) || PageReserved(pfn_to_page(pfn)))
70343 +#else
70344 if (!pfn_valid_within(pfn) || PageReserved(pfn_to_page(pfn)))
70345 +#endif
70346 return 1;
70347 }
70348 return 0;
70349 diff --git a/mm/percpu.c b/mm/percpu.c
70350 index 716eb4a..8d10419 100644
70351 --- a/mm/percpu.c
70352 +++ b/mm/percpu.c
70353 @@ -121,7 +121,7 @@ static unsigned int pcpu_low_unit_cpu __read_mostly;
70354 static unsigned int pcpu_high_unit_cpu __read_mostly;
70355
70356 /* the address of the first chunk which starts with the kernel static area */
70357 -void *pcpu_base_addr __read_mostly;
70358 +void *pcpu_base_addr __read_only;
70359 EXPORT_SYMBOL_GPL(pcpu_base_addr);
70360
70361 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
70362 diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
70363 index e920aa3..78fe584 100644
70364 --- a/mm/process_vm_access.c
70365 +++ b/mm/process_vm_access.c
70366 @@ -13,6 +13,7 @@
70367 #include <linux/uio.h>
70368 #include <linux/sched.h>
70369 #include <linux/highmem.h>
70370 +#include <linux/security.h>
70371 #include <linux/ptrace.h>
70372 #include <linux/slab.h>
70373 #include <linux/syscalls.h>
70374 @@ -258,19 +259,19 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
70375 size_t iov_l_curr_offset = 0;
70376 ssize_t iov_len;
70377
70378 + return -ENOSYS; // PaX: until properly audited
70379 +
70380 /*
70381 * Work out how many pages of struct pages we're going to need
70382 * when eventually calling get_user_pages
70383 */
70384 for (i = 0; i < riovcnt; i++) {
70385 iov_len = rvec[i].iov_len;
70386 - if (iov_len > 0) {
70387 - nr_pages_iov = ((unsigned long)rvec[i].iov_base
70388 - + iov_len)
70389 - / PAGE_SIZE - (unsigned long)rvec[i].iov_base
70390 - / PAGE_SIZE + 1;
70391 - nr_pages = max(nr_pages, nr_pages_iov);
70392 - }
70393 + if (iov_len <= 0)
70394 + continue;
70395 + nr_pages_iov = ((unsigned long)rvec[i].iov_base + iov_len) / PAGE_SIZE -
70396 + (unsigned long)rvec[i].iov_base / PAGE_SIZE + 1;
70397 + nr_pages = max(nr_pages, nr_pages_iov);
70398 }
70399
70400 if (nr_pages == 0)
70401 @@ -298,8 +299,13 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
70402 goto free_proc_pages;
70403 }
70404
70405 + if (gr_handle_ptrace(task, vm_write ? PTRACE_POKETEXT : PTRACE_ATTACH)) {
70406 + rc = -EPERM;
70407 + goto put_task_struct;
70408 + }
70409 +
70410 task_lock(task);
70411 - if (__ptrace_may_access(task, PTRACE_MODE_ATTACH)) {
70412 + if (ptrace_may_access_nolock(task, PTRACE_MODE_ATTACH)) {
70413 task_unlock(task);
70414 rc = -EPERM;
70415 goto put_task_struct;
70416 diff --git a/mm/rmap.c b/mm/rmap.c
70417 index a4fd368..e0ffec7 100644
70418 --- a/mm/rmap.c
70419 +++ b/mm/rmap.c
70420 @@ -152,6 +152,10 @@ int anon_vma_prepare(struct vm_area_struct *vma)
70421 struct anon_vma *anon_vma = vma->anon_vma;
70422 struct anon_vma_chain *avc;
70423
70424 +#ifdef CONFIG_PAX_SEGMEXEC
70425 + struct anon_vma_chain *avc_m = NULL;
70426 +#endif
70427 +
70428 might_sleep();
70429 if (unlikely(!anon_vma)) {
70430 struct mm_struct *mm = vma->vm_mm;
70431 @@ -161,6 +165,12 @@ int anon_vma_prepare(struct vm_area_struct *vma)
70432 if (!avc)
70433 goto out_enomem;
70434
70435 +#ifdef CONFIG_PAX_SEGMEXEC
70436 + avc_m = anon_vma_chain_alloc(GFP_KERNEL);
70437 + if (!avc_m)
70438 + goto out_enomem_free_avc;
70439 +#endif
70440 +
70441 anon_vma = find_mergeable_anon_vma(vma);
70442 allocated = NULL;
70443 if (!anon_vma) {
70444 @@ -174,6 +184,21 @@ int anon_vma_prepare(struct vm_area_struct *vma)
70445 /* page_table_lock to protect against threads */
70446 spin_lock(&mm->page_table_lock);
70447 if (likely(!vma->anon_vma)) {
70448 +
70449 +#ifdef CONFIG_PAX_SEGMEXEC
70450 + struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
70451 +
70452 + if (vma_m) {
70453 + BUG_ON(vma_m->anon_vma);
70454 + vma_m->anon_vma = anon_vma;
70455 + avc_m->anon_vma = anon_vma;
70456 + avc_m->vma = vma;
70457 + list_add(&avc_m->same_vma, &vma_m->anon_vma_chain);
70458 + list_add(&avc_m->same_anon_vma, &anon_vma->head);
70459 + avc_m = NULL;
70460 + }
70461 +#endif
70462 +
70463 vma->anon_vma = anon_vma;
70464 avc->anon_vma = anon_vma;
70465 avc->vma = vma;
70466 @@ -187,12 +212,24 @@ int anon_vma_prepare(struct vm_area_struct *vma)
70467
70468 if (unlikely(allocated))
70469 put_anon_vma(allocated);
70470 +
70471 +#ifdef CONFIG_PAX_SEGMEXEC
70472 + if (unlikely(avc_m))
70473 + anon_vma_chain_free(avc_m);
70474 +#endif
70475 +
70476 if (unlikely(avc))
70477 anon_vma_chain_free(avc);
70478 }
70479 return 0;
70480
70481 out_enomem_free_avc:
70482 +
70483 +#ifdef CONFIG_PAX_SEGMEXEC
70484 + if (avc_m)
70485 + anon_vma_chain_free(avc_m);
70486 +#endif
70487 +
70488 anon_vma_chain_free(avc);
70489 out_enomem:
70490 return -ENOMEM;
70491 @@ -243,7 +280,7 @@ static void anon_vma_chain_link(struct vm_area_struct *vma,
70492 * Attach the anon_vmas from src to dst.
70493 * Returns 0 on success, -ENOMEM on failure.
70494 */
70495 -int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
70496 +int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src)
70497 {
70498 struct anon_vma_chain *avc, *pavc;
70499 struct anon_vma *root = NULL;
70500 @@ -276,7 +313,7 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
70501 * the corresponding VMA in the parent process is attached to.
70502 * Returns 0 on success, non-zero on failure.
70503 */
70504 -int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
70505 +int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma)
70506 {
70507 struct anon_vma_chain *avc;
70508 struct anon_vma *anon_vma;
70509 diff --git a/mm/shmem.c b/mm/shmem.c
70510 index 6c253f7..367e20a 100644
70511 --- a/mm/shmem.c
70512 +++ b/mm/shmem.c
70513 @@ -31,7 +31,7 @@
70514 #include <linux/export.h>
70515 #include <linux/swap.h>
70516
70517 -static struct vfsmount *shm_mnt;
70518 +struct vfsmount *shm_mnt;
70519
70520 #ifdef CONFIG_SHMEM
70521 /*
70522 @@ -74,7 +74,7 @@ static struct vfsmount *shm_mnt;
70523 #define BOGO_DIRENT_SIZE 20
70524
70525 /* Symlink up to this size is kmalloc'ed instead of using a swappable page */
70526 -#define SHORT_SYMLINK_LEN 128
70527 +#define SHORT_SYMLINK_LEN 64
70528
70529 struct shmem_xattr {
70530 struct list_head list; /* anchored by shmem_inode_info->xattr_list */
70531 @@ -2180,8 +2180,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
70532 int err = -ENOMEM;
70533
70534 /* Round up to L1_CACHE_BYTES to resist false sharing */
70535 - sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
70536 - L1_CACHE_BYTES), GFP_KERNEL);
70537 + sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
70538 if (!sbinfo)
70539 return -ENOMEM;
70540
70541 diff --git a/mm/slab.c b/mm/slab.c
70542 index 83311c9a..fcf8f86 100644
70543 --- a/mm/slab.c
70544 +++ b/mm/slab.c
70545 @@ -151,7 +151,7 @@
70546
70547 /* Legal flag mask for kmem_cache_create(). */
70548 #if DEBUG
70549 -# define CREATE_MASK (SLAB_RED_ZONE | \
70550 +# define CREATE_MASK (SLAB_USERCOPY | SLAB_RED_ZONE | \
70551 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
70552 SLAB_CACHE_DMA | \
70553 SLAB_STORE_USER | \
70554 @@ -159,7 +159,7 @@
70555 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
70556 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
70557 #else
70558 -# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
70559 +# define CREATE_MASK (SLAB_USERCOPY | SLAB_HWCACHE_ALIGN | \
70560 SLAB_CACHE_DMA | \
70561 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
70562 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
70563 @@ -288,7 +288,7 @@ struct kmem_list3 {
70564 * Need this for bootstrapping a per node allocator.
70565 */
70566 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
70567 -static struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
70568 +static struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
70569 #define CACHE_CACHE 0
70570 #define SIZE_AC MAX_NUMNODES
70571 #define SIZE_L3 (2 * MAX_NUMNODES)
70572 @@ -389,10 +389,10 @@ static void kmem_list3_init(struct kmem_list3 *parent)
70573 if ((x)->max_freeable < i) \
70574 (x)->max_freeable = i; \
70575 } while (0)
70576 -#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
70577 -#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
70578 -#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
70579 -#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
70580 +#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
70581 +#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
70582 +#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
70583 +#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
70584 #else
70585 #define STATS_INC_ACTIVE(x) do { } while (0)
70586 #define STATS_DEC_ACTIVE(x) do { } while (0)
70587 @@ -538,7 +538,7 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
70588 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
70589 */
70590 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
70591 - const struct slab *slab, void *obj)
70592 + const struct slab *slab, const void *obj)
70593 {
70594 u32 offset = (obj - slab->s_mem);
70595 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
70596 @@ -564,7 +564,7 @@ struct cache_names {
70597 static struct cache_names __initdata cache_names[] = {
70598 #define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
70599 #include <linux/kmalloc_sizes.h>
70600 - {NULL,}
70601 + {NULL}
70602 #undef CACHE
70603 };
70604
70605 @@ -1572,7 +1572,7 @@ void __init kmem_cache_init(void)
70606 sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
70607 sizes[INDEX_AC].cs_size,
70608 ARCH_KMALLOC_MINALIGN,
70609 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
70610 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
70611 NULL);
70612
70613 if (INDEX_AC != INDEX_L3) {
70614 @@ -1580,7 +1580,7 @@ void __init kmem_cache_init(void)
70615 kmem_cache_create(names[INDEX_L3].name,
70616 sizes[INDEX_L3].cs_size,
70617 ARCH_KMALLOC_MINALIGN,
70618 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
70619 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
70620 NULL);
70621 }
70622
70623 @@ -1598,7 +1598,7 @@ void __init kmem_cache_init(void)
70624 sizes->cs_cachep = kmem_cache_create(names->name,
70625 sizes->cs_size,
70626 ARCH_KMALLOC_MINALIGN,
70627 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
70628 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
70629 NULL);
70630 }
70631 #ifdef CONFIG_ZONE_DMA
70632 @@ -4322,10 +4322,10 @@ static int s_show(struct seq_file *m, void *p)
70633 }
70634 /* cpu stats */
70635 {
70636 - unsigned long allochit = atomic_read(&cachep->allochit);
70637 - unsigned long allocmiss = atomic_read(&cachep->allocmiss);
70638 - unsigned long freehit = atomic_read(&cachep->freehit);
70639 - unsigned long freemiss = atomic_read(&cachep->freemiss);
70640 + unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
70641 + unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
70642 + unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
70643 + unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
70644
70645 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
70646 allochit, allocmiss, freehit, freemiss);
70647 @@ -4584,13 +4584,62 @@ static int __init slab_proc_init(void)
70648 {
70649 proc_create("slabinfo",S_IWUSR|S_IRUSR,NULL,&proc_slabinfo_operations);
70650 #ifdef CONFIG_DEBUG_SLAB_LEAK
70651 - proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
70652 + proc_create("slab_allocators", S_IRUSR, NULL, &proc_slabstats_operations);
70653 #endif
70654 return 0;
70655 }
70656 module_init(slab_proc_init);
70657 #endif
70658
70659 +void check_object_size(const void *ptr, unsigned long n, bool to)
70660 +{
70661 +
70662 +#ifdef CONFIG_PAX_USERCOPY
70663 + struct page *page;
70664 + struct kmem_cache *cachep = NULL;
70665 + struct slab *slabp;
70666 + unsigned int objnr;
70667 + unsigned long offset;
70668 + const char *type;
70669 +
70670 + if (!n)
70671 + return;
70672 +
70673 + type = "<null>";
70674 + if (ZERO_OR_NULL_PTR(ptr))
70675 + goto report;
70676 +
70677 + if (!virt_addr_valid(ptr))
70678 + return;
70679 +
70680 + page = virt_to_head_page(ptr);
70681 +
70682 + type = "<process stack>";
70683 + if (!PageSlab(page)) {
70684 + if (object_is_on_stack(ptr, n) == -1)
70685 + goto report;
70686 + return;
70687 + }
70688 +
70689 + cachep = page_get_cache(page);
70690 + type = cachep->name;
70691 + if (!(cachep->flags & SLAB_USERCOPY))
70692 + goto report;
70693 +
70694 + slabp = page_get_slab(page);
70695 + objnr = obj_to_index(cachep, slabp, ptr);
70696 + BUG_ON(objnr >= cachep->num);
70697 + offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
70698 + if (offset <= obj_size(cachep) && n <= obj_size(cachep) - offset)
70699 + return;
70700 +
70701 +report:
70702 + pax_report_usercopy(ptr, n, to, type);
70703 +#endif
70704 +
70705 +}
70706 +EXPORT_SYMBOL(check_object_size);
70707 +
70708 /**
70709 * ksize - get the actual amount of memory allocated for a given object
70710 * @objp: Pointer to the object
70711 diff --git a/mm/slob.c b/mm/slob.c
70712 index 8105be4..579da9d 100644
70713 --- a/mm/slob.c
70714 +++ b/mm/slob.c
70715 @@ -29,7 +29,7 @@
70716 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
70717 * alloc_pages() directly, allocating compound pages so the page order
70718 * does not have to be separately tracked, and also stores the exact
70719 - * allocation size in page->private so that it can be used to accurately
70720 + * allocation size in slob_page->size so that it can be used to accurately
70721 * provide ksize(). These objects are detected in kfree() because slob_page()
70722 * is false for them.
70723 *
70724 @@ -58,6 +58,7 @@
70725 */
70726
70727 #include <linux/kernel.h>
70728 +#include <linux/sched.h>
70729 #include <linux/slab.h>
70730 #include <linux/mm.h>
70731 #include <linux/swap.h> /* struct reclaim_state */
70732 @@ -102,7 +103,8 @@ struct slob_page {
70733 unsigned long flags; /* mandatory */
70734 atomic_t _count; /* mandatory */
70735 slobidx_t units; /* free units left in page */
70736 - unsigned long pad[2];
70737 + unsigned long pad[1];
70738 + unsigned long size; /* size when >=PAGE_SIZE */
70739 slob_t *free; /* first free slob_t in page */
70740 struct list_head list; /* linked list of free pages */
70741 };
70742 @@ -135,7 +137,7 @@ static LIST_HEAD(free_slob_large);
70743 */
70744 static inline int is_slob_page(struct slob_page *sp)
70745 {
70746 - return PageSlab((struct page *)sp);
70747 + return PageSlab((struct page *)sp) && !sp->size;
70748 }
70749
70750 static inline void set_slob_page(struct slob_page *sp)
70751 @@ -150,7 +152,7 @@ static inline void clear_slob_page(struct slob_page *sp)
70752
70753 static inline struct slob_page *slob_page(const void *addr)
70754 {
70755 - return (struct slob_page *)virt_to_page(addr);
70756 + return (struct slob_page *)virt_to_head_page(addr);
70757 }
70758
70759 /*
70760 @@ -210,7 +212,7 @@ static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
70761 /*
70762 * Return the size of a slob block.
70763 */
70764 -static slobidx_t slob_units(slob_t *s)
70765 +static slobidx_t slob_units(const slob_t *s)
70766 {
70767 if (s->units > 0)
70768 return s->units;
70769 @@ -220,7 +222,7 @@ static slobidx_t slob_units(slob_t *s)
70770 /*
70771 * Return the next free slob block pointer after this one.
70772 */
70773 -static slob_t *slob_next(slob_t *s)
70774 +static slob_t *slob_next(const slob_t *s)
70775 {
70776 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
70777 slobidx_t next;
70778 @@ -235,7 +237,7 @@ static slob_t *slob_next(slob_t *s)
70779 /*
70780 * Returns true if s is the last free block in its page.
70781 */
70782 -static int slob_last(slob_t *s)
70783 +static int slob_last(const slob_t *s)
70784 {
70785 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
70786 }
70787 @@ -254,6 +256,7 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
70788 if (!page)
70789 return NULL;
70790
70791 + set_slob_page(page);
70792 return page_address(page);
70793 }
70794
70795 @@ -370,11 +373,11 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
70796 if (!b)
70797 return NULL;
70798 sp = slob_page(b);
70799 - set_slob_page(sp);
70800
70801 spin_lock_irqsave(&slob_lock, flags);
70802 sp->units = SLOB_UNITS(PAGE_SIZE);
70803 sp->free = b;
70804 + sp->size = 0;
70805 INIT_LIST_HEAD(&sp->list);
70806 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
70807 set_slob_page_free(sp, slob_list);
70808 @@ -476,10 +479,9 @@ out:
70809 * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
70810 */
70811
70812 -void *__kmalloc_node(size_t size, gfp_t gfp, int node)
70813 +static void *__kmalloc_node_align(size_t size, gfp_t gfp, int node, int align)
70814 {
70815 - unsigned int *m;
70816 - int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
70817 + slob_t *m;
70818 void *ret;
70819
70820 gfp &= gfp_allowed_mask;
70821 @@ -494,7 +496,10 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
70822
70823 if (!m)
70824 return NULL;
70825 - *m = size;
70826 + BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
70827 + BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
70828 + m[0].units = size;
70829 + m[1].units = align;
70830 ret = (void *)m + align;
70831
70832 trace_kmalloc_node(_RET_IP_, ret,
70833 @@ -506,16 +511,25 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
70834 gfp |= __GFP_COMP;
70835 ret = slob_new_pages(gfp, order, node);
70836 if (ret) {
70837 - struct page *page;
70838 - page = virt_to_page(ret);
70839 - page->private = size;
70840 + struct slob_page *sp;
70841 + sp = slob_page(ret);
70842 + sp->size = size;
70843 }
70844
70845 trace_kmalloc_node(_RET_IP_, ret,
70846 size, PAGE_SIZE << order, gfp, node);
70847 }
70848
70849 - kmemleak_alloc(ret, size, 1, gfp);
70850 + return ret;
70851 +}
70852 +
70853 +void *__kmalloc_node(size_t size, gfp_t gfp, int node)
70854 +{
70855 + int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
70856 + void *ret = __kmalloc_node_align(size, gfp, node, align);
70857 +
70858 + if (!ZERO_OR_NULL_PTR(ret))
70859 + kmemleak_alloc(ret, size, 1, gfp);
70860 return ret;
70861 }
70862 EXPORT_SYMBOL(__kmalloc_node);
70863 @@ -533,13 +547,92 @@ void kfree(const void *block)
70864 sp = slob_page(block);
70865 if (is_slob_page(sp)) {
70866 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
70867 - unsigned int *m = (unsigned int *)(block - align);
70868 - slob_free(m, *m + align);
70869 - } else
70870 + slob_t *m = (slob_t *)(block - align);
70871 + slob_free(m, m[0].units + align);
70872 + } else {
70873 + clear_slob_page(sp);
70874 + free_slob_page(sp);
70875 + sp->size = 0;
70876 put_page(&sp->page);
70877 + }
70878 }
70879 EXPORT_SYMBOL(kfree);
70880
70881 +void check_object_size(const void *ptr, unsigned long n, bool to)
70882 +{
70883 +
70884 +#ifdef CONFIG_PAX_USERCOPY
70885 + struct slob_page *sp;
70886 + const slob_t *free;
70887 + const void *base;
70888 + unsigned long flags;
70889 + const char *type;
70890 +
70891 + if (!n)
70892 + return;
70893 +
70894 + type = "<null>";
70895 + if (ZERO_OR_NULL_PTR(ptr))
70896 + goto report;
70897 +
70898 + if (!virt_addr_valid(ptr))
70899 + return;
70900 +
70901 + type = "<process stack>";
70902 + sp = slob_page(ptr);
70903 + if (!PageSlab((struct page*)sp)) {
70904 + if (object_is_on_stack(ptr, n) == -1)
70905 + goto report;
70906 + return;
70907 + }
70908 +
70909 + type = "<slob>";
70910 + if (sp->size) {
70911 + base = page_address(&sp->page);
70912 + if (base <= ptr && n <= sp->size - (ptr - base))
70913 + return;
70914 + goto report;
70915 + }
70916 +
70917 + /* some tricky double walking to find the chunk */
70918 + spin_lock_irqsave(&slob_lock, flags);
70919 + base = (void *)((unsigned long)ptr & PAGE_MASK);
70920 + free = sp->free;
70921 +
70922 + while (!slob_last(free) && (void *)free <= ptr) {
70923 + base = free + slob_units(free);
70924 + free = slob_next(free);
70925 + }
70926 +
70927 + while (base < (void *)free) {
70928 + slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
70929 + int size = SLOB_UNIT * SLOB_UNITS(m + align);
70930 + int offset;
70931 +
70932 + if (ptr < base + align)
70933 + break;
70934 +
70935 + offset = ptr - base - align;
70936 + if (offset >= m) {
70937 + base += size;
70938 + continue;
70939 + }
70940 +
70941 + if (n > m - offset)
70942 + break;
70943 +
70944 + spin_unlock_irqrestore(&slob_lock, flags);
70945 + return;
70946 + }
70947 +
70948 + spin_unlock_irqrestore(&slob_lock, flags);
70949 +report:
70950 + pax_report_usercopy(ptr, n, to, type);
70951 +#endif
70952 +
70953 +}
70954 +EXPORT_SYMBOL(check_object_size);
70955 +
70956 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
70957 size_t ksize(const void *block)
70958 {
70959 @@ -552,10 +645,10 @@ size_t ksize(const void *block)
70960 sp = slob_page(block);
70961 if (is_slob_page(sp)) {
70962 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
70963 - unsigned int *m = (unsigned int *)(block - align);
70964 - return SLOB_UNITS(*m) * SLOB_UNIT;
70965 + slob_t *m = (slob_t *)(block - align);
70966 + return SLOB_UNITS(m[0].units) * SLOB_UNIT;
70967 } else
70968 - return sp->page.private;
70969 + return sp->size;
70970 }
70971 EXPORT_SYMBOL(ksize);
70972
70973 @@ -571,8 +664,13 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
70974 {
70975 struct kmem_cache *c;
70976
70977 +#ifdef CONFIG_PAX_USERCOPY
70978 + c = __kmalloc_node_align(sizeof(struct kmem_cache),
70979 + GFP_KERNEL, -1, ARCH_KMALLOC_MINALIGN);
70980 +#else
70981 c = slob_alloc(sizeof(struct kmem_cache),
70982 GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
70983 +#endif
70984
70985 if (c) {
70986 c->name = name;
70987 @@ -614,17 +712,25 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
70988
70989 lockdep_trace_alloc(flags);
70990
70991 +#ifdef CONFIG_PAX_USERCOPY
70992 + b = __kmalloc_node_align(c->size, flags, node, c->align);
70993 +#else
70994 if (c->size < PAGE_SIZE) {
70995 b = slob_alloc(c->size, flags, c->align, node);
70996 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
70997 SLOB_UNITS(c->size) * SLOB_UNIT,
70998 flags, node);
70999 } else {
71000 + struct slob_page *sp;
71001 +
71002 b = slob_new_pages(flags, get_order(c->size), node);
71003 + sp = slob_page(b);
71004 + sp->size = c->size;
71005 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
71006 PAGE_SIZE << get_order(c->size),
71007 flags, node);
71008 }
71009 +#endif
71010
71011 if (c->ctor)
71012 c->ctor(b);
71013 @@ -636,10 +742,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
71014
71015 static void __kmem_cache_free(void *b, int size)
71016 {
71017 - if (size < PAGE_SIZE)
71018 + struct slob_page *sp = slob_page(b);
71019 +
71020 + if (is_slob_page(sp))
71021 slob_free(b, size);
71022 - else
71023 + else {
71024 + clear_slob_page(sp);
71025 + free_slob_page(sp);
71026 + sp->size = 0;
71027 slob_free_pages(b, get_order(size));
71028 + }
71029 }
71030
71031 static void kmem_rcu_free(struct rcu_head *head)
71032 @@ -652,17 +764,31 @@ static void kmem_rcu_free(struct rcu_head *head)
71033
71034 void kmem_cache_free(struct kmem_cache *c, void *b)
71035 {
71036 + int size = c->size;
71037 +
71038 +#ifdef CONFIG_PAX_USERCOPY
71039 + if (size + c->align < PAGE_SIZE) {
71040 + size += c->align;
71041 + b -= c->align;
71042 + }
71043 +#endif
71044 +
71045 kmemleak_free_recursive(b, c->flags);
71046 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
71047 struct slob_rcu *slob_rcu;
71048 - slob_rcu = b + (c->size - sizeof(struct slob_rcu));
71049 - slob_rcu->size = c->size;
71050 + slob_rcu = b + (size - sizeof(struct slob_rcu));
71051 + slob_rcu->size = size;
71052 call_rcu(&slob_rcu->head, kmem_rcu_free);
71053 } else {
71054 - __kmem_cache_free(b, c->size);
71055 + __kmem_cache_free(b, size);
71056 }
71057
71058 +#ifdef CONFIG_PAX_USERCOPY
71059 + trace_kfree(_RET_IP_, b);
71060 +#else
71061 trace_kmem_cache_free(_RET_IP_, b);
71062 +#endif
71063 +
71064 }
71065 EXPORT_SYMBOL(kmem_cache_free);
71066
71067 diff --git a/mm/slub.c b/mm/slub.c
71068 index 1a919f0..1739c9b 100644
71069 --- a/mm/slub.c
71070 +++ b/mm/slub.c
71071 @@ -208,7 +208,7 @@ struct track {
71072
71073 enum track_item { TRACK_ALLOC, TRACK_FREE };
71074
71075 -#ifdef CONFIG_SYSFS
71076 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
71077 static int sysfs_slab_add(struct kmem_cache *);
71078 static int sysfs_slab_alias(struct kmem_cache *, const char *);
71079 static void sysfs_slab_remove(struct kmem_cache *);
71080 @@ -530,7 +530,7 @@ static void print_track(const char *s, struct track *t)
71081 if (!t->addr)
71082 return;
71083
71084 - printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
71085 + printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
71086 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
71087 #ifdef CONFIG_STACKTRACE
71088 {
71089 @@ -2559,6 +2559,8 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
71090
71091 page = virt_to_head_page(x);
71092
71093 + BUG_ON(!PageSlab(page));
71094 +
71095 slab_free(s, page, x, _RET_IP_);
71096
71097 trace_kmem_cache_free(_RET_IP_, x);
71098 @@ -2592,7 +2594,7 @@ static int slub_min_objects;
71099 * Merge control. If this is set then no merging of slab caches will occur.
71100 * (Could be removed. This was introduced to pacify the merge skeptics.)
71101 */
71102 -static int slub_nomerge;
71103 +static int slub_nomerge = 1;
71104
71105 /*
71106 * Calculate the order of allocation given an slab object size.
71107 @@ -3042,7 +3044,7 @@ static int kmem_cache_open(struct kmem_cache *s,
71108 else
71109 s->cpu_partial = 30;
71110
71111 - s->refcount = 1;
71112 + atomic_set(&s->refcount, 1);
71113 #ifdef CONFIG_NUMA
71114 s->remote_node_defrag_ratio = 1000;
71115 #endif
71116 @@ -3146,8 +3148,7 @@ static inline int kmem_cache_close(struct kmem_cache *s)
71117 void kmem_cache_destroy(struct kmem_cache *s)
71118 {
71119 down_write(&slub_lock);
71120 - s->refcount--;
71121 - if (!s->refcount) {
71122 + if (atomic_dec_and_test(&s->refcount)) {
71123 list_del(&s->list);
71124 up_write(&slub_lock);
71125 if (kmem_cache_close(s)) {
71126 @@ -3358,6 +3359,50 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
71127 EXPORT_SYMBOL(__kmalloc_node);
71128 #endif
71129
71130 +void check_object_size(const void *ptr, unsigned long n, bool to)
71131 +{
71132 +
71133 +#ifdef CONFIG_PAX_USERCOPY
71134 + struct page *page;
71135 + struct kmem_cache *s = NULL;
71136 + unsigned long offset;
71137 + const char *type;
71138 +
71139 + if (!n)
71140 + return;
71141 +
71142 + type = "<null>";
71143 + if (ZERO_OR_NULL_PTR(ptr))
71144 + goto report;
71145 +
71146 + if (!virt_addr_valid(ptr))
71147 + return;
71148 +
71149 + page = virt_to_head_page(ptr);
71150 +
71151 + type = "<process stack>";
71152 + if (!PageSlab(page)) {
71153 + if (object_is_on_stack(ptr, n) == -1)
71154 + goto report;
71155 + return;
71156 + }
71157 +
71158 + s = page->slab;
71159 + type = s->name;
71160 + if (!(s->flags & SLAB_USERCOPY))
71161 + goto report;
71162 +
71163 + offset = (ptr - page_address(page)) % s->size;
71164 + if (offset <= s->objsize && n <= s->objsize - offset)
71165 + return;
71166 +
71167 +report:
71168 + pax_report_usercopy(ptr, n, to, type);
71169 +#endif
71170 +
71171 +}
71172 +EXPORT_SYMBOL(check_object_size);
71173 +
71174 size_t ksize(const void *object)
71175 {
71176 struct page *page;
71177 @@ -3632,7 +3677,7 @@ static void __init kmem_cache_bootstrap_fixup(struct kmem_cache *s)
71178 int node;
71179
71180 list_add(&s->list, &slab_caches);
71181 - s->refcount = -1;
71182 + atomic_set(&s->refcount, -1);
71183
71184 for_each_node_state(node, N_NORMAL_MEMORY) {
71185 struct kmem_cache_node *n = get_node(s, node);
71186 @@ -3749,17 +3794,17 @@ void __init kmem_cache_init(void)
71187
71188 /* Caches that are not of the two-to-the-power-of size */
71189 if (KMALLOC_MIN_SIZE <= 32) {
71190 - kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, 0);
71191 + kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, SLAB_USERCOPY);
71192 caches++;
71193 }
71194
71195 if (KMALLOC_MIN_SIZE <= 64) {
71196 - kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, 0);
71197 + kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, SLAB_USERCOPY);
71198 caches++;
71199 }
71200
71201 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
71202 - kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, 0);
71203 + kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, SLAB_USERCOPY);
71204 caches++;
71205 }
71206
71207 @@ -3827,7 +3872,7 @@ static int slab_unmergeable(struct kmem_cache *s)
71208 /*
71209 * We may have set a slab to be unmergeable during bootstrap.
71210 */
71211 - if (s->refcount < 0)
71212 + if (atomic_read(&s->refcount) < 0)
71213 return 1;
71214
71215 return 0;
71216 @@ -3886,7 +3931,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
71217 down_write(&slub_lock);
71218 s = find_mergeable(size, align, flags, name, ctor);
71219 if (s) {
71220 - s->refcount++;
71221 + atomic_inc(&s->refcount);
71222 /*
71223 * Adjust the object sizes so that we clear
71224 * the complete object on kzalloc.
71225 @@ -3895,7 +3940,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
71226 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
71227
71228 if (sysfs_slab_alias(s, name)) {
71229 - s->refcount--;
71230 + atomic_dec(&s->refcount);
71231 goto err;
71232 }
71233 up_write(&slub_lock);
71234 @@ -4023,7 +4068,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
71235 }
71236 #endif
71237
71238 -#ifdef CONFIG_SYSFS
71239 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
71240 static int count_inuse(struct page *page)
71241 {
71242 return page->inuse;
71243 @@ -4410,12 +4455,12 @@ static void resiliency_test(void)
71244 validate_slab_cache(kmalloc_caches[9]);
71245 }
71246 #else
71247 -#ifdef CONFIG_SYSFS
71248 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
71249 static void resiliency_test(void) {};
71250 #endif
71251 #endif
71252
71253 -#ifdef CONFIG_SYSFS
71254 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
71255 enum slab_stat_type {
71256 SL_ALL, /* All slabs */
71257 SL_PARTIAL, /* Only partially allocated slabs */
71258 @@ -4656,7 +4701,7 @@ SLAB_ATTR_RO(ctor);
71259
71260 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
71261 {
71262 - return sprintf(buf, "%d\n", s->refcount - 1);
71263 + return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
71264 }
71265 SLAB_ATTR_RO(aliases);
71266
71267 @@ -5223,6 +5268,7 @@ static char *create_unique_id(struct kmem_cache *s)
71268 return name;
71269 }
71270
71271 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
71272 static int sysfs_slab_add(struct kmem_cache *s)
71273 {
71274 int err;
71275 @@ -5285,6 +5331,7 @@ static void sysfs_slab_remove(struct kmem_cache *s)
71276 kobject_del(&s->kobj);
71277 kobject_put(&s->kobj);
71278 }
71279 +#endif
71280
71281 /*
71282 * Need to buffer aliases during bootup until sysfs becomes
71283 @@ -5298,6 +5345,7 @@ struct saved_alias {
71284
71285 static struct saved_alias *alias_list;
71286
71287 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
71288 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
71289 {
71290 struct saved_alias *al;
71291 @@ -5320,6 +5368,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
71292 alias_list = al;
71293 return 0;
71294 }
71295 +#endif
71296
71297 static int __init slab_sysfs_init(void)
71298 {
71299 diff --git a/mm/swap.c b/mm/swap.c
71300 index a91caf7..b887e735 100644
71301 --- a/mm/swap.c
71302 +++ b/mm/swap.c
71303 @@ -31,6 +31,7 @@
71304 #include <linux/backing-dev.h>
71305 #include <linux/memcontrol.h>
71306 #include <linux/gfp.h>
71307 +#include <linux/hugetlb.h>
71308
71309 #include "internal.h"
71310
71311 @@ -71,6 +72,8 @@ static void __put_compound_page(struct page *page)
71312
71313 __page_cache_release(page);
71314 dtor = get_compound_page_dtor(page);
71315 + if (!PageHuge(page))
71316 + BUG_ON(dtor != free_compound_page);
71317 (*dtor)(page);
71318 }
71319
71320 diff --git a/mm/swapfile.c b/mm/swapfile.c
71321 index b1cd120..aaae885 100644
71322 --- a/mm/swapfile.c
71323 +++ b/mm/swapfile.c
71324 @@ -61,7 +61,7 @@ static DEFINE_MUTEX(swapon_mutex);
71325
71326 static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
71327 /* Activity counter to indicate that a swapon or swapoff has occurred */
71328 -static atomic_t proc_poll_event = ATOMIC_INIT(0);
71329 +static atomic_unchecked_t proc_poll_event = ATOMIC_INIT(0);
71330
71331 static inline unsigned char swap_count(unsigned char ent)
71332 {
71333 @@ -1670,7 +1670,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
71334 }
71335 filp_close(swap_file, NULL);
71336 err = 0;
71337 - atomic_inc(&proc_poll_event);
71338 + atomic_inc_unchecked(&proc_poll_event);
71339 wake_up_interruptible(&proc_poll_wait);
71340
71341 out_dput:
71342 @@ -1686,8 +1686,8 @@ static unsigned swaps_poll(struct file *file, poll_table *wait)
71343
71344 poll_wait(file, &proc_poll_wait, wait);
71345
71346 - if (seq->poll_event != atomic_read(&proc_poll_event)) {
71347 - seq->poll_event = atomic_read(&proc_poll_event);
71348 + if (seq->poll_event != atomic_read_unchecked(&proc_poll_event)) {
71349 + seq->poll_event = atomic_read_unchecked(&proc_poll_event);
71350 return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
71351 }
71352
71353 @@ -1785,7 +1785,7 @@ static int swaps_open(struct inode *inode, struct file *file)
71354 return ret;
71355
71356 seq = file->private_data;
71357 - seq->poll_event = atomic_read(&proc_poll_event);
71358 + seq->poll_event = atomic_read_unchecked(&proc_poll_event);
71359 return 0;
71360 }
71361
71362 @@ -2123,7 +2123,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
71363 (p->flags & SWP_DISCARDABLE) ? "D" : "");
71364
71365 mutex_unlock(&swapon_mutex);
71366 - atomic_inc(&proc_poll_event);
71367 + atomic_inc_unchecked(&proc_poll_event);
71368 wake_up_interruptible(&proc_poll_wait);
71369
71370 if (S_ISREG(inode->i_mode))
71371 diff --git a/mm/util.c b/mm/util.c
71372 index 136ac4f..5117eef 100644
71373 --- a/mm/util.c
71374 +++ b/mm/util.c
71375 @@ -114,6 +114,7 @@ EXPORT_SYMBOL(memdup_user);
71376 * allocated buffer. Use this if you don't want to free the buffer immediately
71377 * like, for example, with RCU.
71378 */
71379 +#undef __krealloc
71380 void *__krealloc(const void *p, size_t new_size, gfp_t flags)
71381 {
71382 void *ret;
71383 @@ -147,6 +148,7 @@ EXPORT_SYMBOL(__krealloc);
71384 * behaves exactly like kmalloc(). If @size is 0 and @p is not a
71385 * %NULL pointer, the object pointed to is freed.
71386 */
71387 +#undef krealloc
71388 void *krealloc(const void *p, size_t new_size, gfp_t flags)
71389 {
71390 void *ret;
71391 @@ -243,6 +245,12 @@ void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
71392 void arch_pick_mmap_layout(struct mm_struct *mm)
71393 {
71394 mm->mmap_base = TASK_UNMAPPED_BASE;
71395 +
71396 +#ifdef CONFIG_PAX_RANDMMAP
71397 + if (mm->pax_flags & MF_PAX_RANDMMAP)
71398 + mm->mmap_base += mm->delta_mmap;
71399 +#endif
71400 +
71401 mm->get_unmapped_area = arch_get_unmapped_area;
71402 mm->unmap_area = arch_unmap_area;
71403 }
71404 diff --git a/mm/vmalloc.c b/mm/vmalloc.c
71405 index 27be2f0..0aef2c2 100644
71406 --- a/mm/vmalloc.c
71407 +++ b/mm/vmalloc.c
71408 @@ -39,8 +39,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
71409
71410 pte = pte_offset_kernel(pmd, addr);
71411 do {
71412 - pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
71413 - WARN_ON(!pte_none(ptent) && !pte_present(ptent));
71414 +
71415 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
71416 + if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
71417 + BUG_ON(!pte_exec(*pte));
71418 + set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
71419 + continue;
71420 + }
71421 +#endif
71422 +
71423 + {
71424 + pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
71425 + WARN_ON(!pte_none(ptent) && !pte_present(ptent));
71426 + }
71427 } while (pte++, addr += PAGE_SIZE, addr != end);
71428 }
71429
71430 @@ -91,6 +102,7 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
71431 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
71432 {
71433 pte_t *pte;
71434 + int ret = -ENOMEM;
71435
71436 /*
71437 * nr is a running index into the array which helps higher level
71438 @@ -100,17 +112,30 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
71439 pte = pte_alloc_kernel(pmd, addr);
71440 if (!pte)
71441 return -ENOMEM;
71442 +
71443 + pax_open_kernel();
71444 do {
71445 struct page *page = pages[*nr];
71446
71447 - if (WARN_ON(!pte_none(*pte)))
71448 - return -EBUSY;
71449 - if (WARN_ON(!page))
71450 - return -ENOMEM;
71451 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
71452 + if (pgprot_val(prot) & _PAGE_NX)
71453 +#endif
71454 +
71455 + if (WARN_ON(!pte_none(*pte))) {
71456 + ret = -EBUSY;
71457 + goto out;
71458 + }
71459 + if (WARN_ON(!page)) {
71460 + ret = -ENOMEM;
71461 + goto out;
71462 + }
71463 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
71464 (*nr)++;
71465 } while (pte++, addr += PAGE_SIZE, addr != end);
71466 - return 0;
71467 + ret = 0;
71468 +out:
71469 + pax_close_kernel();
71470 + return ret;
71471 }
71472
71473 static int vmap_pmd_range(pud_t *pud, unsigned long addr,
71474 @@ -191,11 +216,20 @@ int is_vmalloc_or_module_addr(const void *x)
71475 * and fall back on vmalloc() if that fails. Others
71476 * just put it in the vmalloc space.
71477 */
71478 -#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
71479 +#ifdef CONFIG_MODULES
71480 +#ifdef MODULES_VADDR
71481 unsigned long addr = (unsigned long)x;
71482 if (addr >= MODULES_VADDR && addr < MODULES_END)
71483 return 1;
71484 #endif
71485 +
71486 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
71487 + if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
71488 + return 1;
71489 +#endif
71490 +
71491 +#endif
71492 +
71493 return is_vmalloc_addr(x);
71494 }
71495
71496 @@ -216,8 +250,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
71497
71498 if (!pgd_none(*pgd)) {
71499 pud_t *pud = pud_offset(pgd, addr);
71500 +#ifdef CONFIG_X86
71501 + if (!pud_large(*pud))
71502 +#endif
71503 if (!pud_none(*pud)) {
71504 pmd_t *pmd = pmd_offset(pud, addr);
71505 +#ifdef CONFIG_X86
71506 + if (!pmd_large(*pmd))
71507 +#endif
71508 if (!pmd_none(*pmd)) {
71509 pte_t *ptep, pte;
71510
71511 @@ -1294,6 +1334,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
71512 struct vm_struct *area;
71513
71514 BUG_ON(in_interrupt());
71515 +
71516 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
71517 + if (flags & VM_KERNEXEC) {
71518 + if (start != VMALLOC_START || end != VMALLOC_END)
71519 + return NULL;
71520 + start = (unsigned long)MODULES_EXEC_VADDR;
71521 + end = (unsigned long)MODULES_EXEC_END;
71522 + }
71523 +#endif
71524 +
71525 if (flags & VM_IOREMAP) {
71526 int bit = fls(size);
71527
71528 @@ -1526,6 +1576,11 @@ void *vmap(struct page **pages, unsigned int count,
71529 if (count > totalram_pages)
71530 return NULL;
71531
71532 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
71533 + if (!(pgprot_val(prot) & _PAGE_NX))
71534 + flags |= VM_KERNEXEC;
71535 +#endif
71536 +
71537 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
71538 __builtin_return_address(0));
71539 if (!area)
71540 @@ -1627,6 +1682,13 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
71541 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
71542 goto fail;
71543
71544 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
71545 + if (!(pgprot_val(prot) & _PAGE_NX))
71546 + area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST | VM_KERNEXEC,
71547 + VMALLOC_START, VMALLOC_END, node, gfp_mask, caller);
71548 + else
71549 +#endif
71550 +
71551 area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST,
71552 start, end, node, gfp_mask, caller);
71553 if (!area)
71554 @@ -1679,6 +1741,7 @@ static void *__vmalloc_node(unsigned long size, unsigned long align,
71555 gfp_mask, prot, node, caller);
71556 }
71557
71558 +#undef __vmalloc
71559 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
71560 {
71561 return __vmalloc_node(size, 1, gfp_mask, prot, -1,
71562 @@ -1702,6 +1765,7 @@ static inline void *__vmalloc_node_flags(unsigned long size,
71563 * For tight control over page level allocator and protection flags
71564 * use __vmalloc() instead.
71565 */
71566 +#undef vmalloc
71567 void *vmalloc(unsigned long size)
71568 {
71569 return __vmalloc_node_flags(size, -1, GFP_KERNEL | __GFP_HIGHMEM);
71570 @@ -1718,6 +1782,7 @@ EXPORT_SYMBOL(vmalloc);
71571 * For tight control over page level allocator and protection flags
71572 * use __vmalloc() instead.
71573 */
71574 +#undef vzalloc
71575 void *vzalloc(unsigned long size)
71576 {
71577 return __vmalloc_node_flags(size, -1,
71578 @@ -1732,6 +1797,7 @@ EXPORT_SYMBOL(vzalloc);
71579 * The resulting memory area is zeroed so it can be mapped to userspace
71580 * without leaking data.
71581 */
71582 +#undef vmalloc_user
71583 void *vmalloc_user(unsigned long size)
71584 {
71585 struct vm_struct *area;
71586 @@ -1759,6 +1825,7 @@ EXPORT_SYMBOL(vmalloc_user);
71587 * For tight control over page level allocator and protection flags
71588 * use __vmalloc() instead.
71589 */
71590 +#undef vmalloc_node
71591 void *vmalloc_node(unsigned long size, int node)
71592 {
71593 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
71594 @@ -1778,6 +1845,7 @@ EXPORT_SYMBOL(vmalloc_node);
71595 * For tight control over page level allocator and protection flags
71596 * use __vmalloc_node() instead.
71597 */
71598 +#undef vzalloc_node
71599 void *vzalloc_node(unsigned long size, int node)
71600 {
71601 return __vmalloc_node_flags(size, node,
71602 @@ -1800,10 +1868,10 @@ EXPORT_SYMBOL(vzalloc_node);
71603 * For tight control over page level allocator and protection flags
71604 * use __vmalloc() instead.
71605 */
71606 -
71607 +#undef vmalloc_exec
71608 void *vmalloc_exec(unsigned long size)
71609 {
71610 - return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
71611 + return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
71612 -1, __builtin_return_address(0));
71613 }
71614
71615 @@ -1822,6 +1890,7 @@ void *vmalloc_exec(unsigned long size)
71616 * Allocate enough 32bit PA addressable pages to cover @size from the
71617 * page level allocator and map them into contiguous kernel virtual space.
71618 */
71619 +#undef vmalloc_32
71620 void *vmalloc_32(unsigned long size)
71621 {
71622 return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
71623 @@ -1836,6 +1905,7 @@ EXPORT_SYMBOL(vmalloc_32);
71624 * The resulting memory area is 32bit addressable and zeroed so it can be
71625 * mapped to userspace without leaking data.
71626 */
71627 +#undef vmalloc_32_user
71628 void *vmalloc_32_user(unsigned long size)
71629 {
71630 struct vm_struct *area;
71631 @@ -2098,6 +2168,8 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
71632 unsigned long uaddr = vma->vm_start;
71633 unsigned long usize = vma->vm_end - vma->vm_start;
71634
71635 + BUG_ON(vma->vm_mirror);
71636 +
71637 if ((PAGE_SIZE-1) & (unsigned long)addr)
71638 return -EINVAL;
71639
71640 diff --git a/mm/vmstat.c b/mm/vmstat.c
71641 index 8fd603b..cf0d930 100644
71642 --- a/mm/vmstat.c
71643 +++ b/mm/vmstat.c
71644 @@ -78,7 +78,7 @@ void vm_events_fold_cpu(int cpu)
71645 *
71646 * vm_stat contains the global counters
71647 */
71648 -atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
71649 +atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
71650 EXPORT_SYMBOL(vm_stat);
71651
71652 #ifdef CONFIG_SMP
71653 @@ -454,7 +454,7 @@ void refresh_cpu_vm_stats(int cpu)
71654 v = p->vm_stat_diff[i];
71655 p->vm_stat_diff[i] = 0;
71656 local_irq_restore(flags);
71657 - atomic_long_add(v, &zone->vm_stat[i]);
71658 + atomic_long_add_unchecked(v, &zone->vm_stat[i]);
71659 global_diff[i] += v;
71660 #ifdef CONFIG_NUMA
71661 /* 3 seconds idle till flush */
71662 @@ -492,7 +492,7 @@ void refresh_cpu_vm_stats(int cpu)
71663
71664 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
71665 if (global_diff[i])
71666 - atomic_long_add(global_diff[i], &vm_stat[i]);
71667 + atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
71668 }
71669
71670 #endif
71671 @@ -1208,10 +1208,20 @@ static int __init setup_vmstat(void)
71672 start_cpu_timer(cpu);
71673 #endif
71674 #ifdef CONFIG_PROC_FS
71675 - proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
71676 - proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
71677 - proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
71678 - proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
71679 + {
71680 + mode_t gr_mode = S_IRUGO;
71681 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
71682 + gr_mode = S_IRUSR;
71683 +#endif
71684 + proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
71685 + proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
71686 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
71687 + proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
71688 +#else
71689 + proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
71690 +#endif
71691 + proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
71692 + }
71693 #endif
71694 return 0;
71695 }
71696 diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
71697 index 5471628..cef8398 100644
71698 --- a/net/8021q/vlan.c
71699 +++ b/net/8021q/vlan.c
71700 @@ -588,8 +588,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
71701 err = -EPERM;
71702 if (!capable(CAP_NET_ADMIN))
71703 break;
71704 - if ((args.u.name_type >= 0) &&
71705 - (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
71706 + if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
71707 struct vlan_net *vn;
71708
71709 vn = net_generic(net, vlan_net_id);
71710 diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
71711 index fdfdb57..38d368c 100644
71712 --- a/net/9p/trans_fd.c
71713 +++ b/net/9p/trans_fd.c
71714 @@ -423,7 +423,7 @@ static int p9_fd_write(struct p9_client *client, void *v, int len)
71715 oldfs = get_fs();
71716 set_fs(get_ds());
71717 /* The cast to a user pointer is valid due to the set_fs() */
71718 - ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
71719 + ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos);
71720 set_fs(oldfs);
71721
71722 if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
71723 diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c
71724 index f41f026..fe76ea8 100644
71725 --- a/net/atm/atm_misc.c
71726 +++ b/net/atm/atm_misc.c
71727 @@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int truesize)
71728 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
71729 return 1;
71730 atm_return(vcc, truesize);
71731 - atomic_inc(&vcc->stats->rx_drop);
71732 + atomic_inc_unchecked(&vcc->stats->rx_drop);
71733 return 0;
71734 }
71735 EXPORT_SYMBOL(atm_charge);
71736 @@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc, int pdu_size,
71737 }
71738 }
71739 atm_return(vcc, guess);
71740 - atomic_inc(&vcc->stats->rx_drop);
71741 + atomic_inc_unchecked(&vcc->stats->rx_drop);
71742 return NULL;
71743 }
71744 EXPORT_SYMBOL(atm_alloc_charge);
71745 @@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal);
71746
71747 void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
71748 {
71749 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
71750 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
71751 __SONET_ITEMS
71752 #undef __HANDLE_ITEM
71753 }
71754 @@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats);
71755
71756 void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
71757 {
71758 -#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
71759 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
71760 __SONET_ITEMS
71761 #undef __HANDLE_ITEM
71762 }
71763 diff --git a/net/atm/lec.h b/net/atm/lec.h
71764 index dfc0719..47c5322 100644
71765 --- a/net/atm/lec.h
71766 +++ b/net/atm/lec.h
71767 @@ -48,7 +48,7 @@ struct lane2_ops {
71768 const u8 *tlvs, u32 sizeoftlvs);
71769 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
71770 const u8 *tlvs, u32 sizeoftlvs);
71771 -};
71772 +} __no_const;
71773
71774 /*
71775 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
71776 diff --git a/net/atm/mpc.h b/net/atm/mpc.h
71777 index 0919a88..a23d54e 100644
71778 --- a/net/atm/mpc.h
71779 +++ b/net/atm/mpc.h
71780 @@ -33,7 +33,7 @@ struct mpoa_client {
71781 struct mpc_parameters parameters; /* parameters for this client */
71782
71783 const struct net_device_ops *old_ops;
71784 - struct net_device_ops new_ops;
71785 + net_device_ops_no_const new_ops;
71786 };
71787
71788
71789 diff --git a/net/atm/proc.c b/net/atm/proc.c
71790 index 0d020de..011c7bb 100644
71791 --- a/net/atm/proc.c
71792 +++ b/net/atm/proc.c
71793 @@ -45,9 +45,9 @@ static void add_stats(struct seq_file *seq, const char *aal,
71794 const struct k_atm_aal_stats *stats)
71795 {
71796 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
71797 - atomic_read(&stats->tx), atomic_read(&stats->tx_err),
71798 - atomic_read(&stats->rx), atomic_read(&stats->rx_err),
71799 - atomic_read(&stats->rx_drop));
71800 + atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
71801 + atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
71802 + atomic_read_unchecked(&stats->rx_drop));
71803 }
71804
71805 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
71806 diff --git a/net/atm/resources.c b/net/atm/resources.c
71807 index 23f45ce..c748f1a 100644
71808 --- a/net/atm/resources.c
71809 +++ b/net/atm/resources.c
71810 @@ -160,7 +160,7 @@ EXPORT_SYMBOL(atm_dev_deregister);
71811 static void copy_aal_stats(struct k_atm_aal_stats *from,
71812 struct atm_aal_stats *to)
71813 {
71814 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
71815 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
71816 __AAL_STAT_ITEMS
71817 #undef __HANDLE_ITEM
71818 }
71819 @@ -168,7 +168,7 @@ static void copy_aal_stats(struct k_atm_aal_stats *from,
71820 static void subtract_aal_stats(struct k_atm_aal_stats *from,
71821 struct atm_aal_stats *to)
71822 {
71823 -#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
71824 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
71825 __AAL_STAT_ITEMS
71826 #undef __HANDLE_ITEM
71827 }
71828 diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
71829 index 3512e25..2b33401 100644
71830 --- a/net/batman-adv/bat_iv_ogm.c
71831 +++ b/net/batman-adv/bat_iv_ogm.c
71832 @@ -541,7 +541,7 @@ void bat_ogm_schedule(struct hard_iface *hard_iface, int tt_num_changes)
71833
71834 /* change sequence number to network order */
71835 batman_ogm_packet->seqno =
71836 - htonl((uint32_t)atomic_read(&hard_iface->seqno));
71837 + htonl((uint32_t)atomic_read_unchecked(&hard_iface->seqno));
71838
71839 batman_ogm_packet->ttvn = atomic_read(&bat_priv->ttvn);
71840 batman_ogm_packet->tt_crc = htons((uint16_t)
71841 @@ -561,7 +561,7 @@ void bat_ogm_schedule(struct hard_iface *hard_iface, int tt_num_changes)
71842 else
71843 batman_ogm_packet->gw_flags = NO_FLAGS;
71844
71845 - atomic_inc(&hard_iface->seqno);
71846 + atomic_inc_unchecked(&hard_iface->seqno);
71847
71848 slide_own_bcast_window(hard_iface);
71849 bat_ogm_queue_add(bat_priv, hard_iface->packet_buff,
71850 @@ -922,7 +922,7 @@ static void bat_ogm_process(const struct ethhdr *ethhdr,
71851 return;
71852
71853 /* could be changed by schedule_own_packet() */
71854 - if_incoming_seqno = atomic_read(&if_incoming->seqno);
71855 + if_incoming_seqno = atomic_read_unchecked(&if_incoming->seqno);
71856
71857 has_directlink_flag = (batman_ogm_packet->flags & DIRECTLINK ? 1 : 0);
71858
71859 diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
71860 index 7704df4..beb4e16 100644
71861 --- a/net/batman-adv/hard-interface.c
71862 +++ b/net/batman-adv/hard-interface.c
71863 @@ -326,8 +326,8 @@ int hardif_enable_interface(struct hard_iface *hard_iface,
71864 hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
71865 dev_add_pack(&hard_iface->batman_adv_ptype);
71866
71867 - atomic_set(&hard_iface->seqno, 1);
71868 - atomic_set(&hard_iface->frag_seqno, 1);
71869 + atomic_set_unchecked(&hard_iface->seqno, 1);
71870 + atomic_set_unchecked(&hard_iface->frag_seqno, 1);
71871 bat_info(hard_iface->soft_iface, "Adding interface: %s\n",
71872 hard_iface->net_dev->name);
71873
71874 diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
71875 index f9cc957..efd9dae 100644
71876 --- a/net/batman-adv/soft-interface.c
71877 +++ b/net/batman-adv/soft-interface.c
71878 @@ -634,7 +634,7 @@ static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
71879
71880 /* set broadcast sequence number */
71881 bcast_packet->seqno =
71882 - htonl(atomic_inc_return(&bat_priv->bcast_seqno));
71883 + htonl(atomic_inc_return_unchecked(&bat_priv->bcast_seqno));
71884
71885 add_bcast_packet_to_list(bat_priv, skb, 1);
71886
71887 @@ -828,7 +828,7 @@ struct net_device *softif_create(const char *name)
71888 atomic_set(&bat_priv->batman_queue_left, BATMAN_QUEUE_LEN);
71889
71890 atomic_set(&bat_priv->mesh_state, MESH_INACTIVE);
71891 - atomic_set(&bat_priv->bcast_seqno, 1);
71892 + atomic_set_unchecked(&bat_priv->bcast_seqno, 1);
71893 atomic_set(&bat_priv->ttvn, 0);
71894 atomic_set(&bat_priv->tt_local_changes, 0);
71895 atomic_set(&bat_priv->tt_ogm_append_cnt, 0);
71896 diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
71897 index ab8d0fe..ceba3fd 100644
71898 --- a/net/batman-adv/types.h
71899 +++ b/net/batman-adv/types.h
71900 @@ -38,8 +38,8 @@ struct hard_iface {
71901 int16_t if_num;
71902 char if_status;
71903 struct net_device *net_dev;
71904 - atomic_t seqno;
71905 - atomic_t frag_seqno;
71906 + atomic_unchecked_t seqno;
71907 + atomic_unchecked_t frag_seqno;
71908 unsigned char *packet_buff;
71909 int packet_len;
71910 struct kobject *hardif_obj;
71911 @@ -154,7 +154,7 @@ struct bat_priv {
71912 atomic_t orig_interval; /* uint */
71913 atomic_t hop_penalty; /* uint */
71914 atomic_t log_level; /* uint */
71915 - atomic_t bcast_seqno;
71916 + atomic_unchecked_t bcast_seqno;
71917 atomic_t bcast_queue_left;
71918 atomic_t batman_queue_left;
71919 atomic_t ttvn; /* translation table version number */
71920 diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c
71921 index 07d1c1d..7e9bea9 100644
71922 --- a/net/batman-adv/unicast.c
71923 +++ b/net/batman-adv/unicast.c
71924 @@ -264,7 +264,7 @@ int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
71925 frag1->flags = UNI_FRAG_HEAD | large_tail;
71926 frag2->flags = large_tail;
71927
71928 - seqno = atomic_add_return(2, &hard_iface->frag_seqno);
71929 + seqno = atomic_add_return_unchecked(2, &hard_iface->frag_seqno);
71930 frag1->seqno = htons(seqno - 1);
71931 frag2->seqno = htons(seqno);
71932
71933 diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
71934 index c1c597e..05ebb40 100644
71935 --- a/net/bluetooth/hci_conn.c
71936 +++ b/net/bluetooth/hci_conn.c
71937 @@ -234,7 +234,7 @@ void hci_le_ltk_reply(struct hci_conn *conn, u8 ltk[16])
71938 memset(&cp, 0, sizeof(cp));
71939
71940 cp.handle = cpu_to_le16(conn->handle);
71941 - memcpy(cp.ltk, ltk, sizeof(ltk));
71942 + memcpy(cp.ltk, ltk, sizeof(cp.ltk));
71943
71944 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
71945 }
71946 diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
71947 index 17b5b1c..826d872 100644
71948 --- a/net/bluetooth/l2cap_core.c
71949 +++ b/net/bluetooth/l2cap_core.c
71950 @@ -2176,8 +2176,10 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, voi
71951 break;
71952
71953 case L2CAP_CONF_RFC:
71954 - if (olen == sizeof(rfc))
71955 - memcpy(&rfc, (void *)val, olen);
71956 + if (olen != sizeof(rfc))
71957 + break;
71958 +
71959 + memcpy(&rfc, (void *)val, olen);
71960
71961 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
71962 rfc.mode != chan->mode)
71963 @@ -2265,8 +2267,10 @@ static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
71964
71965 switch (type) {
71966 case L2CAP_CONF_RFC:
71967 - if (olen == sizeof(rfc))
71968 - memcpy(&rfc, (void *)val, olen);
71969 + if (olen != sizeof(rfc))
71970 + break;
71971 +
71972 + memcpy(&rfc, (void *)val, olen);
71973 goto done;
71974 }
71975 }
71976 diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
71977 index a5f4e57..910ee6d 100644
71978 --- a/net/bridge/br_multicast.c
71979 +++ b/net/bridge/br_multicast.c
71980 @@ -1485,7 +1485,7 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
71981 nexthdr = ip6h->nexthdr;
71982 offset = ipv6_skip_exthdr(skb, sizeof(*ip6h), &nexthdr);
71983
71984 - if (offset < 0 || nexthdr != IPPROTO_ICMPV6)
71985 + if (nexthdr != IPPROTO_ICMPV6)
71986 return 0;
71987
71988 /* Okay, we found ICMPv6 header */
71989 diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
71990 index 5864cc4..121f3a3 100644
71991 --- a/net/bridge/netfilter/ebtables.c
71992 +++ b/net/bridge/netfilter/ebtables.c
71993 @@ -1513,7 +1513,7 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
71994 tmp.valid_hooks = t->table->valid_hooks;
71995 }
71996 mutex_unlock(&ebt_mutex);
71997 - if (copy_to_user(user, &tmp, *len) != 0){
71998 + if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0){
71999 BUGPRINT("c2u Didn't work\n");
72000 ret = -EFAULT;
72001 break;
72002 diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
72003 index a986280..13444a1 100644
72004 --- a/net/caif/caif_socket.c
72005 +++ b/net/caif/caif_socket.c
72006 @@ -48,19 +48,20 @@ static struct dentry *debugfsdir;
72007 #ifdef CONFIG_DEBUG_FS
72008 struct debug_fs_counter {
72009 atomic_t caif_nr_socks;
72010 - atomic_t caif_sock_create;
72011 - atomic_t num_connect_req;
72012 - atomic_t num_connect_resp;
72013 - atomic_t num_connect_fail_resp;
72014 - atomic_t num_disconnect;
72015 - atomic_t num_remote_shutdown_ind;
72016 - atomic_t num_tx_flow_off_ind;
72017 - atomic_t num_tx_flow_on_ind;
72018 - atomic_t num_rx_flow_off;
72019 - atomic_t num_rx_flow_on;
72020 + atomic_unchecked_t caif_sock_create;
72021 + atomic_unchecked_t num_connect_req;
72022 + atomic_unchecked_t num_connect_resp;
72023 + atomic_unchecked_t num_connect_fail_resp;
72024 + atomic_unchecked_t num_disconnect;
72025 + atomic_unchecked_t num_remote_shutdown_ind;
72026 + atomic_unchecked_t num_tx_flow_off_ind;
72027 + atomic_unchecked_t num_tx_flow_on_ind;
72028 + atomic_unchecked_t num_rx_flow_off;
72029 + atomic_unchecked_t num_rx_flow_on;
72030 };
72031 static struct debug_fs_counter cnt;
72032 #define dbfs_atomic_inc(v) atomic_inc_return(v)
72033 +#define dbfs_atomic_inc_unchecked(v) atomic_inc_return_unchecked(v)
72034 #define dbfs_atomic_dec(v) atomic_dec_return(v)
72035 #else
72036 #define dbfs_atomic_inc(v) 0
72037 @@ -161,7 +162,7 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
72038 atomic_read(&cf_sk->sk.sk_rmem_alloc),
72039 sk_rcvbuf_lowwater(cf_sk));
72040 set_rx_flow_off(cf_sk);
72041 - dbfs_atomic_inc(&cnt.num_rx_flow_off);
72042 + dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_off);
72043 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
72044 }
72045
72046 @@ -172,7 +173,7 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
72047 set_rx_flow_off(cf_sk);
72048 if (net_ratelimit())
72049 pr_debug("sending flow OFF due to rmem_schedule\n");
72050 - dbfs_atomic_inc(&cnt.num_rx_flow_off);
72051 + dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_off);
72052 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
72053 }
72054 skb->dev = NULL;
72055 @@ -233,14 +234,14 @@ static void caif_ctrl_cb(struct cflayer *layr,
72056 switch (flow) {
72057 case CAIF_CTRLCMD_FLOW_ON_IND:
72058 /* OK from modem to start sending again */
72059 - dbfs_atomic_inc(&cnt.num_tx_flow_on_ind);
72060 + dbfs_atomic_inc_unchecked(&cnt.num_tx_flow_on_ind);
72061 set_tx_flow_on(cf_sk);
72062 cf_sk->sk.sk_state_change(&cf_sk->sk);
72063 break;
72064
72065 case CAIF_CTRLCMD_FLOW_OFF_IND:
72066 /* Modem asks us to shut up */
72067 - dbfs_atomic_inc(&cnt.num_tx_flow_off_ind);
72068 + dbfs_atomic_inc_unchecked(&cnt.num_tx_flow_off_ind);
72069 set_tx_flow_off(cf_sk);
72070 cf_sk->sk.sk_state_change(&cf_sk->sk);
72071 break;
72072 @@ -249,7 +250,7 @@ static void caif_ctrl_cb(struct cflayer *layr,
72073 /* We're now connected */
72074 caif_client_register_refcnt(&cf_sk->layer,
72075 cfsk_hold, cfsk_put);
72076 - dbfs_atomic_inc(&cnt.num_connect_resp);
72077 + dbfs_atomic_inc_unchecked(&cnt.num_connect_resp);
72078 cf_sk->sk.sk_state = CAIF_CONNECTED;
72079 set_tx_flow_on(cf_sk);
72080 cf_sk->sk.sk_state_change(&cf_sk->sk);
72081 @@ -263,7 +264,7 @@ static void caif_ctrl_cb(struct cflayer *layr,
72082
72083 case CAIF_CTRLCMD_INIT_FAIL_RSP:
72084 /* Connect request failed */
72085 - dbfs_atomic_inc(&cnt.num_connect_fail_resp);
72086 + dbfs_atomic_inc_unchecked(&cnt.num_connect_fail_resp);
72087 cf_sk->sk.sk_err = ECONNREFUSED;
72088 cf_sk->sk.sk_state = CAIF_DISCONNECTED;
72089 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
72090 @@ -277,7 +278,7 @@ static void caif_ctrl_cb(struct cflayer *layr,
72091
72092 case CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND:
72093 /* Modem has closed this connection, or device is down. */
72094 - dbfs_atomic_inc(&cnt.num_remote_shutdown_ind);
72095 + dbfs_atomic_inc_unchecked(&cnt.num_remote_shutdown_ind);
72096 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
72097 cf_sk->sk.sk_err = ECONNRESET;
72098 set_rx_flow_on(cf_sk);
72099 @@ -297,7 +298,7 @@ static void caif_check_flow_release(struct sock *sk)
72100 return;
72101
72102 if (atomic_read(&sk->sk_rmem_alloc) <= sk_rcvbuf_lowwater(cf_sk)) {
72103 - dbfs_atomic_inc(&cnt.num_rx_flow_on);
72104 + dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_on);
72105 set_rx_flow_on(cf_sk);
72106 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_ON_REQ);
72107 }
72108 @@ -854,7 +855,7 @@ static int caif_connect(struct socket *sock, struct sockaddr *uaddr,
72109 /*ifindex = id of the interface.*/
72110 cf_sk->conn_req.ifindex = cf_sk->sk.sk_bound_dev_if;
72111
72112 - dbfs_atomic_inc(&cnt.num_connect_req);
72113 + dbfs_atomic_inc_unchecked(&cnt.num_connect_req);
72114 cf_sk->layer.receive = caif_sktrecv_cb;
72115
72116 err = caif_connect_client(sock_net(sk), &cf_sk->conn_req,
72117 @@ -943,7 +944,7 @@ static int caif_release(struct socket *sock)
72118 spin_unlock_bh(&sk->sk_receive_queue.lock);
72119 sock->sk = NULL;
72120
72121 - dbfs_atomic_inc(&cnt.num_disconnect);
72122 + dbfs_atomic_inc_unchecked(&cnt.num_disconnect);
72123
72124 WARN_ON(IS_ERR(cf_sk->debugfs_socket_dir));
72125 if (cf_sk->debugfs_socket_dir != NULL)
72126 @@ -1122,7 +1123,7 @@ static int caif_create(struct net *net, struct socket *sock, int protocol,
72127 cf_sk->conn_req.protocol = protocol;
72128 /* Increase the number of sockets created. */
72129 dbfs_atomic_inc(&cnt.caif_nr_socks);
72130 - num = dbfs_atomic_inc(&cnt.caif_sock_create);
72131 + num = dbfs_atomic_inc_unchecked(&cnt.caif_sock_create);
72132 #ifdef CONFIG_DEBUG_FS
72133 if (!IS_ERR(debugfsdir)) {
72134
72135 diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
72136 index 5cf5222..6f704ad 100644
72137 --- a/net/caif/cfctrl.c
72138 +++ b/net/caif/cfctrl.c
72139 @@ -9,6 +9,7 @@
72140 #include <linux/stddef.h>
72141 #include <linux/spinlock.h>
72142 #include <linux/slab.h>
72143 +#include <linux/sched.h>
72144 #include <net/caif/caif_layer.h>
72145 #include <net/caif/cfpkt.h>
72146 #include <net/caif/cfctrl.h>
72147 @@ -42,8 +43,8 @@ struct cflayer *cfctrl_create(void)
72148 memset(&dev_info, 0, sizeof(dev_info));
72149 dev_info.id = 0xff;
72150 cfsrvl_init(&this->serv, 0, &dev_info, false);
72151 - atomic_set(&this->req_seq_no, 1);
72152 - atomic_set(&this->rsp_seq_no, 1);
72153 + atomic_set_unchecked(&this->req_seq_no, 1);
72154 + atomic_set_unchecked(&this->rsp_seq_no, 1);
72155 this->serv.layer.receive = cfctrl_recv;
72156 sprintf(this->serv.layer.name, "ctrl");
72157 this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
72158 @@ -129,8 +130,8 @@ static void cfctrl_insert_req(struct cfctrl *ctrl,
72159 struct cfctrl_request_info *req)
72160 {
72161 spin_lock_bh(&ctrl->info_list_lock);
72162 - atomic_inc(&ctrl->req_seq_no);
72163 - req->sequence_no = atomic_read(&ctrl->req_seq_no);
72164 + atomic_inc_unchecked(&ctrl->req_seq_no);
72165 + req->sequence_no = atomic_read_unchecked(&ctrl->req_seq_no);
72166 list_add_tail(&req->list, &ctrl->list);
72167 spin_unlock_bh(&ctrl->info_list_lock);
72168 }
72169 @@ -148,7 +149,7 @@ static struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl,
72170 if (p != first)
72171 pr_warn("Requests are not received in order\n");
72172
72173 - atomic_set(&ctrl->rsp_seq_no,
72174 + atomic_set_unchecked(&ctrl->rsp_seq_no,
72175 p->sequence_no);
72176 list_del(&p->list);
72177 goto out;
72178 diff --git a/net/can/gw.c b/net/can/gw.c
72179 index 3d79b12..8de85fa 100644
72180 --- a/net/can/gw.c
72181 +++ b/net/can/gw.c
72182 @@ -96,7 +96,7 @@ struct cf_mod {
72183 struct {
72184 void (*xor)(struct can_frame *cf, struct cgw_csum_xor *xor);
72185 void (*crc8)(struct can_frame *cf, struct cgw_csum_crc8 *crc8);
72186 - } csumfunc;
72187 + } __no_const csumfunc;
72188 };
72189
72190
72191 diff --git a/net/compat.c b/net/compat.c
72192 index 6def90e..c6992fa 100644
72193 --- a/net/compat.c
72194 +++ b/net/compat.c
72195 @@ -71,9 +71,9 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg)
72196 __get_user(kmsg->msg_controllen, &umsg->msg_controllen) ||
72197 __get_user(kmsg->msg_flags, &umsg->msg_flags))
72198 return -EFAULT;
72199 - kmsg->msg_name = compat_ptr(tmp1);
72200 - kmsg->msg_iov = compat_ptr(tmp2);
72201 - kmsg->msg_control = compat_ptr(tmp3);
72202 + kmsg->msg_name = (void __force_kernel *)compat_ptr(tmp1);
72203 + kmsg->msg_iov = (void __force_kernel *)compat_ptr(tmp2);
72204 + kmsg->msg_control = (void __force_kernel *)compat_ptr(tmp3);
72205 return 0;
72206 }
72207
72208 @@ -85,7 +85,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
72209
72210 if (kern_msg->msg_namelen) {
72211 if (mode == VERIFY_READ) {
72212 - int err = move_addr_to_kernel(kern_msg->msg_name,
72213 + int err = move_addr_to_kernel((void __force_user *)kern_msg->msg_name,
72214 kern_msg->msg_namelen,
72215 kern_address);
72216 if (err < 0)
72217 @@ -96,7 +96,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
72218 kern_msg->msg_name = NULL;
72219
72220 tot_len = iov_from_user_compat_to_kern(kern_iov,
72221 - (struct compat_iovec __user *)kern_msg->msg_iov,
72222 + (struct compat_iovec __force_user *)kern_msg->msg_iov,
72223 kern_msg->msg_iovlen);
72224 if (tot_len >= 0)
72225 kern_msg->msg_iov = kern_iov;
72226 @@ -116,20 +116,20 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
72227
72228 #define CMSG_COMPAT_FIRSTHDR(msg) \
72229 (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? \
72230 - (struct compat_cmsghdr __user *)((msg)->msg_control) : \
72231 + (struct compat_cmsghdr __force_user *)((msg)->msg_control) : \
72232 (struct compat_cmsghdr __user *)NULL)
72233
72234 #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
72235 ((ucmlen) >= sizeof(struct compat_cmsghdr) && \
72236 (ucmlen) <= (unsigned long) \
72237 ((mhdr)->msg_controllen - \
72238 - ((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
72239 + ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control)))
72240
72241 static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
72242 struct compat_cmsghdr __user *cmsg, int cmsg_len)
72243 {
72244 char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
72245 - if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) >
72246 + if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) >
72247 msg->msg_controllen)
72248 return NULL;
72249 return (struct compat_cmsghdr __user *)ptr;
72250 @@ -221,7 +221,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
72251 {
72252 struct compat_timeval ctv;
72253 struct compat_timespec cts[3];
72254 - struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
72255 + struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
72256 struct compat_cmsghdr cmhdr;
72257 int cmlen;
72258
72259 @@ -273,7 +273,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
72260
72261 void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
72262 {
72263 - struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
72264 + struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
72265 int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
72266 int fdnum = scm->fp->count;
72267 struct file **fp = scm->fp->fp;
72268 @@ -370,7 +370,7 @@ static int do_set_sock_timeout(struct socket *sock, int level,
72269 return -EFAULT;
72270 old_fs = get_fs();
72271 set_fs(KERNEL_DS);
72272 - err = sock_setsockopt(sock, level, optname, (char *)&ktime, sizeof(ktime));
72273 + err = sock_setsockopt(sock, level, optname, (char __force_user *)&ktime, sizeof(ktime));
72274 set_fs(old_fs);
72275
72276 return err;
72277 @@ -431,7 +431,7 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname,
72278 len = sizeof(ktime);
72279 old_fs = get_fs();
72280 set_fs(KERNEL_DS);
72281 - err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
72282 + err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len);
72283 set_fs(old_fs);
72284
72285 if (!err) {
72286 @@ -566,7 +566,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
72287 case MCAST_JOIN_GROUP:
72288 case MCAST_LEAVE_GROUP:
72289 {
72290 - struct compat_group_req __user *gr32 = (void *)optval;
72291 + struct compat_group_req __user *gr32 = (void __user *)optval;
72292 struct group_req __user *kgr =
72293 compat_alloc_user_space(sizeof(struct group_req));
72294 u32 interface;
72295 @@ -587,7 +587,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
72296 case MCAST_BLOCK_SOURCE:
72297 case MCAST_UNBLOCK_SOURCE:
72298 {
72299 - struct compat_group_source_req __user *gsr32 = (void *)optval;
72300 + struct compat_group_source_req __user *gsr32 = (void __user *)optval;
72301 struct group_source_req __user *kgsr = compat_alloc_user_space(
72302 sizeof(struct group_source_req));
72303 u32 interface;
72304 @@ -608,7 +608,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
72305 }
72306 case MCAST_MSFILTER:
72307 {
72308 - struct compat_group_filter __user *gf32 = (void *)optval;
72309 + struct compat_group_filter __user *gf32 = (void __user *)optval;
72310 struct group_filter __user *kgf;
72311 u32 interface, fmode, numsrc;
72312
72313 @@ -646,7 +646,7 @@ int compat_mc_getsockopt(struct sock *sock, int level, int optname,
72314 char __user *optval, int __user *optlen,
72315 int (*getsockopt)(struct sock *, int, int, char __user *, int __user *))
72316 {
72317 - struct compat_group_filter __user *gf32 = (void *)optval;
72318 + struct compat_group_filter __user *gf32 = (void __user *)optval;
72319 struct group_filter __user *kgf;
72320 int __user *koptlen;
72321 u32 interface, fmode, numsrc;
72322 diff --git a/net/core/datagram.c b/net/core/datagram.c
72323 index 68bbf9f..5ef0d12 100644
72324 --- a/net/core/datagram.c
72325 +++ b/net/core/datagram.c
72326 @@ -285,7 +285,7 @@ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
72327 }
72328
72329 kfree_skb(skb);
72330 - atomic_inc(&sk->sk_drops);
72331 + atomic_inc_unchecked(&sk->sk_drops);
72332 sk_mem_reclaim_partial(sk);
72333
72334 return err;
72335 diff --git a/net/core/dev.c b/net/core/dev.c
72336 index 5a13edf..a6f2bd2 100644
72337 --- a/net/core/dev.c
72338 +++ b/net/core/dev.c
72339 @@ -1139,10 +1139,14 @@ void dev_load(struct net *net, const char *name)
72340 if (no_module && capable(CAP_NET_ADMIN))
72341 no_module = request_module("netdev-%s", name);
72342 if (no_module && capable(CAP_SYS_MODULE)) {
72343 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
72344 + ___request_module(true, "grsec_modharden_netdev", "%s", name);
72345 +#else
72346 if (!request_module("%s", name))
72347 pr_err("Loading kernel module for a network device "
72348 "with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s "
72349 "instead\n", name);
72350 +#endif
72351 }
72352 }
72353 EXPORT_SYMBOL(dev_load);
72354 @@ -1573,7 +1577,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
72355 {
72356 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
72357 if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
72358 - atomic_long_inc(&dev->rx_dropped);
72359 + atomic_long_inc_unchecked(&dev->rx_dropped);
72360 kfree_skb(skb);
72361 return NET_RX_DROP;
72362 }
72363 @@ -1583,7 +1587,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
72364 nf_reset(skb);
72365
72366 if (unlikely(!is_skb_forwardable(dev, skb))) {
72367 - atomic_long_inc(&dev->rx_dropped);
72368 + atomic_long_inc_unchecked(&dev->rx_dropped);
72369 kfree_skb(skb);
72370 return NET_RX_DROP;
72371 }
72372 @@ -2036,7 +2040,7 @@ static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
72373
72374 struct dev_gso_cb {
72375 void (*destructor)(struct sk_buff *skb);
72376 -};
72377 +} __no_const;
72378
72379 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
72380
72381 @@ -2970,7 +2974,7 @@ enqueue:
72382
72383 local_irq_restore(flags);
72384
72385 - atomic_long_inc(&skb->dev->rx_dropped);
72386 + atomic_long_inc_unchecked(&skb->dev->rx_dropped);
72387 kfree_skb(skb);
72388 return NET_RX_DROP;
72389 }
72390 @@ -3044,7 +3048,7 @@ int netif_rx_ni(struct sk_buff *skb)
72391 }
72392 EXPORT_SYMBOL(netif_rx_ni);
72393
72394 -static void net_tx_action(struct softirq_action *h)
72395 +static void net_tx_action(void)
72396 {
72397 struct softnet_data *sd = &__get_cpu_var(softnet_data);
72398
72399 @@ -3333,7 +3337,7 @@ ncls:
72400 if (pt_prev) {
72401 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
72402 } else {
72403 - atomic_long_inc(&skb->dev->rx_dropped);
72404 + atomic_long_inc_unchecked(&skb->dev->rx_dropped);
72405 kfree_skb(skb);
72406 /* Jamal, now you will not able to escape explaining
72407 * me how you were going to use this. :-)
72408 @@ -3891,7 +3895,7 @@ void netif_napi_del(struct napi_struct *napi)
72409 }
72410 EXPORT_SYMBOL(netif_napi_del);
72411
72412 -static void net_rx_action(struct softirq_action *h)
72413 +static void net_rx_action(void)
72414 {
72415 struct softnet_data *sd = &__get_cpu_var(softnet_data);
72416 unsigned long time_limit = jiffies + 2;
72417 @@ -5949,7 +5953,7 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
72418 } else {
72419 netdev_stats_to_stats64(storage, &dev->stats);
72420 }
72421 - storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
72422 + storage->rx_dropped += atomic_long_read_unchecked(&dev->rx_dropped);
72423 return storage;
72424 }
72425 EXPORT_SYMBOL(dev_get_stats);
72426 diff --git a/net/core/flow.c b/net/core/flow.c
72427 index e318c7e..168b1d0 100644
72428 --- a/net/core/flow.c
72429 +++ b/net/core/flow.c
72430 @@ -61,7 +61,7 @@ struct flow_cache {
72431 struct timer_list rnd_timer;
72432 };
72433
72434 -atomic_t flow_cache_genid = ATOMIC_INIT(0);
72435 +atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
72436 EXPORT_SYMBOL(flow_cache_genid);
72437 static struct flow_cache flow_cache_global;
72438 static struct kmem_cache *flow_cachep __read_mostly;
72439 @@ -86,7 +86,7 @@ static void flow_cache_new_hashrnd(unsigned long arg)
72440
72441 static int flow_entry_valid(struct flow_cache_entry *fle)
72442 {
72443 - if (atomic_read(&flow_cache_genid) != fle->genid)
72444 + if (atomic_read_unchecked(&flow_cache_genid) != fle->genid)
72445 return 0;
72446 if (fle->object && !fle->object->ops->check(fle->object))
72447 return 0;
72448 @@ -259,7 +259,7 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
72449 hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
72450 fcp->hash_count++;
72451 }
72452 - } else if (likely(fle->genid == atomic_read(&flow_cache_genid))) {
72453 + } else if (likely(fle->genid == atomic_read_unchecked(&flow_cache_genid))) {
72454 flo = fle->object;
72455 if (!flo)
72456 goto ret_object;
72457 @@ -280,7 +280,7 @@ nocache:
72458 }
72459 flo = resolver(net, key, family, dir, flo, ctx);
72460 if (fle) {
72461 - fle->genid = atomic_read(&flow_cache_genid);
72462 + fle->genid = atomic_read_unchecked(&flow_cache_genid);
72463 if (!IS_ERR(flo))
72464 fle->object = flo;
72465 else
72466 diff --git a/net/core/iovec.c b/net/core/iovec.c
72467 index c40f27e..7f49254 100644
72468 --- a/net/core/iovec.c
72469 +++ b/net/core/iovec.c
72470 @@ -42,7 +42,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address,
72471 if (m->msg_namelen) {
72472 if (mode == VERIFY_READ) {
72473 void __user *namep;
72474 - namep = (void __user __force *) m->msg_name;
72475 + namep = (void __force_user *) m->msg_name;
72476 err = move_addr_to_kernel(namep, m->msg_namelen,
72477 address);
72478 if (err < 0)
72479 @@ -54,7 +54,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address,
72480 }
72481
72482 size = m->msg_iovlen * sizeof(struct iovec);
72483 - if (copy_from_user(iov, (void __user __force *) m->msg_iov, size))
72484 + if (copy_from_user(iov, (void __force_user *) m->msg_iov, size))
72485 return -EFAULT;
72486
72487 m->msg_iov = iov;
72488 diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
72489 index 9083e82..1673203 100644
72490 --- a/net/core/rtnetlink.c
72491 +++ b/net/core/rtnetlink.c
72492 @@ -57,7 +57,7 @@ struct rtnl_link {
72493 rtnl_doit_func doit;
72494 rtnl_dumpit_func dumpit;
72495 rtnl_calcit_func calcit;
72496 -};
72497 +} __no_const;
72498
72499 static DEFINE_MUTEX(rtnl_mutex);
72500 static u16 min_ifinfo_dump_size;
72501 diff --git a/net/core/scm.c b/net/core/scm.c
72502 index ff52ad0..aff1c0f 100644
72503 --- a/net/core/scm.c
72504 +++ b/net/core/scm.c
72505 @@ -220,7 +220,7 @@ EXPORT_SYMBOL(__scm_send);
72506 int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
72507 {
72508 struct cmsghdr __user *cm
72509 - = (__force struct cmsghdr __user *)msg->msg_control;
72510 + = (struct cmsghdr __force_user *)msg->msg_control;
72511 struct cmsghdr cmhdr;
72512 int cmlen = CMSG_LEN(len);
72513 int err;
72514 @@ -243,7 +243,7 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
72515 err = -EFAULT;
72516 if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
72517 goto out;
72518 - if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
72519 + if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr)))
72520 goto out;
72521 cmlen = CMSG_SPACE(len);
72522 if (msg->msg_controllen < cmlen)
72523 @@ -259,7 +259,7 @@ EXPORT_SYMBOL(put_cmsg);
72524 void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
72525 {
72526 struct cmsghdr __user *cm
72527 - = (__force struct cmsghdr __user*)msg->msg_control;
72528 + = (struct cmsghdr __force_user *)msg->msg_control;
72529
72530 int fdmax = 0;
72531 int fdnum = scm->fp->count;
72532 @@ -279,7 +279,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
72533 if (fdnum < fdmax)
72534 fdmax = fdnum;
72535
72536 - for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
72537 + for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax;
72538 i++, cmfptr++)
72539 {
72540 int new_fd;
72541 diff --git a/net/core/sock.c b/net/core/sock.c
72542 index b23f174..b9a0d26 100644
72543 --- a/net/core/sock.c
72544 +++ b/net/core/sock.c
72545 @@ -289,7 +289,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
72546 struct sk_buff_head *list = &sk->sk_receive_queue;
72547
72548 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
72549 - atomic_inc(&sk->sk_drops);
72550 + atomic_inc_unchecked(&sk->sk_drops);
72551 trace_sock_rcvqueue_full(sk, skb);
72552 return -ENOMEM;
72553 }
72554 @@ -299,7 +299,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
72555 return err;
72556
72557 if (!sk_rmem_schedule(sk, skb->truesize)) {
72558 - atomic_inc(&sk->sk_drops);
72559 + atomic_inc_unchecked(&sk->sk_drops);
72560 return -ENOBUFS;
72561 }
72562
72563 @@ -319,7 +319,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
72564 skb_dst_force(skb);
72565
72566 spin_lock_irqsave(&list->lock, flags);
72567 - skb->dropcount = atomic_read(&sk->sk_drops);
72568 + skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
72569 __skb_queue_tail(list, skb);
72570 spin_unlock_irqrestore(&list->lock, flags);
72571
72572 @@ -339,7 +339,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
72573 skb->dev = NULL;
72574
72575 if (sk_rcvqueues_full(sk, skb)) {
72576 - atomic_inc(&sk->sk_drops);
72577 + atomic_inc_unchecked(&sk->sk_drops);
72578 goto discard_and_relse;
72579 }
72580 if (nested)
72581 @@ -357,7 +357,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
72582 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
72583 } else if (sk_add_backlog(sk, skb)) {
72584 bh_unlock_sock(sk);
72585 - atomic_inc(&sk->sk_drops);
72586 + atomic_inc_unchecked(&sk->sk_drops);
72587 goto discard_and_relse;
72588 }
72589
72590 @@ -917,7 +917,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
72591 if (len > sizeof(peercred))
72592 len = sizeof(peercred);
72593 cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
72594 - if (copy_to_user(optval, &peercred, len))
72595 + if (len > sizeof(peercred) || copy_to_user(optval, &peercred, len))
72596 return -EFAULT;
72597 goto lenout;
72598 }
72599 @@ -930,7 +930,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
72600 return -ENOTCONN;
72601 if (lv < len)
72602 return -EINVAL;
72603 - if (copy_to_user(optval, address, len))
72604 + if (len > sizeof(address) || copy_to_user(optval, address, len))
72605 return -EFAULT;
72606 goto lenout;
72607 }
72608 @@ -963,7 +963,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
72609
72610 if (len > lv)
72611 len = lv;
72612 - if (copy_to_user(optval, &v, len))
72613 + if (len > sizeof(v) || copy_to_user(optval, &v, len))
72614 return -EFAULT;
72615 lenout:
72616 if (put_user(len, optlen))
72617 @@ -2020,7 +2020,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
72618 */
72619 smp_wmb();
72620 atomic_set(&sk->sk_refcnt, 1);
72621 - atomic_set(&sk->sk_drops, 0);
72622 + atomic_set_unchecked(&sk->sk_drops, 0);
72623 }
72624 EXPORT_SYMBOL(sock_init_data);
72625
72626 diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
72627 index 02e75d1..9a57a7c 100644
72628 --- a/net/decnet/sysctl_net_decnet.c
72629 +++ b/net/decnet/sysctl_net_decnet.c
72630 @@ -174,7 +174,7 @@ static int dn_node_address_handler(ctl_table *table, int write,
72631
72632 if (len > *lenp) len = *lenp;
72633
72634 - if (copy_to_user(buffer, addr, len))
72635 + if (len > sizeof addr || copy_to_user(buffer, addr, len))
72636 return -EFAULT;
72637
72638 *lenp = len;
72639 @@ -237,7 +237,7 @@ static int dn_def_dev_handler(ctl_table *table, int write,
72640
72641 if (len > *lenp) len = *lenp;
72642
72643 - if (copy_to_user(buffer, devname, len))
72644 + if (len > sizeof devname || copy_to_user(buffer, devname, len))
72645 return -EFAULT;
72646
72647 *lenp = len;
72648 diff --git a/net/econet/Kconfig b/net/econet/Kconfig
72649 index 39a2d29..f39c0fe 100644
72650 --- a/net/econet/Kconfig
72651 +++ b/net/econet/Kconfig
72652 @@ -4,7 +4,7 @@
72653
72654 config ECONET
72655 tristate "Acorn Econet/AUN protocols (EXPERIMENTAL)"
72656 - depends on EXPERIMENTAL && INET
72657 + depends on EXPERIMENTAL && INET && BROKEN
72658 ---help---
72659 Econet is a fairly old and slow networking protocol mainly used by
72660 Acorn computers to access file and print servers. It uses native
72661 diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
72662 index 92fc5f6..b790d91 100644
72663 --- a/net/ipv4/fib_frontend.c
72664 +++ b/net/ipv4/fib_frontend.c
72665 @@ -970,12 +970,12 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
72666 #ifdef CONFIG_IP_ROUTE_MULTIPATH
72667 fib_sync_up(dev);
72668 #endif
72669 - atomic_inc(&net->ipv4.dev_addr_genid);
72670 + atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
72671 rt_cache_flush(dev_net(dev), -1);
72672 break;
72673 case NETDEV_DOWN:
72674 fib_del_ifaddr(ifa, NULL);
72675 - atomic_inc(&net->ipv4.dev_addr_genid);
72676 + atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
72677 if (ifa->ifa_dev->ifa_list == NULL) {
72678 /* Last address was deleted from this interface.
72679 * Disable IP.
72680 @@ -1011,7 +1011,7 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
72681 #ifdef CONFIG_IP_ROUTE_MULTIPATH
72682 fib_sync_up(dev);
72683 #endif
72684 - atomic_inc(&net->ipv4.dev_addr_genid);
72685 + atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
72686 rt_cache_flush(dev_net(dev), -1);
72687 break;
72688 case NETDEV_DOWN:
72689 diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
72690 index 80106d8..232e898 100644
72691 --- a/net/ipv4/fib_semantics.c
72692 +++ b/net/ipv4/fib_semantics.c
72693 @@ -699,7 +699,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh)
72694 nh->nh_saddr = inet_select_addr(nh->nh_dev,
72695 nh->nh_gw,
72696 nh->nh_parent->fib_scope);
72697 - nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
72698 + nh->nh_saddr_genid = atomic_read_unchecked(&net->ipv4.dev_addr_genid);
72699
72700 return nh->nh_saddr;
72701 }
72702 diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
72703 index ccee270..db23c3c 100644
72704 --- a/net/ipv4/inet_diag.c
72705 +++ b/net/ipv4/inet_diag.c
72706 @@ -114,8 +114,14 @@ static int inet_csk_diag_fill(struct sock *sk,
72707 r->idiag_retrans = 0;
72708
72709 r->id.idiag_if = sk->sk_bound_dev_if;
72710 +
72711 +#ifdef CONFIG_GRKERNSEC_HIDESYM
72712 + r->id.idiag_cookie[0] = 0;
72713 + r->id.idiag_cookie[1] = 0;
72714 +#else
72715 r->id.idiag_cookie[0] = (u32)(unsigned long)sk;
72716 r->id.idiag_cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
72717 +#endif
72718
72719 r->id.idiag_sport = inet->inet_sport;
72720 r->id.idiag_dport = inet->inet_dport;
72721 @@ -210,8 +216,15 @@ static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
72722 r->idiag_family = tw->tw_family;
72723 r->idiag_retrans = 0;
72724 r->id.idiag_if = tw->tw_bound_dev_if;
72725 +
72726 +#ifdef CONFIG_GRKERNSEC_HIDESYM
72727 + r->id.idiag_cookie[0] = 0;
72728 + r->id.idiag_cookie[1] = 0;
72729 +#else
72730 r->id.idiag_cookie[0] = (u32)(unsigned long)tw;
72731 r->id.idiag_cookie[1] = (u32)(((unsigned long)tw >> 31) >> 1);
72732 +#endif
72733 +
72734 r->id.idiag_sport = tw->tw_sport;
72735 r->id.idiag_dport = tw->tw_dport;
72736 r->id.idiag_src[0] = tw->tw_rcv_saddr;
72737 @@ -294,12 +307,14 @@ static int inet_diag_get_exact(struct sk_buff *in_skb,
72738 if (sk == NULL)
72739 goto unlock;
72740
72741 +#ifndef CONFIG_GRKERNSEC_HIDESYM
72742 err = -ESTALE;
72743 if ((req->id.idiag_cookie[0] != INET_DIAG_NOCOOKIE ||
72744 req->id.idiag_cookie[1] != INET_DIAG_NOCOOKIE) &&
72745 ((u32)(unsigned long)sk != req->id.idiag_cookie[0] ||
72746 (u32)((((unsigned long)sk) >> 31) >> 1) != req->id.idiag_cookie[1]))
72747 goto out;
72748 +#endif
72749
72750 err = -ENOMEM;
72751 rep = alloc_skb(NLMSG_SPACE((sizeof(struct inet_diag_msg) +
72752 @@ -589,8 +604,14 @@ static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk,
72753 r->idiag_retrans = req->retrans;
72754
72755 r->id.idiag_if = sk->sk_bound_dev_if;
72756 +
72757 +#ifdef CONFIG_GRKERNSEC_HIDESYM
72758 + r->id.idiag_cookie[0] = 0;
72759 + r->id.idiag_cookie[1] = 0;
72760 +#else
72761 r->id.idiag_cookie[0] = (u32)(unsigned long)req;
72762 r->id.idiag_cookie[1] = (u32)(((unsigned long)req >> 31) >> 1);
72763 +#endif
72764
72765 tmo = req->expires - jiffies;
72766 if (tmo < 0)
72767 diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
72768 index 984ec65..97ac518 100644
72769 --- a/net/ipv4/inet_hashtables.c
72770 +++ b/net/ipv4/inet_hashtables.c
72771 @@ -18,12 +18,15 @@
72772 #include <linux/sched.h>
72773 #include <linux/slab.h>
72774 #include <linux/wait.h>
72775 +#include <linux/security.h>
72776
72777 #include <net/inet_connection_sock.h>
72778 #include <net/inet_hashtables.h>
72779 #include <net/secure_seq.h>
72780 #include <net/ip.h>
72781
72782 +extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
72783 +
72784 /*
72785 * Allocate and initialize a new local port bind bucket.
72786 * The bindhash mutex for snum's hash chain must be held here.
72787 @@ -530,6 +533,8 @@ ok:
72788 twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
72789 spin_unlock(&head->lock);
72790
72791 + gr_update_task_in_ip_table(current, inet_sk(sk));
72792 +
72793 if (tw) {
72794 inet_twsk_deschedule(tw, death_row);
72795 while (twrefcnt) {
72796 diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
72797 index 86f13c67..59a35b5 100644
72798 --- a/net/ipv4/inetpeer.c
72799 +++ b/net/ipv4/inetpeer.c
72800 @@ -436,8 +436,8 @@ relookup:
72801 if (p) {
72802 p->daddr = *daddr;
72803 atomic_set(&p->refcnt, 1);
72804 - atomic_set(&p->rid, 0);
72805 - atomic_set(&p->ip_id_count,
72806 + atomic_set_unchecked(&p->rid, 0);
72807 + atomic_set_unchecked(&p->ip_id_count,
72808 (daddr->family == AF_INET) ?
72809 secure_ip_id(daddr->addr.a4) :
72810 secure_ipv6_id(daddr->addr.a6));
72811 diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
72812 index fdaabf2..0ec3205 100644
72813 --- a/net/ipv4/ip_fragment.c
72814 +++ b/net/ipv4/ip_fragment.c
72815 @@ -316,7 +316,7 @@ static inline int ip_frag_too_far(struct ipq *qp)
72816 return 0;
72817
72818 start = qp->rid;
72819 - end = atomic_inc_return(&peer->rid);
72820 + end = atomic_inc_return_unchecked(&peer->rid);
72821 qp->rid = end;
72822
72823 rc = qp->q.fragments && (end - start) > max;
72824 diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
72825 index 09ff51b..d3968eb 100644
72826 --- a/net/ipv4/ip_sockglue.c
72827 +++ b/net/ipv4/ip_sockglue.c
72828 @@ -1111,7 +1111,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
72829 len = min_t(unsigned int, len, opt->optlen);
72830 if (put_user(len, optlen))
72831 return -EFAULT;
72832 - if (copy_to_user(optval, opt->__data, len))
72833 + if ((len > (sizeof(optbuf) - sizeof(struct ip_options))) ||
72834 + copy_to_user(optval, opt->__data, len))
72835 return -EFAULT;
72836 return 0;
72837 }
72838 @@ -1239,7 +1240,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
72839 if (sk->sk_type != SOCK_STREAM)
72840 return -ENOPROTOOPT;
72841
72842 - msg.msg_control = optval;
72843 + msg.msg_control = (void __force_kernel *)optval;
72844 msg.msg_controllen = len;
72845 msg.msg_flags = flags;
72846
72847 diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
72848 index 99ec116..c5628fe 100644
72849 --- a/net/ipv4/ipconfig.c
72850 +++ b/net/ipv4/ipconfig.c
72851 @@ -318,7 +318,7 @@ static int __init ic_devinet_ioctl(unsigned int cmd, struct ifreq *arg)
72852
72853 mm_segment_t oldfs = get_fs();
72854 set_fs(get_ds());
72855 - res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
72856 + res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
72857 set_fs(oldfs);
72858 return res;
72859 }
72860 @@ -329,7 +329,7 @@ static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg)
72861
72862 mm_segment_t oldfs = get_fs();
72863 set_fs(get_ds());
72864 - res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
72865 + res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
72866 set_fs(oldfs);
72867 return res;
72868 }
72869 @@ -340,7 +340,7 @@ static int __init ic_route_ioctl(unsigned int cmd, struct rtentry *arg)
72870
72871 mm_segment_t oldfs = get_fs();
72872 set_fs(get_ds());
72873 - res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg);
72874 + res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg);
72875 set_fs(oldfs);
72876 return res;
72877 }
72878 diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c
72879 index 2133c30..5c4b40b 100644
72880 --- a/net/ipv4/netfilter/nf_nat_snmp_basic.c
72881 +++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c
72882 @@ -399,7 +399,7 @@ static unsigned char asn1_octets_decode(struct asn1_ctx *ctx,
72883
72884 *len = 0;
72885
72886 - *octets = kmalloc(eoc - ctx->pointer, GFP_ATOMIC);
72887 + *octets = kmalloc((eoc - ctx->pointer), GFP_ATOMIC);
72888 if (*octets == NULL)
72889 return 0;
72890
72891 diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
72892 index 43d4c3b..1914409 100644
72893 --- a/net/ipv4/ping.c
72894 +++ b/net/ipv4/ping.c
72895 @@ -836,7 +836,7 @@ static void ping_format_sock(struct sock *sp, struct seq_file *f,
72896 sk_rmem_alloc_get(sp),
72897 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
72898 atomic_read(&sp->sk_refcnt), sp,
72899 - atomic_read(&sp->sk_drops), len);
72900 + atomic_read_unchecked(&sp->sk_drops), len);
72901 }
72902
72903 static int ping_seq_show(struct seq_file *seq, void *v)
72904 diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
72905 index 007e2eb..85a18a0 100644
72906 --- a/net/ipv4/raw.c
72907 +++ b/net/ipv4/raw.c
72908 @@ -303,7 +303,7 @@ static int raw_rcv_skb(struct sock * sk, struct sk_buff * skb)
72909 int raw_rcv(struct sock *sk, struct sk_buff *skb)
72910 {
72911 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
72912 - atomic_inc(&sk->sk_drops);
72913 + atomic_inc_unchecked(&sk->sk_drops);
72914 kfree_skb(skb);
72915 return NET_RX_DROP;
72916 }
72917 @@ -738,16 +738,20 @@ static int raw_init(struct sock *sk)
72918
72919 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
72920 {
72921 + struct icmp_filter filter;
72922 +
72923 if (optlen > sizeof(struct icmp_filter))
72924 optlen = sizeof(struct icmp_filter);
72925 - if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
72926 + if (copy_from_user(&filter, optval, optlen))
72927 return -EFAULT;
72928 + raw_sk(sk)->filter = filter;
72929 return 0;
72930 }
72931
72932 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
72933 {
72934 int len, ret = -EFAULT;
72935 + struct icmp_filter filter;
72936
72937 if (get_user(len, optlen))
72938 goto out;
72939 @@ -757,8 +761,8 @@ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *o
72940 if (len > sizeof(struct icmp_filter))
72941 len = sizeof(struct icmp_filter);
72942 ret = -EFAULT;
72943 - if (put_user(len, optlen) ||
72944 - copy_to_user(optval, &raw_sk(sk)->filter, len))
72945 + filter = raw_sk(sk)->filter;
72946 + if (put_user(len, optlen) || len > sizeof filter || copy_to_user(optval, &filter, len))
72947 goto out;
72948 ret = 0;
72949 out: return ret;
72950 @@ -986,7 +990,13 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
72951 sk_wmem_alloc_get(sp),
72952 sk_rmem_alloc_get(sp),
72953 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
72954 - atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
72955 + atomic_read(&sp->sk_refcnt),
72956 +#ifdef CONFIG_GRKERNSEC_HIDESYM
72957 + NULL,
72958 +#else
72959 + sp,
72960 +#endif
72961 + atomic_read_unchecked(&sp->sk_drops));
72962 }
72963
72964 static int raw_seq_show(struct seq_file *seq, void *v)
72965 diff --git a/net/ipv4/route.c b/net/ipv4/route.c
72966 index 94cdbc5..0cb0063 100644
72967 --- a/net/ipv4/route.c
72968 +++ b/net/ipv4/route.c
72969 @@ -313,7 +313,7 @@ static inline unsigned int rt_hash(__be32 daddr, __be32 saddr, int idx,
72970
72971 static inline int rt_genid(struct net *net)
72972 {
72973 - return atomic_read(&net->ipv4.rt_genid);
72974 + return atomic_read_unchecked(&net->ipv4.rt_genid);
72975 }
72976
72977 #ifdef CONFIG_PROC_FS
72978 @@ -937,7 +937,7 @@ static void rt_cache_invalidate(struct net *net)
72979 unsigned char shuffle;
72980
72981 get_random_bytes(&shuffle, sizeof(shuffle));
72982 - atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
72983 + atomic_add_unchecked(shuffle + 1U, &net->ipv4.rt_genid);
72984 redirect_genid++;
72985 }
72986
72987 @@ -3022,7 +3022,7 @@ static int rt_fill_info(struct net *net,
72988 error = rt->dst.error;
72989 if (peer) {
72990 inet_peer_refcheck(rt->peer);
72991 - id = atomic_read(&peer->ip_id_count) & 0xffff;
72992 + id = atomic_read_unchecked(&peer->ip_id_count) & 0xffff;
72993 if (peer->tcp_ts_stamp) {
72994 ts = peer->tcp_ts;
72995 tsage = get_seconds() - peer->tcp_ts_stamp;
72996 diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
72997 index a9db4b1..3c03301 100644
72998 --- a/net/ipv4/tcp_ipv4.c
72999 +++ b/net/ipv4/tcp_ipv4.c
73000 @@ -87,6 +87,9 @@ int sysctl_tcp_tw_reuse __read_mostly;
73001 int sysctl_tcp_low_latency __read_mostly;
73002 EXPORT_SYMBOL(sysctl_tcp_low_latency);
73003
73004 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73005 +extern int grsec_enable_blackhole;
73006 +#endif
73007
73008 #ifdef CONFIG_TCP_MD5SIG
73009 static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
73010 @@ -1627,6 +1630,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
73011 return 0;
73012
73013 reset:
73014 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73015 + if (!grsec_enable_blackhole)
73016 +#endif
73017 tcp_v4_send_reset(rsk, skb);
73018 discard:
73019 kfree_skb(skb);
73020 @@ -1689,12 +1695,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
73021 TCP_SKB_CB(skb)->sacked = 0;
73022
73023 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
73024 - if (!sk)
73025 + if (!sk) {
73026 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73027 + ret = 1;
73028 +#endif
73029 goto no_tcp_socket;
73030 -
73031 + }
73032 process:
73033 - if (sk->sk_state == TCP_TIME_WAIT)
73034 + if (sk->sk_state == TCP_TIME_WAIT) {
73035 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73036 + ret = 2;
73037 +#endif
73038 goto do_time_wait;
73039 + }
73040
73041 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
73042 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
73043 @@ -1744,6 +1757,10 @@ no_tcp_socket:
73044 bad_packet:
73045 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
73046 } else {
73047 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73048 + if (!grsec_enable_blackhole || (ret == 1 &&
73049 + (skb->dev->flags & IFF_LOOPBACK)))
73050 +#endif
73051 tcp_v4_send_reset(NULL, skb);
73052 }
73053
73054 @@ -2404,7 +2421,11 @@ static void get_openreq4(const struct sock *sk, const struct request_sock *req,
73055 0, /* non standard timer */
73056 0, /* open_requests have no inode */
73057 atomic_read(&sk->sk_refcnt),
73058 +#ifdef CONFIG_GRKERNSEC_HIDESYM
73059 + NULL,
73060 +#else
73061 req,
73062 +#endif
73063 len);
73064 }
73065
73066 @@ -2454,7 +2475,12 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
73067 sock_i_uid(sk),
73068 icsk->icsk_probes_out,
73069 sock_i_ino(sk),
73070 - atomic_read(&sk->sk_refcnt), sk,
73071 + atomic_read(&sk->sk_refcnt),
73072 +#ifdef CONFIG_GRKERNSEC_HIDESYM
73073 + NULL,
73074 +#else
73075 + sk,
73076 +#endif
73077 jiffies_to_clock_t(icsk->icsk_rto),
73078 jiffies_to_clock_t(icsk->icsk_ack.ato),
73079 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
73080 @@ -2482,7 +2508,13 @@ static void get_timewait4_sock(const struct inet_timewait_sock *tw,
73081 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK%n",
73082 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
73083 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
73084 - atomic_read(&tw->tw_refcnt), tw, len);
73085 + atomic_read(&tw->tw_refcnt),
73086 +#ifdef CONFIG_GRKERNSEC_HIDESYM
73087 + NULL,
73088 +#else
73089 + tw,
73090 +#endif
73091 + len);
73092 }
73093
73094 #define TMPSZ 150
73095 diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
73096 index 66363b6..b0654a3 100644
73097 --- a/net/ipv4/tcp_minisocks.c
73098 +++ b/net/ipv4/tcp_minisocks.c
73099 @@ -27,6 +27,10 @@
73100 #include <net/inet_common.h>
73101 #include <net/xfrm.h>
73102
73103 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73104 +extern int grsec_enable_blackhole;
73105 +#endif
73106 +
73107 int sysctl_tcp_syncookies __read_mostly = 1;
73108 EXPORT_SYMBOL(sysctl_tcp_syncookies);
73109
73110 @@ -751,6 +755,10 @@ listen_overflow:
73111
73112 embryonic_reset:
73113 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
73114 +
73115 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73116 + if (!grsec_enable_blackhole)
73117 +#endif
73118 if (!(flg & TCP_FLAG_RST))
73119 req->rsk_ops->send_reset(sk, skb);
73120
73121 diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
73122 index 85ee7eb..53277ab 100644
73123 --- a/net/ipv4/tcp_probe.c
73124 +++ b/net/ipv4/tcp_probe.c
73125 @@ -202,7 +202,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf,
73126 if (cnt + width >= len)
73127 break;
73128
73129 - if (copy_to_user(buf + cnt, tbuf, width))
73130 + if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
73131 return -EFAULT;
73132 cnt += width;
73133 }
73134 diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
73135 index 2e0f0af..e2948bf 100644
73136 --- a/net/ipv4/tcp_timer.c
73137 +++ b/net/ipv4/tcp_timer.c
73138 @@ -22,6 +22,10 @@
73139 #include <linux/gfp.h>
73140 #include <net/tcp.h>
73141
73142 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73143 +extern int grsec_lastack_retries;
73144 +#endif
73145 +
73146 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
73147 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
73148 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
73149 @@ -199,6 +203,13 @@ static int tcp_write_timeout(struct sock *sk)
73150 }
73151 }
73152
73153 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73154 + if ((sk->sk_state == TCP_LAST_ACK) &&
73155 + (grsec_lastack_retries > 0) &&
73156 + (grsec_lastack_retries < retry_until))
73157 + retry_until = grsec_lastack_retries;
73158 +#endif
73159 +
73160 if (retransmits_timed_out(sk, retry_until,
73161 syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
73162 /* Has it gone just too far? */
73163 diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
73164 index 5a65eea..bd913a1 100644
73165 --- a/net/ipv4/udp.c
73166 +++ b/net/ipv4/udp.c
73167 @@ -86,6 +86,7 @@
73168 #include <linux/types.h>
73169 #include <linux/fcntl.h>
73170 #include <linux/module.h>
73171 +#include <linux/security.h>
73172 #include <linux/socket.h>
73173 #include <linux/sockios.h>
73174 #include <linux/igmp.h>
73175 @@ -108,6 +109,10 @@
73176 #include <trace/events/udp.h>
73177 #include "udp_impl.h"
73178
73179 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73180 +extern int grsec_enable_blackhole;
73181 +#endif
73182 +
73183 struct udp_table udp_table __read_mostly;
73184 EXPORT_SYMBOL(udp_table);
73185
73186 @@ -565,6 +570,9 @@ found:
73187 return s;
73188 }
73189
73190 +extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
73191 +extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
73192 +
73193 /*
73194 * This routine is called by the ICMP module when it gets some
73195 * sort of error condition. If err < 0 then the socket should
73196 @@ -856,9 +864,18 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
73197 dport = usin->sin_port;
73198 if (dport == 0)
73199 return -EINVAL;
73200 +
73201 + err = gr_search_udp_sendmsg(sk, usin);
73202 + if (err)
73203 + return err;
73204 } else {
73205 if (sk->sk_state != TCP_ESTABLISHED)
73206 return -EDESTADDRREQ;
73207 +
73208 + err = gr_search_udp_sendmsg(sk, NULL);
73209 + if (err)
73210 + return err;
73211 +
73212 daddr = inet->inet_daddr;
73213 dport = inet->inet_dport;
73214 /* Open fast path for connected socket.
73215 @@ -1099,7 +1116,7 @@ static unsigned int first_packet_length(struct sock *sk)
73216 udp_lib_checksum_complete(skb)) {
73217 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
73218 IS_UDPLITE(sk));
73219 - atomic_inc(&sk->sk_drops);
73220 + atomic_inc_unchecked(&sk->sk_drops);
73221 __skb_unlink(skb, rcvq);
73222 __skb_queue_tail(&list_kill, skb);
73223 }
73224 @@ -1185,6 +1202,10 @@ try_again:
73225 if (!skb)
73226 goto out;
73227
73228 + err = gr_search_udp_recvmsg(sk, skb);
73229 + if (err)
73230 + goto out_free;
73231 +
73232 ulen = skb->len - sizeof(struct udphdr);
73233 copied = len;
73234 if (copied > ulen)
73235 @@ -1487,7 +1508,7 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
73236
73237 drop:
73238 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
73239 - atomic_inc(&sk->sk_drops);
73240 + atomic_inc_unchecked(&sk->sk_drops);
73241 kfree_skb(skb);
73242 return -1;
73243 }
73244 @@ -1506,7 +1527,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
73245 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
73246
73247 if (!skb1) {
73248 - atomic_inc(&sk->sk_drops);
73249 + atomic_inc_unchecked(&sk->sk_drops);
73250 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
73251 IS_UDPLITE(sk));
73252 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
73253 @@ -1675,6 +1696,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
73254 goto csum_error;
73255
73256 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
73257 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73258 + if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
73259 +#endif
73260 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
73261
73262 /*
73263 @@ -2098,8 +2122,13 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
73264 sk_wmem_alloc_get(sp),
73265 sk_rmem_alloc_get(sp),
73266 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
73267 - atomic_read(&sp->sk_refcnt), sp,
73268 - atomic_read(&sp->sk_drops), len);
73269 + atomic_read(&sp->sk_refcnt),
73270 +#ifdef CONFIG_GRKERNSEC_HIDESYM
73271 + NULL,
73272 +#else
73273 + sp,
73274 +#endif
73275 + atomic_read_unchecked(&sp->sk_drops), len);
73276 }
73277
73278 int udp4_seq_show(struct seq_file *seq, void *v)
73279 diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
73280 index 36806de..b86f74c 100644
73281 --- a/net/ipv6/addrconf.c
73282 +++ b/net/ipv6/addrconf.c
73283 @@ -2149,7 +2149,7 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg)
73284 p.iph.ihl = 5;
73285 p.iph.protocol = IPPROTO_IPV6;
73286 p.iph.ttl = 64;
73287 - ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
73288 + ifr.ifr_ifru.ifru_data = (void __force_user *)&p;
73289
73290 if (ops->ndo_do_ioctl) {
73291 mm_segment_t oldfs = get_fs();
73292 diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
73293 index 1567fb1..29af910 100644
73294 --- a/net/ipv6/inet6_connection_sock.c
73295 +++ b/net/ipv6/inet6_connection_sock.c
73296 @@ -178,7 +178,7 @@ void __inet6_csk_dst_store(struct sock *sk, struct dst_entry *dst,
73297 #ifdef CONFIG_XFRM
73298 {
73299 struct rt6_info *rt = (struct rt6_info *)dst;
73300 - rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid);
73301 + rt->rt6i_flow_cache_genid = atomic_read_unchecked(&flow_cache_genid);
73302 }
73303 #endif
73304 }
73305 @@ -193,7 +193,7 @@ struct dst_entry *__inet6_csk_dst_check(struct sock *sk, u32 cookie)
73306 #ifdef CONFIG_XFRM
73307 if (dst) {
73308 struct rt6_info *rt = (struct rt6_info *)dst;
73309 - if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) {
73310 + if (rt->rt6i_flow_cache_genid != atomic_read_unchecked(&flow_cache_genid)) {
73311 __sk_dst_reset(sk);
73312 dst = NULL;
73313 }
73314 diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
73315 index 26cb08c..8af9877 100644
73316 --- a/net/ipv6/ipv6_sockglue.c
73317 +++ b/net/ipv6/ipv6_sockglue.c
73318 @@ -960,7 +960,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
73319 if (sk->sk_type != SOCK_STREAM)
73320 return -ENOPROTOOPT;
73321
73322 - msg.msg_control = optval;
73323 + msg.msg_control = (void __force_kernel *)optval;
73324 msg.msg_controllen = len;
73325 msg.msg_flags = flags;
73326
73327 diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
73328 index 331af3b..7789844 100644
73329 --- a/net/ipv6/raw.c
73330 +++ b/net/ipv6/raw.c
73331 @@ -377,7 +377,7 @@ static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb)
73332 {
73333 if ((raw6_sk(sk)->checksum || rcu_access_pointer(sk->sk_filter)) &&
73334 skb_checksum_complete(skb)) {
73335 - atomic_inc(&sk->sk_drops);
73336 + atomic_inc_unchecked(&sk->sk_drops);
73337 kfree_skb(skb);
73338 return NET_RX_DROP;
73339 }
73340 @@ -404,7 +404,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
73341 struct raw6_sock *rp = raw6_sk(sk);
73342
73343 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
73344 - atomic_inc(&sk->sk_drops);
73345 + atomic_inc_unchecked(&sk->sk_drops);
73346 kfree_skb(skb);
73347 return NET_RX_DROP;
73348 }
73349 @@ -428,7 +428,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
73350
73351 if (inet->hdrincl) {
73352 if (skb_checksum_complete(skb)) {
73353 - atomic_inc(&sk->sk_drops);
73354 + atomic_inc_unchecked(&sk->sk_drops);
73355 kfree_skb(skb);
73356 return NET_RX_DROP;
73357 }
73358 @@ -601,7 +601,7 @@ out:
73359 return err;
73360 }
73361
73362 -static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
73363 +static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
73364 struct flowi6 *fl6, struct dst_entry **dstp,
73365 unsigned int flags)
73366 {
73367 @@ -909,12 +909,15 @@ do_confirm:
73368 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
73369 char __user *optval, int optlen)
73370 {
73371 + struct icmp6_filter filter;
73372 +
73373 switch (optname) {
73374 case ICMPV6_FILTER:
73375 if (optlen > sizeof(struct icmp6_filter))
73376 optlen = sizeof(struct icmp6_filter);
73377 - if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
73378 + if (copy_from_user(&filter, optval, optlen))
73379 return -EFAULT;
73380 + raw6_sk(sk)->filter = filter;
73381 return 0;
73382 default:
73383 return -ENOPROTOOPT;
73384 @@ -927,6 +930,7 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
73385 char __user *optval, int __user *optlen)
73386 {
73387 int len;
73388 + struct icmp6_filter filter;
73389
73390 switch (optname) {
73391 case ICMPV6_FILTER:
73392 @@ -938,7 +942,8 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
73393 len = sizeof(struct icmp6_filter);
73394 if (put_user(len, optlen))
73395 return -EFAULT;
73396 - if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
73397 + filter = raw6_sk(sk)->filter;
73398 + if (len > sizeof filter || copy_to_user(optval, &filter, len))
73399 return -EFAULT;
73400 return 0;
73401 default:
73402 @@ -1245,7 +1250,13 @@ static void raw6_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
73403 0, 0L, 0,
73404 sock_i_uid(sp), 0,
73405 sock_i_ino(sp),
73406 - atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
73407 + atomic_read(&sp->sk_refcnt),
73408 +#ifdef CONFIG_GRKERNSEC_HIDESYM
73409 + NULL,
73410 +#else
73411 + sp,
73412 +#endif
73413 + atomic_read_unchecked(&sp->sk_drops));
73414 }
73415
73416 static int raw6_seq_show(struct seq_file *seq, void *v)
73417 diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
73418 index 2dea4bb..dca8ac5 100644
73419 --- a/net/ipv6/tcp_ipv6.c
73420 +++ b/net/ipv6/tcp_ipv6.c
73421 @@ -93,6 +93,10 @@ static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
73422 }
73423 #endif
73424
73425 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73426 +extern int grsec_enable_blackhole;
73427 +#endif
73428 +
73429 static void tcp_v6_hash(struct sock *sk)
73430 {
73431 if (sk->sk_state != TCP_CLOSE) {
73432 @@ -1651,6 +1655,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
73433 return 0;
73434
73435 reset:
73436 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73437 + if (!grsec_enable_blackhole)
73438 +#endif
73439 tcp_v6_send_reset(sk, skb);
73440 discard:
73441 if (opt_skb)
73442 @@ -1730,12 +1737,20 @@ static int tcp_v6_rcv(struct sk_buff *skb)
73443 TCP_SKB_CB(skb)->sacked = 0;
73444
73445 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
73446 - if (!sk)
73447 + if (!sk) {
73448 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73449 + ret = 1;
73450 +#endif
73451 goto no_tcp_socket;
73452 + }
73453
73454 process:
73455 - if (sk->sk_state == TCP_TIME_WAIT)
73456 + if (sk->sk_state == TCP_TIME_WAIT) {
73457 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73458 + ret = 2;
73459 +#endif
73460 goto do_time_wait;
73461 + }
73462
73463 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
73464 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
73465 @@ -1783,6 +1798,10 @@ no_tcp_socket:
73466 bad_packet:
73467 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
73468 } else {
73469 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73470 + if (!grsec_enable_blackhole || (ret == 1 &&
73471 + (skb->dev->flags & IFF_LOOPBACK)))
73472 +#endif
73473 tcp_v6_send_reset(NULL, skb);
73474 }
73475
73476 @@ -2043,7 +2062,13 @@ static void get_openreq6(struct seq_file *seq,
73477 uid,
73478 0, /* non standard timer */
73479 0, /* open_requests have no inode */
73480 - 0, req);
73481 + 0,
73482 +#ifdef CONFIG_GRKERNSEC_HIDESYM
73483 + NULL
73484 +#else
73485 + req
73486 +#endif
73487 + );
73488 }
73489
73490 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
73491 @@ -2093,7 +2118,12 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
73492 sock_i_uid(sp),
73493 icsk->icsk_probes_out,
73494 sock_i_ino(sp),
73495 - atomic_read(&sp->sk_refcnt), sp,
73496 + atomic_read(&sp->sk_refcnt),
73497 +#ifdef CONFIG_GRKERNSEC_HIDESYM
73498 + NULL,
73499 +#else
73500 + sp,
73501 +#endif
73502 jiffies_to_clock_t(icsk->icsk_rto),
73503 jiffies_to_clock_t(icsk->icsk_ack.ato),
73504 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
73505 @@ -2128,7 +2158,13 @@ static void get_timewait6_sock(struct seq_file *seq,
73506 dest->s6_addr32[2], dest->s6_addr32[3], destp,
73507 tw->tw_substate, 0, 0,
73508 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
73509 - atomic_read(&tw->tw_refcnt), tw);
73510 + atomic_read(&tw->tw_refcnt),
73511 +#ifdef CONFIG_GRKERNSEC_HIDESYM
73512 + NULL
73513 +#else
73514 + tw
73515 +#endif
73516 + );
73517 }
73518
73519 static int tcp6_seq_show(struct seq_file *seq, void *v)
73520 diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
73521 index 8c25419..47a51ae 100644
73522 --- a/net/ipv6/udp.c
73523 +++ b/net/ipv6/udp.c
73524 @@ -50,6 +50,10 @@
73525 #include <linux/seq_file.h>
73526 #include "udp_impl.h"
73527
73528 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73529 +extern int grsec_enable_blackhole;
73530 +#endif
73531 +
73532 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
73533 {
73534 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
73535 @@ -549,7 +553,7 @@ int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
73536
73537 return 0;
73538 drop:
73539 - atomic_inc(&sk->sk_drops);
73540 + atomic_inc_unchecked(&sk->sk_drops);
73541 drop_no_sk_drops_inc:
73542 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
73543 kfree_skb(skb);
73544 @@ -625,7 +629,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
73545 continue;
73546 }
73547 drop:
73548 - atomic_inc(&sk->sk_drops);
73549 + atomic_inc_unchecked(&sk->sk_drops);
73550 UDP6_INC_STATS_BH(sock_net(sk),
73551 UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk));
73552 UDP6_INC_STATS_BH(sock_net(sk),
73553 @@ -780,6 +784,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
73554 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
73555 proto == IPPROTO_UDPLITE);
73556
73557 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73558 + if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
73559 +#endif
73560 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
73561
73562 kfree_skb(skb);
73563 @@ -796,7 +803,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
73564 if (!sock_owned_by_user(sk))
73565 udpv6_queue_rcv_skb(sk, skb);
73566 else if (sk_add_backlog(sk, skb)) {
73567 - atomic_inc(&sk->sk_drops);
73568 + atomic_inc_unchecked(&sk->sk_drops);
73569 bh_unlock_sock(sk);
73570 sock_put(sk);
73571 goto discard;
73572 @@ -1407,8 +1414,13 @@ static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket
73573 0, 0L, 0,
73574 sock_i_uid(sp), 0,
73575 sock_i_ino(sp),
73576 - atomic_read(&sp->sk_refcnt), sp,
73577 - atomic_read(&sp->sk_drops));
73578 + atomic_read(&sp->sk_refcnt),
73579 +#ifdef CONFIG_GRKERNSEC_HIDESYM
73580 + NULL,
73581 +#else
73582 + sp,
73583 +#endif
73584 + atomic_read_unchecked(&sp->sk_drops));
73585 }
73586
73587 int udp6_seq_show(struct seq_file *seq, void *v)
73588 diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
73589 index 253695d..9481ce8 100644
73590 --- a/net/irda/ircomm/ircomm_tty.c
73591 +++ b/net/irda/ircomm/ircomm_tty.c
73592 @@ -282,16 +282,16 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
73593 add_wait_queue(&self->open_wait, &wait);
73594
73595 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
73596 - __FILE__,__LINE__, tty->driver->name, self->open_count );
73597 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
73598
73599 /* As far as I can see, we protect open_count - Jean II */
73600 spin_lock_irqsave(&self->spinlock, flags);
73601 if (!tty_hung_up_p(filp)) {
73602 extra_count = 1;
73603 - self->open_count--;
73604 + local_dec(&self->open_count);
73605 }
73606 spin_unlock_irqrestore(&self->spinlock, flags);
73607 - self->blocked_open++;
73608 + local_inc(&self->blocked_open);
73609
73610 while (1) {
73611 if (tty->termios->c_cflag & CBAUD) {
73612 @@ -331,7 +331,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
73613 }
73614
73615 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
73616 - __FILE__,__LINE__, tty->driver->name, self->open_count );
73617 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
73618
73619 schedule();
73620 }
73621 @@ -342,13 +342,13 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
73622 if (extra_count) {
73623 /* ++ is not atomic, so this should be protected - Jean II */
73624 spin_lock_irqsave(&self->spinlock, flags);
73625 - self->open_count++;
73626 + local_inc(&self->open_count);
73627 spin_unlock_irqrestore(&self->spinlock, flags);
73628 }
73629 - self->blocked_open--;
73630 + local_dec(&self->blocked_open);
73631
73632 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
73633 - __FILE__,__LINE__, tty->driver->name, self->open_count);
73634 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count));
73635
73636 if (!retval)
73637 self->flags |= ASYNC_NORMAL_ACTIVE;
73638 @@ -417,14 +417,14 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
73639 }
73640 /* ++ is not atomic, so this should be protected - Jean II */
73641 spin_lock_irqsave(&self->spinlock, flags);
73642 - self->open_count++;
73643 + local_inc(&self->open_count);
73644
73645 tty->driver_data = self;
73646 self->tty = tty;
73647 spin_unlock_irqrestore(&self->spinlock, flags);
73648
73649 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
73650 - self->line, self->open_count);
73651 + self->line, local_read(&self->open_count));
73652
73653 /* Not really used by us, but lets do it anyway */
73654 self->tty->low_latency = (self->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
73655 @@ -510,7 +510,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
73656 return;
73657 }
73658
73659 - if ((tty->count == 1) && (self->open_count != 1)) {
73660 + if ((tty->count == 1) && (local_read(&self->open_count) != 1)) {
73661 /*
73662 * Uh, oh. tty->count is 1, which means that the tty
73663 * structure will be freed. state->count should always
73664 @@ -520,16 +520,16 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
73665 */
73666 IRDA_DEBUG(0, "%s(), bad serial port count; "
73667 "tty->count is 1, state->count is %d\n", __func__ ,
73668 - self->open_count);
73669 - self->open_count = 1;
73670 + local_read(&self->open_count));
73671 + local_set(&self->open_count, 1);
73672 }
73673
73674 - if (--self->open_count < 0) {
73675 + if (local_dec_return(&self->open_count) < 0) {
73676 IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n",
73677 - __func__, self->line, self->open_count);
73678 - self->open_count = 0;
73679 + __func__, self->line, local_read(&self->open_count));
73680 + local_set(&self->open_count, 0);
73681 }
73682 - if (self->open_count) {
73683 + if (local_read(&self->open_count)) {
73684 spin_unlock_irqrestore(&self->spinlock, flags);
73685
73686 IRDA_DEBUG(0, "%s(), open count > 0\n", __func__ );
73687 @@ -561,7 +561,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
73688 tty->closing = 0;
73689 self->tty = NULL;
73690
73691 - if (self->blocked_open) {
73692 + if (local_read(&self->blocked_open)) {
73693 if (self->close_delay)
73694 schedule_timeout_interruptible(self->close_delay);
73695 wake_up_interruptible(&self->open_wait);
73696 @@ -1013,7 +1013,7 @@ static void ircomm_tty_hangup(struct tty_struct *tty)
73697 spin_lock_irqsave(&self->spinlock, flags);
73698 self->flags &= ~ASYNC_NORMAL_ACTIVE;
73699 self->tty = NULL;
73700 - self->open_count = 0;
73701 + local_set(&self->open_count, 0);
73702 spin_unlock_irqrestore(&self->spinlock, flags);
73703
73704 wake_up_interruptible(&self->open_wait);
73705 @@ -1360,7 +1360,7 @@ static void ircomm_tty_line_info(struct ircomm_tty_cb *self, struct seq_file *m)
73706 seq_putc(m, '\n');
73707
73708 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
73709 - seq_printf(m, "Open count: %d\n", self->open_count);
73710 + seq_printf(m, "Open count: %d\n", local_read(&self->open_count));
73711 seq_printf(m, "Max data size: %d\n", self->max_data_size);
73712 seq_printf(m, "Max header size: %d\n", self->max_header_size);
73713
73714 diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
73715 index 274d150..656a144 100644
73716 --- a/net/iucv/af_iucv.c
73717 +++ b/net/iucv/af_iucv.c
73718 @@ -787,10 +787,10 @@ static int iucv_sock_autobind(struct sock *sk)
73719
73720 write_lock_bh(&iucv_sk_list.lock);
73721
73722 - sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
73723 + sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
73724 while (__iucv_get_sock_by_name(name)) {
73725 sprintf(name, "%08x",
73726 - atomic_inc_return(&iucv_sk_list.autobind_name));
73727 + atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
73728 }
73729
73730 write_unlock_bh(&iucv_sk_list.lock);
73731 diff --git a/net/key/af_key.c b/net/key/af_key.c
73732 index 1e733e9..3d73c9f 100644
73733 --- a/net/key/af_key.c
73734 +++ b/net/key/af_key.c
73735 @@ -3016,10 +3016,10 @@ static int pfkey_send_policy_notify(struct xfrm_policy *xp, int dir, const struc
73736 static u32 get_acqseq(void)
73737 {
73738 u32 res;
73739 - static atomic_t acqseq;
73740 + static atomic_unchecked_t acqseq;
73741
73742 do {
73743 - res = atomic_inc_return(&acqseq);
73744 + res = atomic_inc_return_unchecked(&acqseq);
73745 } while (!res);
73746 return res;
73747 }
73748 diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
73749 index 73495f1..ad51356 100644
73750 --- a/net/mac80211/ieee80211_i.h
73751 +++ b/net/mac80211/ieee80211_i.h
73752 @@ -27,6 +27,7 @@
73753 #include <net/ieee80211_radiotap.h>
73754 #include <net/cfg80211.h>
73755 #include <net/mac80211.h>
73756 +#include <asm/local.h>
73757 #include "key.h"
73758 #include "sta_info.h"
73759
73760 @@ -764,7 +765,7 @@ struct ieee80211_local {
73761 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
73762 spinlock_t queue_stop_reason_lock;
73763
73764 - int open_count;
73765 + local_t open_count;
73766 int monitors, cooked_mntrs;
73767 /* number of interfaces with corresponding FIF_ flags */
73768 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
73769 diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
73770 index 30d7355..e260095 100644
73771 --- a/net/mac80211/iface.c
73772 +++ b/net/mac80211/iface.c
73773 @@ -211,7 +211,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
73774 break;
73775 }
73776
73777 - if (local->open_count == 0) {
73778 + if (local_read(&local->open_count) == 0) {
73779 res = drv_start(local);
73780 if (res)
73781 goto err_del_bss;
73782 @@ -235,7 +235,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
73783 memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
73784
73785 if (!is_valid_ether_addr(dev->dev_addr)) {
73786 - if (!local->open_count)
73787 + if (!local_read(&local->open_count))
73788 drv_stop(local);
73789 return -EADDRNOTAVAIL;
73790 }
73791 @@ -327,7 +327,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
73792 mutex_unlock(&local->mtx);
73793
73794 if (coming_up)
73795 - local->open_count++;
73796 + local_inc(&local->open_count);
73797
73798 if (hw_reconf_flags) {
73799 ieee80211_hw_config(local, hw_reconf_flags);
73800 @@ -347,7 +347,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
73801 err_del_interface:
73802 drv_remove_interface(local, &sdata->vif);
73803 err_stop:
73804 - if (!local->open_count)
73805 + if (!local_read(&local->open_count))
73806 drv_stop(local);
73807 err_del_bss:
73808 sdata->bss = NULL;
73809 @@ -472,7 +472,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
73810 }
73811
73812 if (going_down)
73813 - local->open_count--;
73814 + local_dec(&local->open_count);
73815
73816 switch (sdata->vif.type) {
73817 case NL80211_IFTYPE_AP_VLAN:
73818 @@ -531,7 +531,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
73819
73820 ieee80211_recalc_ps(local, -1);
73821
73822 - if (local->open_count == 0) {
73823 + if (local_read(&local->open_count) == 0) {
73824 if (local->ops->napi_poll)
73825 napi_disable(&local->napi);
73826 ieee80211_clear_tx_pending(local);
73827 diff --git a/net/mac80211/main.c b/net/mac80211/main.c
73828 index a7536fd..4039cc0 100644
73829 --- a/net/mac80211/main.c
73830 +++ b/net/mac80211/main.c
73831 @@ -163,7 +163,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
73832 local->hw.conf.power_level = power;
73833 }
73834
73835 - if (changed && local->open_count) {
73836 + if (changed && local_read(&local->open_count)) {
73837 ret = drv_config(local, changed);
73838 /*
73839 * Goal:
73840 diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
73841 index 9ee7164..56c5061 100644
73842 --- a/net/mac80211/pm.c
73843 +++ b/net/mac80211/pm.c
73844 @@ -34,7 +34,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
73845 struct ieee80211_sub_if_data *sdata;
73846 struct sta_info *sta;
73847
73848 - if (!local->open_count)
73849 + if (!local_read(&local->open_count))
73850 goto suspend;
73851
73852 ieee80211_scan_cancel(local);
73853 @@ -72,7 +72,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
73854 cancel_work_sync(&local->dynamic_ps_enable_work);
73855 del_timer_sync(&local->dynamic_ps_timer);
73856
73857 - local->wowlan = wowlan && local->open_count;
73858 + local->wowlan = wowlan && local_read(&local->open_count);
73859 if (local->wowlan) {
73860 int err = drv_suspend(local, wowlan);
73861 if (err < 0) {
73862 @@ -129,7 +129,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
73863 }
73864
73865 /* stop hardware - this must stop RX */
73866 - if (local->open_count)
73867 + if (local_read(&local->open_count))
73868 ieee80211_stop_device(local);
73869
73870 suspend:
73871 diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
73872 index 5a5a776..9600b11 100644
73873 --- a/net/mac80211/rate.c
73874 +++ b/net/mac80211/rate.c
73875 @@ -401,7 +401,7 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
73876
73877 ASSERT_RTNL();
73878
73879 - if (local->open_count)
73880 + if (local_read(&local->open_count))
73881 return -EBUSY;
73882
73883 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
73884 diff --git a/net/mac80211/rc80211_pid_debugfs.c b/net/mac80211/rc80211_pid_debugfs.c
73885 index c97a065..ff61928 100644
73886 --- a/net/mac80211/rc80211_pid_debugfs.c
73887 +++ b/net/mac80211/rc80211_pid_debugfs.c
73888 @@ -193,7 +193,7 @@ static ssize_t rate_control_pid_events_read(struct file *file, char __user *buf,
73889
73890 spin_unlock_irqrestore(&events->lock, status);
73891
73892 - if (copy_to_user(buf, pb, p))
73893 + if (p > sizeof(pb) || copy_to_user(buf, pb, p))
73894 return -EFAULT;
73895
73896 return p;
73897 diff --git a/net/mac80211/util.c b/net/mac80211/util.c
73898 index d5230ec..c604b21 100644
73899 --- a/net/mac80211/util.c
73900 +++ b/net/mac80211/util.c
73901 @@ -1000,7 +1000,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
73902 drv_set_coverage_class(local, hw->wiphy->coverage_class);
73903
73904 /* everything else happens only if HW was up & running */
73905 - if (!local->open_count)
73906 + if (!local_read(&local->open_count))
73907 goto wake_up;
73908
73909 /*
73910 diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
73911 index d5597b7..ab6d39c 100644
73912 --- a/net/netfilter/Kconfig
73913 +++ b/net/netfilter/Kconfig
73914 @@ -779,6 +779,16 @@ config NETFILTER_XT_MATCH_ESP
73915
73916 To compile it as a module, choose M here. If unsure, say N.
73917
73918 +config NETFILTER_XT_MATCH_GRADM
73919 + tristate '"gradm" match support'
73920 + depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
73921 + depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
73922 + ---help---
73923 + The gradm match allows to match on grsecurity RBAC being enabled.
73924 + It is useful when iptables rules are applied early on bootup to
73925 + prevent connections to the machine (except from a trusted host)
73926 + while the RBAC system is disabled.
73927 +
73928 config NETFILTER_XT_MATCH_HASHLIMIT
73929 tristate '"hashlimit" match support'
73930 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
73931 diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
73932 index 1a02853..5d8c22e 100644
73933 --- a/net/netfilter/Makefile
73934 +++ b/net/netfilter/Makefile
73935 @@ -81,6 +81,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_DCCP) += xt_dccp.o
73936 obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
73937 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
73938 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
73939 +obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
73940 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
73941 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
73942 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
73943 diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
73944 index 29fa5ba..8debc79 100644
73945 --- a/net/netfilter/ipvs/ip_vs_conn.c
73946 +++ b/net/netfilter/ipvs/ip_vs_conn.c
73947 @@ -556,7 +556,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
73948 /* Increase the refcnt counter of the dest */
73949 atomic_inc(&dest->refcnt);
73950
73951 - conn_flags = atomic_read(&dest->conn_flags);
73952 + conn_flags = atomic_read_unchecked(&dest->conn_flags);
73953 if (cp->protocol != IPPROTO_UDP)
73954 conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
73955 /* Bind with the destination and its corresponding transmitter */
73956 @@ -869,7 +869,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
73957 atomic_set(&cp->refcnt, 1);
73958
73959 atomic_set(&cp->n_control, 0);
73960 - atomic_set(&cp->in_pkts, 0);
73961 + atomic_set_unchecked(&cp->in_pkts, 0);
73962
73963 atomic_inc(&ipvs->conn_count);
73964 if (flags & IP_VS_CONN_F_NO_CPORT)
73965 @@ -1149,7 +1149,7 @@ static inline int todrop_entry(struct ip_vs_conn *cp)
73966
73967 /* Don't drop the entry if its number of incoming packets is not
73968 located in [0, 8] */
73969 - i = atomic_read(&cp->in_pkts);
73970 + i = atomic_read_unchecked(&cp->in_pkts);
73971 if (i > 8 || i < 0) return 0;
73972
73973 if (!todrop_rate[i]) return 0;
73974 diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
73975 index 093cc32..9209ae1 100644
73976 --- a/net/netfilter/ipvs/ip_vs_core.c
73977 +++ b/net/netfilter/ipvs/ip_vs_core.c
73978 @@ -562,7 +562,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
73979 ret = cp->packet_xmit(skb, cp, pd->pp);
73980 /* do not touch skb anymore */
73981
73982 - atomic_inc(&cp->in_pkts);
73983 + atomic_inc_unchecked(&cp->in_pkts);
73984 ip_vs_conn_put(cp);
73985 return ret;
73986 }
73987 @@ -1611,7 +1611,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
73988 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
73989 pkts = sysctl_sync_threshold(ipvs);
73990 else
73991 - pkts = atomic_add_return(1, &cp->in_pkts);
73992 + pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
73993
73994 if ((ipvs->sync_state & IP_VS_STATE_MASTER) &&
73995 cp->protocol == IPPROTO_SCTP) {
73996 diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
73997 index e1a66cf..0910076 100644
73998 --- a/net/netfilter/ipvs/ip_vs_ctl.c
73999 +++ b/net/netfilter/ipvs/ip_vs_ctl.c
74000 @@ -788,7 +788,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
74001 ip_vs_rs_hash(ipvs, dest);
74002 write_unlock_bh(&ipvs->rs_lock);
74003 }
74004 - atomic_set(&dest->conn_flags, conn_flags);
74005 + atomic_set_unchecked(&dest->conn_flags, conn_flags);
74006
74007 /* bind the service */
74008 if (!dest->svc) {
74009 @@ -2028,7 +2028,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
74010 " %-7s %-6d %-10d %-10d\n",
74011 &dest->addr.in6,
74012 ntohs(dest->port),
74013 - ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
74014 + ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
74015 atomic_read(&dest->weight),
74016 atomic_read(&dest->activeconns),
74017 atomic_read(&dest->inactconns));
74018 @@ -2039,7 +2039,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
74019 "%-7s %-6d %-10d %-10d\n",
74020 ntohl(dest->addr.ip),
74021 ntohs(dest->port),
74022 - ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
74023 + ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
74024 atomic_read(&dest->weight),
74025 atomic_read(&dest->activeconns),
74026 atomic_read(&dest->inactconns));
74027 @@ -2509,7 +2509,7 @@ __ip_vs_get_dest_entries(struct net *net, const struct ip_vs_get_dests *get,
74028
74029 entry.addr = dest->addr.ip;
74030 entry.port = dest->port;
74031 - entry.conn_flags = atomic_read(&dest->conn_flags);
74032 + entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
74033 entry.weight = atomic_read(&dest->weight);
74034 entry.u_threshold = dest->u_threshold;
74035 entry.l_threshold = dest->l_threshold;
74036 @@ -3042,7 +3042,7 @@ static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
74037 NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port);
74038
74039 NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD,
74040 - atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
74041 + atomic_read_unchecked(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
74042 NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight));
74043 NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold);
74044 NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold);
74045 diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
74046 index 2b6678c0..aaa41fc 100644
74047 --- a/net/netfilter/ipvs/ip_vs_sync.c
74048 +++ b/net/netfilter/ipvs/ip_vs_sync.c
74049 @@ -649,7 +649,7 @@ control:
74050 * i.e only increment in_pkts for Templates.
74051 */
74052 if (cp->flags & IP_VS_CONN_F_TEMPLATE) {
74053 - int pkts = atomic_add_return(1, &cp->in_pkts);
74054 + int pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
74055
74056 if (pkts % sysctl_sync_period(ipvs) != 1)
74057 return;
74058 @@ -795,7 +795,7 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param,
74059
74060 if (opt)
74061 memcpy(&cp->in_seq, opt, sizeof(*opt));
74062 - atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
74063 + atomic_set_unchecked(&cp->in_pkts, sysctl_sync_threshold(ipvs));
74064 cp->state = state;
74065 cp->old_state = cp->state;
74066 /*
74067 diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
74068 index aa2d720..d8aa111 100644
74069 --- a/net/netfilter/ipvs/ip_vs_xmit.c
74070 +++ b/net/netfilter/ipvs/ip_vs_xmit.c
74071 @@ -1151,7 +1151,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
74072 else
74073 rc = NF_ACCEPT;
74074 /* do not touch skb anymore */
74075 - atomic_inc(&cp->in_pkts);
74076 + atomic_inc_unchecked(&cp->in_pkts);
74077 goto out;
74078 }
74079
74080 @@ -1272,7 +1272,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
74081 else
74082 rc = NF_ACCEPT;
74083 /* do not touch skb anymore */
74084 - atomic_inc(&cp->in_pkts);
74085 + atomic_inc_unchecked(&cp->in_pkts);
74086 goto out;
74087 }
74088
74089 diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
74090 index 66b2c54..c7884e3 100644
74091 --- a/net/netfilter/nfnetlink_log.c
74092 +++ b/net/netfilter/nfnetlink_log.c
74093 @@ -70,7 +70,7 @@ struct nfulnl_instance {
74094 };
74095
74096 static DEFINE_SPINLOCK(instances_lock);
74097 -static atomic_t global_seq;
74098 +static atomic_unchecked_t global_seq;
74099
74100 #define INSTANCE_BUCKETS 16
74101 static struct hlist_head instance_table[INSTANCE_BUCKETS];
74102 @@ -502,7 +502,7 @@ __build_packet_message(struct nfulnl_instance *inst,
74103 /* global sequence number */
74104 if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
74105 NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL,
74106 - htonl(atomic_inc_return(&global_seq)));
74107 + htonl(atomic_inc_return_unchecked(&global_seq)));
74108
74109 if (data_len) {
74110 struct nlattr *nla;
74111 diff --git a/net/netfilter/xt_gradm.c b/net/netfilter/xt_gradm.c
74112 new file mode 100644
74113 index 0000000..6905327
74114 --- /dev/null
74115 +++ b/net/netfilter/xt_gradm.c
74116 @@ -0,0 +1,51 @@
74117 +/*
74118 + * gradm match for netfilter
74119 + * Copyright © Zbigniew Krzystolik, 2010
74120 + *
74121 + * This program is free software; you can redistribute it and/or modify
74122 + * it under the terms of the GNU General Public License; either version
74123 + * 2 or 3 as published by the Free Software Foundation.
74124 + */
74125 +#include <linux/module.h>
74126 +#include <linux/moduleparam.h>
74127 +#include <linux/skbuff.h>
74128 +#include <linux/netfilter/x_tables.h>
74129 +#include <linux/grsecurity.h>
74130 +#include <linux/netfilter/xt_gradm.h>
74131 +
74132 +static bool
74133 +gradm_mt(const struct sk_buff *skb, struct xt_action_param *par)
74134 +{
74135 + const struct xt_gradm_mtinfo *info = par->matchinfo;
74136 + bool retval = false;
74137 + if (gr_acl_is_enabled())
74138 + retval = true;
74139 + return retval ^ info->invflags;
74140 +}
74141 +
74142 +static struct xt_match gradm_mt_reg __read_mostly = {
74143 + .name = "gradm",
74144 + .revision = 0,
74145 + .family = NFPROTO_UNSPEC,
74146 + .match = gradm_mt,
74147 + .matchsize = XT_ALIGN(sizeof(struct xt_gradm_mtinfo)),
74148 + .me = THIS_MODULE,
74149 +};
74150 +
74151 +static int __init gradm_mt_init(void)
74152 +{
74153 + return xt_register_match(&gradm_mt_reg);
74154 +}
74155 +
74156 +static void __exit gradm_mt_exit(void)
74157 +{
74158 + xt_unregister_match(&gradm_mt_reg);
74159 +}
74160 +
74161 +module_init(gradm_mt_init);
74162 +module_exit(gradm_mt_exit);
74163 +MODULE_AUTHOR("Zbigniew Krzystolik <zbyniu@destrukcja.pl>");
74164 +MODULE_DESCRIPTION("Xtables: Grsecurity RBAC match");
74165 +MODULE_LICENSE("GPL");
74166 +MODULE_ALIAS("ipt_gradm");
74167 +MODULE_ALIAS("ip6t_gradm");
74168 diff --git a/net/netfilter/xt_statistic.c b/net/netfilter/xt_statistic.c
74169 index 4fe4fb4..87a89e5 100644
74170 --- a/net/netfilter/xt_statistic.c
74171 +++ b/net/netfilter/xt_statistic.c
74172 @@ -19,7 +19,7 @@
74173 #include <linux/module.h>
74174
74175 struct xt_statistic_priv {
74176 - atomic_t count;
74177 + atomic_unchecked_t count;
74178 } ____cacheline_aligned_in_smp;
74179
74180 MODULE_LICENSE("GPL");
74181 @@ -42,9 +42,9 @@ statistic_mt(const struct sk_buff *skb, struct xt_action_param *par)
74182 break;
74183 case XT_STATISTIC_MODE_NTH:
74184 do {
74185 - oval = atomic_read(&info->master->count);
74186 + oval = atomic_read_unchecked(&info->master->count);
74187 nval = (oval == info->u.nth.every) ? 0 : oval + 1;
74188 - } while (atomic_cmpxchg(&info->master->count, oval, nval) != oval);
74189 + } while (atomic_cmpxchg_unchecked(&info->master->count, oval, nval) != oval);
74190 if (nval == 0)
74191 ret = !ret;
74192 break;
74193 @@ -64,7 +64,7 @@ static int statistic_mt_check(const struct xt_mtchk_param *par)
74194 info->master = kzalloc(sizeof(*info->master), GFP_KERNEL);
74195 if (info->master == NULL)
74196 return -ENOMEM;
74197 - atomic_set(&info->master->count, info->u.nth.count);
74198 + atomic_set_unchecked(&info->master->count, info->u.nth.count);
74199
74200 return 0;
74201 }
74202 diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
74203 index 1201b6d..bcff8c6 100644
74204 --- a/net/netlink/af_netlink.c
74205 +++ b/net/netlink/af_netlink.c
74206 @@ -742,7 +742,7 @@ static void netlink_overrun(struct sock *sk)
74207 sk->sk_error_report(sk);
74208 }
74209 }
74210 - atomic_inc(&sk->sk_drops);
74211 + atomic_inc_unchecked(&sk->sk_drops);
74212 }
74213
74214 static struct sock *netlink_getsockbypid(struct sock *ssk, u32 pid)
74215 @@ -1999,7 +1999,7 @@ static int netlink_seq_show(struct seq_file *seq, void *v)
74216 sk_wmem_alloc_get(s),
74217 nlk->cb,
74218 atomic_read(&s->sk_refcnt),
74219 - atomic_read(&s->sk_drops),
74220 + atomic_read_unchecked(&s->sk_drops),
74221 sock_i_ino(s)
74222 );
74223
74224 diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
74225 index 732152f..60bb09e 100644
74226 --- a/net/netrom/af_netrom.c
74227 +++ b/net/netrom/af_netrom.c
74228 @@ -839,6 +839,7 @@ static int nr_getname(struct socket *sock, struct sockaddr *uaddr,
74229 struct sock *sk = sock->sk;
74230 struct nr_sock *nr = nr_sk(sk);
74231
74232 + memset(sax, 0, sizeof(*sax));
74233 lock_sock(sk);
74234 if (peer != 0) {
74235 if (sk->sk_state != TCP_ESTABLISHED) {
74236 @@ -853,7 +854,6 @@ static int nr_getname(struct socket *sock, struct sockaddr *uaddr,
74237 *uaddr_len = sizeof(struct full_sockaddr_ax25);
74238 } else {
74239 sax->fsa_ax25.sax25_family = AF_NETROM;
74240 - sax->fsa_ax25.sax25_ndigis = 0;
74241 sax->fsa_ax25.sax25_call = nr->source_addr;
74242 *uaddr_len = sizeof(struct sockaddr_ax25);
74243 }
74244 diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
74245 index d9d4970..d5a6a68 100644
74246 --- a/net/packet/af_packet.c
74247 +++ b/net/packet/af_packet.c
74248 @@ -1675,7 +1675,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
74249
74250 spin_lock(&sk->sk_receive_queue.lock);
74251 po->stats.tp_packets++;
74252 - skb->dropcount = atomic_read(&sk->sk_drops);
74253 + skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
74254 __skb_queue_tail(&sk->sk_receive_queue, skb);
74255 spin_unlock(&sk->sk_receive_queue.lock);
74256 sk->sk_data_ready(sk, skb->len);
74257 @@ -1684,7 +1684,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
74258 drop_n_acct:
74259 spin_lock(&sk->sk_receive_queue.lock);
74260 po->stats.tp_drops++;
74261 - atomic_inc(&sk->sk_drops);
74262 + atomic_inc_unchecked(&sk->sk_drops);
74263 spin_unlock(&sk->sk_receive_queue.lock);
74264
74265 drop_n_restore:
74266 @@ -3266,7 +3266,7 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
74267 case PACKET_HDRLEN:
74268 if (len > sizeof(int))
74269 len = sizeof(int);
74270 - if (copy_from_user(&val, optval, len))
74271 + if (len > sizeof(val) || copy_from_user(&val, optval, len))
74272 return -EFAULT;
74273 switch (val) {
74274 case TPACKET_V1:
74275 @@ -3316,7 +3316,7 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
74276
74277 if (put_user(len, optlen))
74278 return -EFAULT;
74279 - if (copy_to_user(optval, data, len))
74280 + if (len > sizeof(st) || copy_to_user(optval, data, len))
74281 return -EFAULT;
74282 return 0;
74283 }
74284 diff --git a/net/phonet/af_phonet.c b/net/phonet/af_phonet.c
74285 index bf10ea8..aeb4c3e 100644
74286 --- a/net/phonet/af_phonet.c
74287 +++ b/net/phonet/af_phonet.c
74288 @@ -41,7 +41,7 @@ static struct phonet_protocol *phonet_proto_get(unsigned int protocol)
74289 {
74290 struct phonet_protocol *pp;
74291
74292 - if (protocol >= PHONET_NPROTO)
74293 + if (protocol < 0 || protocol >= PHONET_NPROTO)
74294 return NULL;
74295
74296 rcu_read_lock();
74297 @@ -469,7 +469,7 @@ int __init_or_module phonet_proto_register(unsigned int protocol,
74298 {
74299 int err = 0;
74300
74301 - if (protocol >= PHONET_NPROTO)
74302 + if (protocol < 0 || protocol >= PHONET_NPROTO)
74303 return -EINVAL;
74304
74305 err = proto_register(pp->prot, 1);
74306 diff --git a/net/phonet/pep.c b/net/phonet/pep.c
74307 index 2ba6e9f..409573f 100644
74308 --- a/net/phonet/pep.c
74309 +++ b/net/phonet/pep.c
74310 @@ -388,7 +388,7 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
74311
74312 case PNS_PEP_CTRL_REQ:
74313 if (skb_queue_len(&pn->ctrlreq_queue) >= PNPIPE_CTRLREQ_MAX) {
74314 - atomic_inc(&sk->sk_drops);
74315 + atomic_inc_unchecked(&sk->sk_drops);
74316 break;
74317 }
74318 __skb_pull(skb, 4);
74319 @@ -409,7 +409,7 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
74320 }
74321
74322 if (pn->rx_credits == 0) {
74323 - atomic_inc(&sk->sk_drops);
74324 + atomic_inc_unchecked(&sk->sk_drops);
74325 err = -ENOBUFS;
74326 break;
74327 }
74328 @@ -557,7 +557,7 @@ static int pipe_handler_do_rcv(struct sock *sk, struct sk_buff *skb)
74329 }
74330
74331 if (pn->rx_credits == 0) {
74332 - atomic_inc(&sk->sk_drops);
74333 + atomic_inc_unchecked(&sk->sk_drops);
74334 err = NET_RX_DROP;
74335 break;
74336 }
74337 diff --git a/net/phonet/socket.c b/net/phonet/socket.c
74338 index 3f8d0b1..74635e0 100644
74339 --- a/net/phonet/socket.c
74340 +++ b/net/phonet/socket.c
74341 @@ -613,8 +613,13 @@ static int pn_sock_seq_show(struct seq_file *seq, void *v)
74342 pn->resource, sk->sk_state,
74343 sk_wmem_alloc_get(sk), sk_rmem_alloc_get(sk),
74344 sock_i_uid(sk), sock_i_ino(sk),
74345 - atomic_read(&sk->sk_refcnt), sk,
74346 - atomic_read(&sk->sk_drops), &len);
74347 + atomic_read(&sk->sk_refcnt),
74348 +#ifdef CONFIG_GRKERNSEC_HIDESYM
74349 + NULL,
74350 +#else
74351 + sk,
74352 +#endif
74353 + atomic_read_unchecked(&sk->sk_drops), &len);
74354 }
74355 seq_printf(seq, "%*s\n", 127 - len, "");
74356 return 0;
74357 diff --git a/net/rds/cong.c b/net/rds/cong.c
74358 index e5b65ac..f3b6fb7 100644
74359 --- a/net/rds/cong.c
74360 +++ b/net/rds/cong.c
74361 @@ -78,7 +78,7 @@
74362 * finds that the saved generation number is smaller than the global generation
74363 * number, it wakes up the process.
74364 */
74365 -static atomic_t rds_cong_generation = ATOMIC_INIT(0);
74366 +static atomic_unchecked_t rds_cong_generation = ATOMIC_INIT(0);
74367
74368 /*
74369 * Congestion monitoring
74370 @@ -233,7 +233,7 @@ void rds_cong_map_updated(struct rds_cong_map *map, uint64_t portmask)
74371 rdsdebug("waking map %p for %pI4\n",
74372 map, &map->m_addr);
74373 rds_stats_inc(s_cong_update_received);
74374 - atomic_inc(&rds_cong_generation);
74375 + atomic_inc_unchecked(&rds_cong_generation);
74376 if (waitqueue_active(&map->m_waitq))
74377 wake_up(&map->m_waitq);
74378 if (waitqueue_active(&rds_poll_waitq))
74379 @@ -259,7 +259,7 @@ EXPORT_SYMBOL_GPL(rds_cong_map_updated);
74380
74381 int rds_cong_updated_since(unsigned long *recent)
74382 {
74383 - unsigned long gen = atomic_read(&rds_cong_generation);
74384 + unsigned long gen = atomic_read_unchecked(&rds_cong_generation);
74385
74386 if (likely(*recent == gen))
74387 return 0;
74388 diff --git a/net/rds/ib.h b/net/rds/ib.h
74389 index edfaaaf..8c89879 100644
74390 --- a/net/rds/ib.h
74391 +++ b/net/rds/ib.h
74392 @@ -128,7 +128,7 @@ struct rds_ib_connection {
74393 /* sending acks */
74394 unsigned long i_ack_flags;
74395 #ifdef KERNEL_HAS_ATOMIC64
74396 - atomic64_t i_ack_next; /* next ACK to send */
74397 + atomic64_unchecked_t i_ack_next; /* next ACK to send */
74398 #else
74399 spinlock_t i_ack_lock; /* protect i_ack_next */
74400 u64 i_ack_next; /* next ACK to send */
74401 diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
74402 index 51c8689..36c555f 100644
74403 --- a/net/rds/ib_cm.c
74404 +++ b/net/rds/ib_cm.c
74405 @@ -718,7 +718,7 @@ void rds_ib_conn_shutdown(struct rds_connection *conn)
74406 /* Clear the ACK state */
74407 clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
74408 #ifdef KERNEL_HAS_ATOMIC64
74409 - atomic64_set(&ic->i_ack_next, 0);
74410 + atomic64_set_unchecked(&ic->i_ack_next, 0);
74411 #else
74412 ic->i_ack_next = 0;
74413 #endif
74414 diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c
74415 index e29e0ca..fa3a6a3 100644
74416 --- a/net/rds/ib_recv.c
74417 +++ b/net/rds/ib_recv.c
74418 @@ -592,7 +592,7 @@ static u64 rds_ib_get_ack(struct rds_ib_connection *ic)
74419 static void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq,
74420 int ack_required)
74421 {
74422 - atomic64_set(&ic->i_ack_next, seq);
74423 + atomic64_set_unchecked(&ic->i_ack_next, seq);
74424 if (ack_required) {
74425 smp_mb__before_clear_bit();
74426 set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
74427 @@ -604,7 +604,7 @@ static u64 rds_ib_get_ack(struct rds_ib_connection *ic)
74428 clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
74429 smp_mb__after_clear_bit();
74430
74431 - return atomic64_read(&ic->i_ack_next);
74432 + return atomic64_read_unchecked(&ic->i_ack_next);
74433 }
74434 #endif
74435
74436 diff --git a/net/rds/iw.h b/net/rds/iw.h
74437 index 04ce3b1..48119a6 100644
74438 --- a/net/rds/iw.h
74439 +++ b/net/rds/iw.h
74440 @@ -134,7 +134,7 @@ struct rds_iw_connection {
74441 /* sending acks */
74442 unsigned long i_ack_flags;
74443 #ifdef KERNEL_HAS_ATOMIC64
74444 - atomic64_t i_ack_next; /* next ACK to send */
74445 + atomic64_unchecked_t i_ack_next; /* next ACK to send */
74446 #else
74447 spinlock_t i_ack_lock; /* protect i_ack_next */
74448 u64 i_ack_next; /* next ACK to send */
74449 diff --git a/net/rds/iw_cm.c b/net/rds/iw_cm.c
74450 index 9556d28..f046d0e 100644
74451 --- a/net/rds/iw_cm.c
74452 +++ b/net/rds/iw_cm.c
74453 @@ -663,7 +663,7 @@ void rds_iw_conn_shutdown(struct rds_connection *conn)
74454 /* Clear the ACK state */
74455 clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
74456 #ifdef KERNEL_HAS_ATOMIC64
74457 - atomic64_set(&ic->i_ack_next, 0);
74458 + atomic64_set_unchecked(&ic->i_ack_next, 0);
74459 #else
74460 ic->i_ack_next = 0;
74461 #endif
74462 diff --git a/net/rds/iw_recv.c b/net/rds/iw_recv.c
74463 index 5e57347..3916042 100644
74464 --- a/net/rds/iw_recv.c
74465 +++ b/net/rds/iw_recv.c
74466 @@ -427,7 +427,7 @@ static u64 rds_iw_get_ack(struct rds_iw_connection *ic)
74467 static void rds_iw_set_ack(struct rds_iw_connection *ic, u64 seq,
74468 int ack_required)
74469 {
74470 - atomic64_set(&ic->i_ack_next, seq);
74471 + atomic64_set_unchecked(&ic->i_ack_next, seq);
74472 if (ack_required) {
74473 smp_mb__before_clear_bit();
74474 set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
74475 @@ -439,7 +439,7 @@ static u64 rds_iw_get_ack(struct rds_iw_connection *ic)
74476 clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
74477 smp_mb__after_clear_bit();
74478
74479 - return atomic64_read(&ic->i_ack_next);
74480 + return atomic64_read_unchecked(&ic->i_ack_next);
74481 }
74482 #endif
74483
74484 diff --git a/net/rds/tcp.c b/net/rds/tcp.c
74485 index edac9ef..16bcb98 100644
74486 --- a/net/rds/tcp.c
74487 +++ b/net/rds/tcp.c
74488 @@ -59,7 +59,7 @@ void rds_tcp_nonagle(struct socket *sock)
74489 int val = 1;
74490
74491 set_fs(KERNEL_DS);
74492 - sock->ops->setsockopt(sock, SOL_TCP, TCP_NODELAY, (char __user *)&val,
74493 + sock->ops->setsockopt(sock, SOL_TCP, TCP_NODELAY, (char __force_user *)&val,
74494 sizeof(val));
74495 set_fs(oldfs);
74496 }
74497 diff --git a/net/rds/tcp_send.c b/net/rds/tcp_send.c
74498 index 1b4fd68..2234175 100644
74499 --- a/net/rds/tcp_send.c
74500 +++ b/net/rds/tcp_send.c
74501 @@ -43,7 +43,7 @@ static void rds_tcp_cork(struct socket *sock, int val)
74502
74503 oldfs = get_fs();
74504 set_fs(KERNEL_DS);
74505 - sock->ops->setsockopt(sock, SOL_TCP, TCP_CORK, (char __user *)&val,
74506 + sock->ops->setsockopt(sock, SOL_TCP, TCP_CORK, (char __force_user *)&val,
74507 sizeof(val));
74508 set_fs(oldfs);
74509 }
74510 diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
74511 index 74c064c..fdec26f 100644
74512 --- a/net/rxrpc/af_rxrpc.c
74513 +++ b/net/rxrpc/af_rxrpc.c
74514 @@ -39,7 +39,7 @@ static const struct proto_ops rxrpc_rpc_ops;
74515 __be32 rxrpc_epoch;
74516
74517 /* current debugging ID */
74518 -atomic_t rxrpc_debug_id;
74519 +atomic_unchecked_t rxrpc_debug_id;
74520
74521 /* count of skbs currently in use */
74522 atomic_t rxrpc_n_skbs;
74523 diff --git a/net/rxrpc/ar-ack.c b/net/rxrpc/ar-ack.c
74524 index f99cfce..cc529dd 100644
74525 --- a/net/rxrpc/ar-ack.c
74526 +++ b/net/rxrpc/ar-ack.c
74527 @@ -175,7 +175,7 @@ static void rxrpc_resend(struct rxrpc_call *call)
74528
74529 _enter("{%d,%d,%d,%d},",
74530 call->acks_hard, call->acks_unacked,
74531 - atomic_read(&call->sequence),
74532 + atomic_read_unchecked(&call->sequence),
74533 CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz));
74534
74535 stop = 0;
74536 @@ -199,7 +199,7 @@ static void rxrpc_resend(struct rxrpc_call *call)
74537
74538 /* each Tx packet has a new serial number */
74539 sp->hdr.serial =
74540 - htonl(atomic_inc_return(&call->conn->serial));
74541 + htonl(atomic_inc_return_unchecked(&call->conn->serial));
74542
74543 hdr = (struct rxrpc_header *) txb->head;
74544 hdr->serial = sp->hdr.serial;
74545 @@ -403,7 +403,7 @@ static void rxrpc_rotate_tx_window(struct rxrpc_call *call, u32 hard)
74546 */
74547 static void rxrpc_clear_tx_window(struct rxrpc_call *call)
74548 {
74549 - rxrpc_rotate_tx_window(call, atomic_read(&call->sequence));
74550 + rxrpc_rotate_tx_window(call, atomic_read_unchecked(&call->sequence));
74551 }
74552
74553 /*
74554 @@ -629,7 +629,7 @@ process_further:
74555
74556 latest = ntohl(sp->hdr.serial);
74557 hard = ntohl(ack.firstPacket);
74558 - tx = atomic_read(&call->sequence);
74559 + tx = atomic_read_unchecked(&call->sequence);
74560
74561 _proto("Rx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
74562 latest,
74563 @@ -1161,7 +1161,7 @@ void rxrpc_process_call(struct work_struct *work)
74564 goto maybe_reschedule;
74565
74566 send_ACK_with_skew:
74567 - ack.maxSkew = htons(atomic_read(&call->conn->hi_serial) -
74568 + ack.maxSkew = htons(atomic_read_unchecked(&call->conn->hi_serial) -
74569 ntohl(ack.serial));
74570 send_ACK:
74571 mtu = call->conn->trans->peer->if_mtu;
74572 @@ -1173,7 +1173,7 @@ send_ACK:
74573 ackinfo.rxMTU = htonl(5692);
74574 ackinfo.jumbo_max = htonl(4);
74575
74576 - hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
74577 + hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
74578 _proto("Tx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
74579 ntohl(hdr.serial),
74580 ntohs(ack.maxSkew),
74581 @@ -1191,7 +1191,7 @@ send_ACK:
74582 send_message:
74583 _debug("send message");
74584
74585 - hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
74586 + hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
74587 _proto("Tx %s %%%u", rxrpc_pkts[hdr.type], ntohl(hdr.serial));
74588 send_message_2:
74589
74590 diff --git a/net/rxrpc/ar-call.c b/net/rxrpc/ar-call.c
74591 index bf656c2..48f9d27 100644
74592 --- a/net/rxrpc/ar-call.c
74593 +++ b/net/rxrpc/ar-call.c
74594 @@ -83,7 +83,7 @@ static struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
74595 spin_lock_init(&call->lock);
74596 rwlock_init(&call->state_lock);
74597 atomic_set(&call->usage, 1);
74598 - call->debug_id = atomic_inc_return(&rxrpc_debug_id);
74599 + call->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
74600 call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;
74601
74602 memset(&call->sock_node, 0xed, sizeof(call->sock_node));
74603 diff --git a/net/rxrpc/ar-connection.c b/net/rxrpc/ar-connection.c
74604 index 4106ca9..a338d7a 100644
74605 --- a/net/rxrpc/ar-connection.c
74606 +++ b/net/rxrpc/ar-connection.c
74607 @@ -206,7 +206,7 @@ static struct rxrpc_connection *rxrpc_alloc_connection(gfp_t gfp)
74608 rwlock_init(&conn->lock);
74609 spin_lock_init(&conn->state_lock);
74610 atomic_set(&conn->usage, 1);
74611 - conn->debug_id = atomic_inc_return(&rxrpc_debug_id);
74612 + conn->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
74613 conn->avail_calls = RXRPC_MAXCALLS;
74614 conn->size_align = 4;
74615 conn->header_size = sizeof(struct rxrpc_header);
74616 diff --git a/net/rxrpc/ar-connevent.c b/net/rxrpc/ar-connevent.c
74617 index e7ed43a..6afa140 100644
74618 --- a/net/rxrpc/ar-connevent.c
74619 +++ b/net/rxrpc/ar-connevent.c
74620 @@ -109,7 +109,7 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn,
74621
74622 len = iov[0].iov_len + iov[1].iov_len;
74623
74624 - hdr.serial = htonl(atomic_inc_return(&conn->serial));
74625 + hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
74626 _proto("Tx CONN ABORT %%%u { %d }", ntohl(hdr.serial), abort_code);
74627
74628 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
74629 diff --git a/net/rxrpc/ar-input.c b/net/rxrpc/ar-input.c
74630 index 1a2b0633..e8d1382 100644
74631 --- a/net/rxrpc/ar-input.c
74632 +++ b/net/rxrpc/ar-input.c
74633 @@ -340,9 +340,9 @@ void rxrpc_fast_process_packet(struct rxrpc_call *call, struct sk_buff *skb)
74634 /* track the latest serial number on this connection for ACK packet
74635 * information */
74636 serial = ntohl(sp->hdr.serial);
74637 - hi_serial = atomic_read(&call->conn->hi_serial);
74638 + hi_serial = atomic_read_unchecked(&call->conn->hi_serial);
74639 while (serial > hi_serial)
74640 - hi_serial = atomic_cmpxchg(&call->conn->hi_serial, hi_serial,
74641 + hi_serial = atomic_cmpxchg_unchecked(&call->conn->hi_serial, hi_serial,
74642 serial);
74643
74644 /* request ACK generation for any ACK or DATA packet that requests
74645 diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
74646 index 8e22bd3..f66d1c0 100644
74647 --- a/net/rxrpc/ar-internal.h
74648 +++ b/net/rxrpc/ar-internal.h
74649 @@ -272,8 +272,8 @@ struct rxrpc_connection {
74650 int error; /* error code for local abort */
74651 int debug_id; /* debug ID for printks */
74652 unsigned call_counter; /* call ID counter */
74653 - atomic_t serial; /* packet serial number counter */
74654 - atomic_t hi_serial; /* highest serial number received */
74655 + atomic_unchecked_t serial; /* packet serial number counter */
74656 + atomic_unchecked_t hi_serial; /* highest serial number received */
74657 u8 avail_calls; /* number of calls available */
74658 u8 size_align; /* data size alignment (for security) */
74659 u8 header_size; /* rxrpc + security header size */
74660 @@ -346,7 +346,7 @@ struct rxrpc_call {
74661 spinlock_t lock;
74662 rwlock_t state_lock; /* lock for state transition */
74663 atomic_t usage;
74664 - atomic_t sequence; /* Tx data packet sequence counter */
74665 + atomic_unchecked_t sequence; /* Tx data packet sequence counter */
74666 u32 abort_code; /* local/remote abort code */
74667 enum { /* current state of call */
74668 RXRPC_CALL_CLIENT_SEND_REQUEST, /* - client sending request phase */
74669 @@ -420,7 +420,7 @@ static inline void rxrpc_abort_call(struct rxrpc_call *call, u32 abort_code)
74670 */
74671 extern atomic_t rxrpc_n_skbs;
74672 extern __be32 rxrpc_epoch;
74673 -extern atomic_t rxrpc_debug_id;
74674 +extern atomic_unchecked_t rxrpc_debug_id;
74675 extern struct workqueue_struct *rxrpc_workqueue;
74676
74677 /*
74678 diff --git a/net/rxrpc/ar-local.c b/net/rxrpc/ar-local.c
74679 index 87f7135..74d3703 100644
74680 --- a/net/rxrpc/ar-local.c
74681 +++ b/net/rxrpc/ar-local.c
74682 @@ -45,7 +45,7 @@ struct rxrpc_local *rxrpc_alloc_local(struct sockaddr_rxrpc *srx)
74683 spin_lock_init(&local->lock);
74684 rwlock_init(&local->services_lock);
74685 atomic_set(&local->usage, 1);
74686 - local->debug_id = atomic_inc_return(&rxrpc_debug_id);
74687 + local->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
74688 memcpy(&local->srx, srx, sizeof(*srx));
74689 }
74690
74691 diff --git a/net/rxrpc/ar-output.c b/net/rxrpc/ar-output.c
74692 index 338d793..47391d0 100644
74693 --- a/net/rxrpc/ar-output.c
74694 +++ b/net/rxrpc/ar-output.c
74695 @@ -682,9 +682,9 @@ static int rxrpc_send_data(struct kiocb *iocb,
74696 sp->hdr.cid = call->cid;
74697 sp->hdr.callNumber = call->call_id;
74698 sp->hdr.seq =
74699 - htonl(atomic_inc_return(&call->sequence));
74700 + htonl(atomic_inc_return_unchecked(&call->sequence));
74701 sp->hdr.serial =
74702 - htonl(atomic_inc_return(&conn->serial));
74703 + htonl(atomic_inc_return_unchecked(&conn->serial));
74704 sp->hdr.type = RXRPC_PACKET_TYPE_DATA;
74705 sp->hdr.userStatus = 0;
74706 sp->hdr.securityIndex = conn->security_ix;
74707 diff --git a/net/rxrpc/ar-peer.c b/net/rxrpc/ar-peer.c
74708 index 2754f09..b20e38f 100644
74709 --- a/net/rxrpc/ar-peer.c
74710 +++ b/net/rxrpc/ar-peer.c
74711 @@ -72,7 +72,7 @@ static struct rxrpc_peer *rxrpc_alloc_peer(struct sockaddr_rxrpc *srx,
74712 INIT_LIST_HEAD(&peer->error_targets);
74713 spin_lock_init(&peer->lock);
74714 atomic_set(&peer->usage, 1);
74715 - peer->debug_id = atomic_inc_return(&rxrpc_debug_id);
74716 + peer->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
74717 memcpy(&peer->srx, srx, sizeof(*srx));
74718
74719 rxrpc_assess_MTU_size(peer);
74720 diff --git a/net/rxrpc/ar-proc.c b/net/rxrpc/ar-proc.c
74721 index 38047f7..9f48511 100644
74722 --- a/net/rxrpc/ar-proc.c
74723 +++ b/net/rxrpc/ar-proc.c
74724 @@ -164,8 +164,8 @@ static int rxrpc_connection_seq_show(struct seq_file *seq, void *v)
74725 atomic_read(&conn->usage),
74726 rxrpc_conn_states[conn->state],
74727 key_serial(conn->key),
74728 - atomic_read(&conn->serial),
74729 - atomic_read(&conn->hi_serial));
74730 + atomic_read_unchecked(&conn->serial),
74731 + atomic_read_unchecked(&conn->hi_serial));
74732
74733 return 0;
74734 }
74735 diff --git a/net/rxrpc/ar-transport.c b/net/rxrpc/ar-transport.c
74736 index 92df566..87ec1bf 100644
74737 --- a/net/rxrpc/ar-transport.c
74738 +++ b/net/rxrpc/ar-transport.c
74739 @@ -47,7 +47,7 @@ static struct rxrpc_transport *rxrpc_alloc_transport(struct rxrpc_local *local,
74740 spin_lock_init(&trans->client_lock);
74741 rwlock_init(&trans->conn_lock);
74742 atomic_set(&trans->usage, 1);
74743 - trans->debug_id = atomic_inc_return(&rxrpc_debug_id);
74744 + trans->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
74745
74746 if (peer->srx.transport.family == AF_INET) {
74747 switch (peer->srx.transport_type) {
74748 diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c
74749 index 7635107..4670276 100644
74750 --- a/net/rxrpc/rxkad.c
74751 +++ b/net/rxrpc/rxkad.c
74752 @@ -610,7 +610,7 @@ static int rxkad_issue_challenge(struct rxrpc_connection *conn)
74753
74754 len = iov[0].iov_len + iov[1].iov_len;
74755
74756 - hdr.serial = htonl(atomic_inc_return(&conn->serial));
74757 + hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
74758 _proto("Tx CHALLENGE %%%u", ntohl(hdr.serial));
74759
74760 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
74761 @@ -660,7 +660,7 @@ static int rxkad_send_response(struct rxrpc_connection *conn,
74762
74763 len = iov[0].iov_len + iov[1].iov_len + iov[2].iov_len;
74764
74765 - hdr->serial = htonl(atomic_inc_return(&conn->serial));
74766 + hdr->serial = htonl(atomic_inc_return_unchecked(&conn->serial));
74767 _proto("Tx RESPONSE %%%u", ntohl(hdr->serial));
74768
74769 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 3, len);
74770 diff --git a/net/sctp/proc.c b/net/sctp/proc.c
74771 index 1e2eee8..ce3967e 100644
74772 --- a/net/sctp/proc.c
74773 +++ b/net/sctp/proc.c
74774 @@ -319,7 +319,8 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
74775 seq_printf(seq,
74776 "%8pK %8pK %-3d %-3d %-2d %-4d "
74777 "%4d %8d %8d %7d %5lu %-5d %5d ",
74778 - assoc, sk, sctp_sk(sk)->type, sk->sk_state,
74779 + assoc, sk,
74780 + sctp_sk(sk)->type, sk->sk_state,
74781 assoc->state, hash,
74782 assoc->assoc_id,
74783 assoc->sndbuf_used,
74784 diff --git a/net/sctp/socket.c b/net/sctp/socket.c
74785 index 54a7cd2..944edae 100644
74786 --- a/net/sctp/socket.c
74787 +++ b/net/sctp/socket.c
74788 @@ -4574,7 +4574,7 @@ static int sctp_getsockopt_peer_addrs(struct sock *sk, int len,
74789 addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len;
74790 if (space_left < addrlen)
74791 return -ENOMEM;
74792 - if (copy_to_user(to, &temp, addrlen))
74793 + if (addrlen > sizeof(temp) || copy_to_user(to, &temp, addrlen))
74794 return -EFAULT;
74795 to += addrlen;
74796 cnt++;
74797 diff --git a/net/socket.c b/net/socket.c
74798 index 2877647..08e2fde 100644
74799 --- a/net/socket.c
74800 +++ b/net/socket.c
74801 @@ -88,6 +88,7 @@
74802 #include <linux/nsproxy.h>
74803 #include <linux/magic.h>
74804 #include <linux/slab.h>
74805 +#include <linux/in.h>
74806
74807 #include <asm/uaccess.h>
74808 #include <asm/unistd.h>
74809 @@ -105,6 +106,8 @@
74810 #include <linux/sockios.h>
74811 #include <linux/atalk.h>
74812
74813 +#include <linux/grsock.h>
74814 +
74815 static int sock_no_open(struct inode *irrelevant, struct file *dontcare);
74816 static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov,
74817 unsigned long nr_segs, loff_t pos);
74818 @@ -321,7 +324,7 @@ static struct dentry *sockfs_mount(struct file_system_type *fs_type,
74819 &sockfs_dentry_operations, SOCKFS_MAGIC);
74820 }
74821
74822 -static struct vfsmount *sock_mnt __read_mostly;
74823 +struct vfsmount *sock_mnt __read_mostly;
74824
74825 static struct file_system_type sock_fs_type = {
74826 .name = "sockfs",
74827 @@ -1187,6 +1190,8 @@ int __sock_create(struct net *net, int family, int type, int protocol,
74828 return -EAFNOSUPPORT;
74829 if (type < 0 || type >= SOCK_MAX)
74830 return -EINVAL;
74831 + if (protocol < 0)
74832 + return -EINVAL;
74833
74834 /* Compatibility.
74835
74836 @@ -1319,6 +1324,16 @@ SYSCALL_DEFINE3(socket, int, family, int, type, int, protocol)
74837 if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK))
74838 flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
74839
74840 + if(!gr_search_socket(family, type, protocol)) {
74841 + retval = -EACCES;
74842 + goto out;
74843 + }
74844 +
74845 + if (gr_handle_sock_all(family, type, protocol)) {
74846 + retval = -EACCES;
74847 + goto out;
74848 + }
74849 +
74850 retval = sock_create(family, type, protocol, &sock);
74851 if (retval < 0)
74852 goto out;
74853 @@ -1431,6 +1446,14 @@ SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen)
74854 if (sock) {
74855 err = move_addr_to_kernel(umyaddr, addrlen, (struct sockaddr *)&address);
74856 if (err >= 0) {
74857 + if (gr_handle_sock_server((struct sockaddr *)&address)) {
74858 + err = -EACCES;
74859 + goto error;
74860 + }
74861 + err = gr_search_bind(sock, (struct sockaddr_in *)&address);
74862 + if (err)
74863 + goto error;
74864 +
74865 err = security_socket_bind(sock,
74866 (struct sockaddr *)&address,
74867 addrlen);
74868 @@ -1439,6 +1462,7 @@ SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen)
74869 (struct sockaddr *)
74870 &address, addrlen);
74871 }
74872 +error:
74873 fput_light(sock->file, fput_needed);
74874 }
74875 return err;
74876 @@ -1462,10 +1486,20 @@ SYSCALL_DEFINE2(listen, int, fd, int, backlog)
74877 if ((unsigned)backlog > somaxconn)
74878 backlog = somaxconn;
74879
74880 + if (gr_handle_sock_server_other(sock->sk)) {
74881 + err = -EPERM;
74882 + goto error;
74883 + }
74884 +
74885 + err = gr_search_listen(sock);
74886 + if (err)
74887 + goto error;
74888 +
74889 err = security_socket_listen(sock, backlog);
74890 if (!err)
74891 err = sock->ops->listen(sock, backlog);
74892
74893 +error:
74894 fput_light(sock->file, fput_needed);
74895 }
74896 return err;
74897 @@ -1509,6 +1543,18 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
74898 newsock->type = sock->type;
74899 newsock->ops = sock->ops;
74900
74901 + if (gr_handle_sock_server_other(sock->sk)) {
74902 + err = -EPERM;
74903 + sock_release(newsock);
74904 + goto out_put;
74905 + }
74906 +
74907 + err = gr_search_accept(sock);
74908 + if (err) {
74909 + sock_release(newsock);
74910 + goto out_put;
74911 + }
74912 +
74913 /*
74914 * We don't need try_module_get here, as the listening socket (sock)
74915 * has the protocol module (sock->ops->owner) held.
74916 @@ -1547,6 +1593,8 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
74917 fd_install(newfd, newfile);
74918 err = newfd;
74919
74920 + gr_attach_curr_ip(newsock->sk);
74921 +
74922 out_put:
74923 fput_light(sock->file, fput_needed);
74924 out:
74925 @@ -1579,6 +1627,7 @@ SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr,
74926 int, addrlen)
74927 {
74928 struct socket *sock;
74929 + struct sockaddr *sck;
74930 struct sockaddr_storage address;
74931 int err, fput_needed;
74932
74933 @@ -1589,6 +1638,17 @@ SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr,
74934 if (err < 0)
74935 goto out_put;
74936
74937 + sck = (struct sockaddr *)&address;
74938 +
74939 + if (gr_handle_sock_client(sck)) {
74940 + err = -EACCES;
74941 + goto out_put;
74942 + }
74943 +
74944 + err = gr_search_connect(sock, (struct sockaddr_in *)sck);
74945 + if (err)
74946 + goto out_put;
74947 +
74948 err =
74949 security_socket_connect(sock, (struct sockaddr *)&address, addrlen);
74950 if (err)
74951 @@ -1950,7 +2010,7 @@ static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg,
74952 * checking falls down on this.
74953 */
74954 if (copy_from_user(ctl_buf,
74955 - (void __user __force *)msg_sys->msg_control,
74956 + (void __force_user *)msg_sys->msg_control,
74957 ctl_len))
74958 goto out_freectl;
74959 msg_sys->msg_control = ctl_buf;
74960 @@ -2120,7 +2180,7 @@ static int __sys_recvmsg(struct socket *sock, struct msghdr __user *msg,
74961 * kernel msghdr to use the kernel address space)
74962 */
74963
74964 - uaddr = (__force void __user *)msg_sys->msg_name;
74965 + uaddr = (void __force_user *)msg_sys->msg_name;
74966 uaddr_len = COMPAT_NAMELEN(msg);
74967 if (MSG_CMSG_COMPAT & flags) {
74968 err = verify_compat_iovec(msg_sys, iov,
74969 @@ -2748,7 +2808,7 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
74970 }
74971
74972 ifr = compat_alloc_user_space(buf_size);
74973 - rxnfc = (void *)ifr + ALIGN(sizeof(struct ifreq), 8);
74974 + rxnfc = (void __user *)ifr + ALIGN(sizeof(struct ifreq), 8);
74975
74976 if (copy_in_user(&ifr->ifr_name, &ifr32->ifr_name, IFNAMSIZ))
74977 return -EFAULT;
74978 @@ -2772,12 +2832,12 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
74979 offsetof(struct ethtool_rxnfc, fs.ring_cookie));
74980
74981 if (copy_in_user(rxnfc, compat_rxnfc,
74982 - (void *)(&rxnfc->fs.m_ext + 1) -
74983 - (void *)rxnfc) ||
74984 + (void __user *)(&rxnfc->fs.m_ext + 1) -
74985 + (void __user *)rxnfc) ||
74986 copy_in_user(&rxnfc->fs.ring_cookie,
74987 &compat_rxnfc->fs.ring_cookie,
74988 - (void *)(&rxnfc->fs.location + 1) -
74989 - (void *)&rxnfc->fs.ring_cookie) ||
74990 + (void __user *)(&rxnfc->fs.location + 1) -
74991 + (void __user *)&rxnfc->fs.ring_cookie) ||
74992 copy_in_user(&rxnfc->rule_cnt, &compat_rxnfc->rule_cnt,
74993 sizeof(rxnfc->rule_cnt)))
74994 return -EFAULT;
74995 @@ -2789,12 +2849,12 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
74996
74997 if (convert_out) {
74998 if (copy_in_user(compat_rxnfc, rxnfc,
74999 - (const void *)(&rxnfc->fs.m_ext + 1) -
75000 - (const void *)rxnfc) ||
75001 + (const void __user *)(&rxnfc->fs.m_ext + 1) -
75002 + (const void __user *)rxnfc) ||
75003 copy_in_user(&compat_rxnfc->fs.ring_cookie,
75004 &rxnfc->fs.ring_cookie,
75005 - (const void *)(&rxnfc->fs.location + 1) -
75006 - (const void *)&rxnfc->fs.ring_cookie) ||
75007 + (const void __user *)(&rxnfc->fs.location + 1) -
75008 + (const void __user *)&rxnfc->fs.ring_cookie) ||
75009 copy_in_user(&compat_rxnfc->rule_cnt, &rxnfc->rule_cnt,
75010 sizeof(rxnfc->rule_cnt)))
75011 return -EFAULT;
75012 @@ -2864,7 +2924,7 @@ static int bond_ioctl(struct net *net, unsigned int cmd,
75013 old_fs = get_fs();
75014 set_fs(KERNEL_DS);
75015 err = dev_ioctl(net, cmd,
75016 - (struct ifreq __user __force *) &kifr);
75017 + (struct ifreq __force_user *) &kifr);
75018 set_fs(old_fs);
75019
75020 return err;
75021 @@ -2973,7 +3033,7 @@ static int compat_sioc_ifmap(struct net *net, unsigned int cmd,
75022
75023 old_fs = get_fs();
75024 set_fs(KERNEL_DS);
75025 - err = dev_ioctl(net, cmd, (void __user __force *)&ifr);
75026 + err = dev_ioctl(net, cmd, (void __force_user *)&ifr);
75027 set_fs(old_fs);
75028
75029 if (cmd == SIOCGIFMAP && !err) {
75030 @@ -3078,7 +3138,7 @@ static int routing_ioctl(struct net *net, struct socket *sock,
75031 ret |= __get_user(rtdev, &(ur4->rt_dev));
75032 if (rtdev) {
75033 ret |= copy_from_user(devname, compat_ptr(rtdev), 15);
75034 - r4.rt_dev = (char __user __force *)devname;
75035 + r4.rt_dev = (char __force_user *)devname;
75036 devname[15] = 0;
75037 } else
75038 r4.rt_dev = NULL;
75039 @@ -3318,8 +3378,8 @@ int kernel_getsockopt(struct socket *sock, int level, int optname,
75040 int __user *uoptlen;
75041 int err;
75042
75043 - uoptval = (char __user __force *) optval;
75044 - uoptlen = (int __user __force *) optlen;
75045 + uoptval = (char __force_user *) optval;
75046 + uoptlen = (int __force_user *) optlen;
75047
75048 set_fs(KERNEL_DS);
75049 if (level == SOL_SOCKET)
75050 @@ -3339,7 +3399,7 @@ int kernel_setsockopt(struct socket *sock, int level, int optname,
75051 char __user *uoptval;
75052 int err;
75053
75054 - uoptval = (char __user __force *) optval;
75055 + uoptval = (char __force_user *) optval;
75056
75057 set_fs(KERNEL_DS);
75058 if (level == SOL_SOCKET)
75059 diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
75060 index 00a1a2a..6a0138a 100644
75061 --- a/net/sunrpc/sched.c
75062 +++ b/net/sunrpc/sched.c
75063 @@ -238,9 +238,9 @@ static int rpc_wait_bit_killable(void *word)
75064 #ifdef RPC_DEBUG
75065 static void rpc_task_set_debuginfo(struct rpc_task *task)
75066 {
75067 - static atomic_t rpc_pid;
75068 + static atomic_unchecked_t rpc_pid;
75069
75070 - task->tk_pid = atomic_inc_return(&rpc_pid);
75071 + task->tk_pid = atomic_inc_return_unchecked(&rpc_pid);
75072 }
75073 #else
75074 static inline void rpc_task_set_debuginfo(struct rpc_task *task)
75075 diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
75076 index 71bed1c..5dff36d 100644
75077 --- a/net/sunrpc/svcsock.c
75078 +++ b/net/sunrpc/svcsock.c
75079 @@ -396,7 +396,7 @@ static int svc_partial_recvfrom(struct svc_rqst *rqstp,
75080 int buflen, unsigned int base)
75081 {
75082 size_t save_iovlen;
75083 - void __user *save_iovbase;
75084 + void *save_iovbase;
75085 unsigned int i;
75086 int ret;
75087
75088 diff --git a/net/sunrpc/xprtrdma/svc_rdma.c b/net/sunrpc/xprtrdma/svc_rdma.c
75089 index 09af4fa..77110a9 100644
75090 --- a/net/sunrpc/xprtrdma/svc_rdma.c
75091 +++ b/net/sunrpc/xprtrdma/svc_rdma.c
75092 @@ -61,15 +61,15 @@ unsigned int svcrdma_max_req_size = RPCRDMA_MAX_REQ_SIZE;
75093 static unsigned int min_max_inline = 4096;
75094 static unsigned int max_max_inline = 65536;
75095
75096 -atomic_t rdma_stat_recv;
75097 -atomic_t rdma_stat_read;
75098 -atomic_t rdma_stat_write;
75099 -atomic_t rdma_stat_sq_starve;
75100 -atomic_t rdma_stat_rq_starve;
75101 -atomic_t rdma_stat_rq_poll;
75102 -atomic_t rdma_stat_rq_prod;
75103 -atomic_t rdma_stat_sq_poll;
75104 -atomic_t rdma_stat_sq_prod;
75105 +atomic_unchecked_t rdma_stat_recv;
75106 +atomic_unchecked_t rdma_stat_read;
75107 +atomic_unchecked_t rdma_stat_write;
75108 +atomic_unchecked_t rdma_stat_sq_starve;
75109 +atomic_unchecked_t rdma_stat_rq_starve;
75110 +atomic_unchecked_t rdma_stat_rq_poll;
75111 +atomic_unchecked_t rdma_stat_rq_prod;
75112 +atomic_unchecked_t rdma_stat_sq_poll;
75113 +atomic_unchecked_t rdma_stat_sq_prod;
75114
75115 /* Temporary NFS request map and context caches */
75116 struct kmem_cache *svc_rdma_map_cachep;
75117 @@ -109,7 +109,7 @@ static int read_reset_stat(ctl_table *table, int write,
75118 len -= *ppos;
75119 if (len > *lenp)
75120 len = *lenp;
75121 - if (len && copy_to_user(buffer, str_buf, len))
75122 + if (len > sizeof str_buf || (len && copy_to_user(buffer, str_buf, len)))
75123 return -EFAULT;
75124 *lenp = len;
75125 *ppos += len;
75126 @@ -150,63 +150,63 @@ static ctl_table svcrdma_parm_table[] = {
75127 {
75128 .procname = "rdma_stat_read",
75129 .data = &rdma_stat_read,
75130 - .maxlen = sizeof(atomic_t),
75131 + .maxlen = sizeof(atomic_unchecked_t),
75132 .mode = 0644,
75133 .proc_handler = read_reset_stat,
75134 },
75135 {
75136 .procname = "rdma_stat_recv",
75137 .data = &rdma_stat_recv,
75138 - .maxlen = sizeof(atomic_t),
75139 + .maxlen = sizeof(atomic_unchecked_t),
75140 .mode = 0644,
75141 .proc_handler = read_reset_stat,
75142 },
75143 {
75144 .procname = "rdma_stat_write",
75145 .data = &rdma_stat_write,
75146 - .maxlen = sizeof(atomic_t),
75147 + .maxlen = sizeof(atomic_unchecked_t),
75148 .mode = 0644,
75149 .proc_handler = read_reset_stat,
75150 },
75151 {
75152 .procname = "rdma_stat_sq_starve",
75153 .data = &rdma_stat_sq_starve,
75154 - .maxlen = sizeof(atomic_t),
75155 + .maxlen = sizeof(atomic_unchecked_t),
75156 .mode = 0644,
75157 .proc_handler = read_reset_stat,
75158 },
75159 {
75160 .procname = "rdma_stat_rq_starve",
75161 .data = &rdma_stat_rq_starve,
75162 - .maxlen = sizeof(atomic_t),
75163 + .maxlen = sizeof(atomic_unchecked_t),
75164 .mode = 0644,
75165 .proc_handler = read_reset_stat,
75166 },
75167 {
75168 .procname = "rdma_stat_rq_poll",
75169 .data = &rdma_stat_rq_poll,
75170 - .maxlen = sizeof(atomic_t),
75171 + .maxlen = sizeof(atomic_unchecked_t),
75172 .mode = 0644,
75173 .proc_handler = read_reset_stat,
75174 },
75175 {
75176 .procname = "rdma_stat_rq_prod",
75177 .data = &rdma_stat_rq_prod,
75178 - .maxlen = sizeof(atomic_t),
75179 + .maxlen = sizeof(atomic_unchecked_t),
75180 .mode = 0644,
75181 .proc_handler = read_reset_stat,
75182 },
75183 {
75184 .procname = "rdma_stat_sq_poll",
75185 .data = &rdma_stat_sq_poll,
75186 - .maxlen = sizeof(atomic_t),
75187 + .maxlen = sizeof(atomic_unchecked_t),
75188 .mode = 0644,
75189 .proc_handler = read_reset_stat,
75190 },
75191 {
75192 .procname = "rdma_stat_sq_prod",
75193 .data = &rdma_stat_sq_prod,
75194 - .maxlen = sizeof(atomic_t),
75195 + .maxlen = sizeof(atomic_unchecked_t),
75196 .mode = 0644,
75197 .proc_handler = read_reset_stat,
75198 },
75199 diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
75200 index df67211..c354b13 100644
75201 --- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
75202 +++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
75203 @@ -499,7 +499,7 @@ next_sge:
75204 svc_rdma_put_context(ctxt, 0);
75205 goto out;
75206 }
75207 - atomic_inc(&rdma_stat_read);
75208 + atomic_inc_unchecked(&rdma_stat_read);
75209
75210 if (read_wr.num_sge < chl_map->ch[ch_no].count) {
75211 chl_map->ch[ch_no].count -= read_wr.num_sge;
75212 @@ -609,7 +609,7 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
75213 dto_q);
75214 list_del_init(&ctxt->dto_q);
75215 } else {
75216 - atomic_inc(&rdma_stat_rq_starve);
75217 + atomic_inc_unchecked(&rdma_stat_rq_starve);
75218 clear_bit(XPT_DATA, &xprt->xpt_flags);
75219 ctxt = NULL;
75220 }
75221 @@ -629,7 +629,7 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
75222 dprintk("svcrdma: processing ctxt=%p on xprt=%p, rqstp=%p, status=%d\n",
75223 ctxt, rdma_xprt, rqstp, ctxt->wc_status);
75224 BUG_ON(ctxt->wc_status != IB_WC_SUCCESS);
75225 - atomic_inc(&rdma_stat_recv);
75226 + atomic_inc_unchecked(&rdma_stat_recv);
75227
75228 /* Build up the XDR from the receive buffers. */
75229 rdma_build_arg_xdr(rqstp, ctxt, ctxt->byte_len);
75230 diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
75231 index 249a835..fb2794b 100644
75232 --- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
75233 +++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
75234 @@ -362,7 +362,7 @@ static int send_write(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp,
75235 write_wr.wr.rdma.remote_addr = to;
75236
75237 /* Post It */
75238 - atomic_inc(&rdma_stat_write);
75239 + atomic_inc_unchecked(&rdma_stat_write);
75240 if (svc_rdma_send(xprt, &write_wr))
75241 goto err;
75242 return 0;
75243 diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
75244 index ba1296d..0fec1a5 100644
75245 --- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
75246 +++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
75247 @@ -300,7 +300,7 @@ static void rq_cq_reap(struct svcxprt_rdma *xprt)
75248 return;
75249
75250 ib_req_notify_cq(xprt->sc_rq_cq, IB_CQ_NEXT_COMP);
75251 - atomic_inc(&rdma_stat_rq_poll);
75252 + atomic_inc_unchecked(&rdma_stat_rq_poll);
75253
75254 while ((ret = ib_poll_cq(xprt->sc_rq_cq, 1, &wc)) > 0) {
75255 ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id;
75256 @@ -322,7 +322,7 @@ static void rq_cq_reap(struct svcxprt_rdma *xprt)
75257 }
75258
75259 if (ctxt)
75260 - atomic_inc(&rdma_stat_rq_prod);
75261 + atomic_inc_unchecked(&rdma_stat_rq_prod);
75262
75263 set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
75264 /*
75265 @@ -394,7 +394,7 @@ static void sq_cq_reap(struct svcxprt_rdma *xprt)
75266 return;
75267
75268 ib_req_notify_cq(xprt->sc_sq_cq, IB_CQ_NEXT_COMP);
75269 - atomic_inc(&rdma_stat_sq_poll);
75270 + atomic_inc_unchecked(&rdma_stat_sq_poll);
75271 while ((ret = ib_poll_cq(cq, 1, &wc)) > 0) {
75272 if (wc.status != IB_WC_SUCCESS)
75273 /* Close the transport */
75274 @@ -412,7 +412,7 @@ static void sq_cq_reap(struct svcxprt_rdma *xprt)
75275 }
75276
75277 if (ctxt)
75278 - atomic_inc(&rdma_stat_sq_prod);
75279 + atomic_inc_unchecked(&rdma_stat_sq_prod);
75280 }
75281
75282 static void sq_comp_handler(struct ib_cq *cq, void *cq_context)
75283 @@ -1274,7 +1274,7 @@ int svc_rdma_send(struct svcxprt_rdma *xprt, struct ib_send_wr *wr)
75284 spin_lock_bh(&xprt->sc_lock);
75285 if (xprt->sc_sq_depth < atomic_read(&xprt->sc_sq_count) + wr_count) {
75286 spin_unlock_bh(&xprt->sc_lock);
75287 - atomic_inc(&rdma_stat_sq_starve);
75288 + atomic_inc_unchecked(&rdma_stat_sq_starve);
75289
75290 /* See if we can opportunistically reap SQ WR to make room */
75291 sq_cq_reap(xprt);
75292 diff --git a/net/sysctl_net.c b/net/sysctl_net.c
75293 index e758139..d29ea47 100644
75294 --- a/net/sysctl_net.c
75295 +++ b/net/sysctl_net.c
75296 @@ -47,7 +47,7 @@ static int net_ctl_permissions(struct ctl_table_root *root,
75297 struct ctl_table *table)
75298 {
75299 /* Allow network administrator to have same access as root. */
75300 - if (capable(CAP_NET_ADMIN)) {
75301 + if (capable_nolog(CAP_NET_ADMIN)) {
75302 int mode = (table->mode >> 6) & 7;
75303 return (mode << 6) | (mode << 3) | mode;
75304 }
75305 diff --git a/net/tipc/link.c b/net/tipc/link.c
75306 index ae98a72..7bb6056 100644
75307 --- a/net/tipc/link.c
75308 +++ b/net/tipc/link.c
75309 @@ -1203,7 +1203,7 @@ static int link_send_sections_long(struct tipc_port *sender,
75310 struct tipc_msg fragm_hdr;
75311 struct sk_buff *buf, *buf_chain, *prev;
75312 u32 fragm_crs, fragm_rest, hsz, sect_rest;
75313 - const unchar *sect_crs;
75314 + const unchar __user *sect_crs;
75315 int curr_sect;
75316 u32 fragm_no;
75317
75318 @@ -1247,7 +1247,7 @@ again:
75319
75320 if (!sect_rest) {
75321 sect_rest = msg_sect[++curr_sect].iov_len;
75322 - sect_crs = (const unchar *)msg_sect[curr_sect].iov_base;
75323 + sect_crs = (const unchar __user *)msg_sect[curr_sect].iov_base;
75324 }
75325
75326 if (sect_rest < fragm_rest)
75327 @@ -1266,7 +1266,7 @@ error:
75328 }
75329 } else
75330 skb_copy_to_linear_data_offset(buf, fragm_crs,
75331 - sect_crs, sz);
75332 + (const void __force_kernel *)sect_crs, sz);
75333 sect_crs += sz;
75334 sect_rest -= sz;
75335 fragm_crs += sz;
75336 diff --git a/net/tipc/msg.c b/net/tipc/msg.c
75337 index 83d5096..dcba497 100644
75338 --- a/net/tipc/msg.c
75339 +++ b/net/tipc/msg.c
75340 @@ -99,7 +99,7 @@ int tipc_msg_build(struct tipc_msg *hdr, struct iovec const *msg_sect,
75341 msg_sect[cnt].iov_len);
75342 else
75343 skb_copy_to_linear_data_offset(*buf, pos,
75344 - msg_sect[cnt].iov_base,
75345 + (const void __force_kernel *)msg_sect[cnt].iov_base,
75346 msg_sect[cnt].iov_len);
75347 pos += msg_sect[cnt].iov_len;
75348 }
75349 diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
75350 index 1983717..4d6102c 100644
75351 --- a/net/tipc/subscr.c
75352 +++ b/net/tipc/subscr.c
75353 @@ -101,7 +101,7 @@ static void subscr_send_event(struct subscription *sub,
75354 {
75355 struct iovec msg_sect;
75356
75357 - msg_sect.iov_base = (void *)&sub->evt;
75358 + msg_sect.iov_base = (void __force_user *)&sub->evt;
75359 msg_sect.iov_len = sizeof(struct tipc_event);
75360
75361 sub->evt.event = htohl(event, sub->swap);
75362 diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
75363 index b595a3d..b1cd354 100644
75364 --- a/net/unix/af_unix.c
75365 +++ b/net/unix/af_unix.c
75366 @@ -767,6 +767,12 @@ static struct sock *unix_find_other(struct net *net,
75367 err = -ECONNREFUSED;
75368 if (!S_ISSOCK(inode->i_mode))
75369 goto put_fail;
75370 +
75371 + if (!gr_acl_handle_unix(path.dentry, path.mnt)) {
75372 + err = -EACCES;
75373 + goto put_fail;
75374 + }
75375 +
75376 u = unix_find_socket_byinode(inode);
75377 if (!u)
75378 goto put_fail;
75379 @@ -787,6 +793,13 @@ static struct sock *unix_find_other(struct net *net,
75380 if (u) {
75381 struct dentry *dentry;
75382 dentry = unix_sk(u)->dentry;
75383 +
75384 + if (!gr_handle_chroot_unix(pid_vnr(u->sk_peer_pid))) {
75385 + err = -EPERM;
75386 + sock_put(u);
75387 + goto fail;
75388 + }
75389 +
75390 if (dentry)
75391 touch_atime(unix_sk(u)->mnt, dentry);
75392 } else
75393 @@ -869,11 +882,18 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
75394 err = security_path_mknod(&path, dentry, mode, 0);
75395 if (err)
75396 goto out_mknod_drop_write;
75397 + if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
75398 + err = -EACCES;
75399 + goto out_mknod_drop_write;
75400 + }
75401 err = vfs_mknod(path.dentry->d_inode, dentry, mode, 0);
75402 out_mknod_drop_write:
75403 mnt_drop_write(path.mnt);
75404 if (err)
75405 goto out_mknod_dput;
75406 +
75407 + gr_handle_create(dentry, path.mnt);
75408 +
75409 mutex_unlock(&path.dentry->d_inode->i_mutex);
75410 dput(path.dentry);
75411 path.dentry = dentry;
75412 diff --git a/net/wireless/core.h b/net/wireless/core.h
75413 index b9ec306..b4a563e 100644
75414 --- a/net/wireless/core.h
75415 +++ b/net/wireless/core.h
75416 @@ -27,7 +27,7 @@ struct cfg80211_registered_device {
75417 struct mutex mtx;
75418
75419 /* rfkill support */
75420 - struct rfkill_ops rfkill_ops;
75421 + rfkill_ops_no_const rfkill_ops;
75422 struct rfkill *rfkill;
75423 struct work_struct rfkill_sync;
75424
75425 diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c
75426 index 0af7f54..c916d2f 100644
75427 --- a/net/wireless/wext-core.c
75428 +++ b/net/wireless/wext-core.c
75429 @@ -747,8 +747,7 @@ static int ioctl_standard_iw_point(struct iw_point *iwp, unsigned int cmd,
75430 */
75431
75432 /* Support for very large requests */
75433 - if ((descr->flags & IW_DESCR_FLAG_NOMAX) &&
75434 - (user_length > descr->max_tokens)) {
75435 + if (user_length > descr->max_tokens) {
75436 /* Allow userspace to GET more than max so
75437 * we can support any size GET requests.
75438 * There is still a limit : -ENOMEM.
75439 @@ -785,22 +784,6 @@ static int ioctl_standard_iw_point(struct iw_point *iwp, unsigned int cmd,
75440 }
75441 }
75442
75443 - if (IW_IS_GET(cmd) && !(descr->flags & IW_DESCR_FLAG_NOMAX)) {
75444 - /*
75445 - * If this is a GET, but not NOMAX, it means that the extra
75446 - * data is not bounded by userspace, but by max_tokens. Thus
75447 - * set the length to max_tokens. This matches the extra data
75448 - * allocation.
75449 - * The driver should fill it with the number of tokens it
75450 - * provided, and it may check iwp->length rather than having
75451 - * knowledge of max_tokens. If the driver doesn't change the
75452 - * iwp->length, this ioctl just copies back max_token tokens
75453 - * filled with zeroes. Hopefully the driver isn't claiming
75454 - * them to be valid data.
75455 - */
75456 - iwp->length = descr->max_tokens;
75457 - }
75458 -
75459 err = handler(dev, info, (union iwreq_data *) iwp, extra);
75460
75461 iwp->length += essid_compat;
75462 diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
75463 index 9049a5c..cfa6f5c 100644
75464 --- a/net/xfrm/xfrm_policy.c
75465 +++ b/net/xfrm/xfrm_policy.c
75466 @@ -299,7 +299,7 @@ static void xfrm_policy_kill(struct xfrm_policy *policy)
75467 {
75468 policy->walk.dead = 1;
75469
75470 - atomic_inc(&policy->genid);
75471 + atomic_inc_unchecked(&policy->genid);
75472
75473 if (del_timer(&policy->timer))
75474 xfrm_pol_put(policy);
75475 @@ -583,7 +583,7 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
75476 hlist_add_head(&policy->bydst, chain);
75477 xfrm_pol_hold(policy);
75478 net->xfrm.policy_count[dir]++;
75479 - atomic_inc(&flow_cache_genid);
75480 + atomic_inc_unchecked(&flow_cache_genid);
75481 if (delpol)
75482 __xfrm_policy_unlink(delpol, dir);
75483 policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir);
75484 @@ -1530,7 +1530,7 @@ free_dst:
75485 goto out;
75486 }
75487
75488 -static int inline
75489 +static inline int
75490 xfrm_dst_alloc_copy(void **target, const void *src, int size)
75491 {
75492 if (!*target) {
75493 @@ -1542,7 +1542,7 @@ xfrm_dst_alloc_copy(void **target, const void *src, int size)
75494 return 0;
75495 }
75496
75497 -static int inline
75498 +static inline int
75499 xfrm_dst_update_parent(struct dst_entry *dst, const struct xfrm_selector *sel)
75500 {
75501 #ifdef CONFIG_XFRM_SUB_POLICY
75502 @@ -1554,7 +1554,7 @@ xfrm_dst_update_parent(struct dst_entry *dst, const struct xfrm_selector *sel)
75503 #endif
75504 }
75505
75506 -static int inline
75507 +static inline int
75508 xfrm_dst_update_origin(struct dst_entry *dst, const struct flowi *fl)
75509 {
75510 #ifdef CONFIG_XFRM_SUB_POLICY
75511 @@ -1648,7 +1648,7 @@ xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
75512
75513 xdst->num_pols = num_pols;
75514 memcpy(xdst->pols, pols, sizeof(struct xfrm_policy*) * num_pols);
75515 - xdst->policy_genid = atomic_read(&pols[0]->genid);
75516 + xdst->policy_genid = atomic_read_unchecked(&pols[0]->genid);
75517
75518 return xdst;
75519 }
75520 @@ -2345,7 +2345,7 @@ static int xfrm_bundle_ok(struct xfrm_dst *first)
75521 if (xdst->xfrm_genid != dst->xfrm->genid)
75522 return 0;
75523 if (xdst->num_pols > 0 &&
75524 - xdst->policy_genid != atomic_read(&xdst->pols[0]->genid))
75525 + xdst->policy_genid != atomic_read_unchecked(&xdst->pols[0]->genid))
75526 return 0;
75527
75528 mtu = dst_mtu(dst->child);
75529 @@ -2882,7 +2882,7 @@ static int xfrm_policy_migrate(struct xfrm_policy *pol,
75530 sizeof(pol->xfrm_vec[i].saddr));
75531 pol->xfrm_vec[i].encap_family = mp->new_family;
75532 /* flush bundles */
75533 - atomic_inc(&pol->genid);
75534 + atomic_inc_unchecked(&pol->genid);
75535 }
75536 }
75537
75538 diff --git a/scripts/Makefile.build b/scripts/Makefile.build
75539 index d2b366c..51ff91e 100644
75540 --- a/scripts/Makefile.build
75541 +++ b/scripts/Makefile.build
75542 @@ -109,7 +109,7 @@ endif
75543 endif
75544
75545 # Do not include host rules unless needed
75546 -ifneq ($(hostprogs-y)$(hostprogs-m),)
75547 +ifneq ($(hostprogs-y)$(hostprogs-m)$(hostlibs-y)$(hostlibs-m),)
75548 include scripts/Makefile.host
75549 endif
75550
75551 diff --git a/scripts/Makefile.clean b/scripts/Makefile.clean
75552 index 686cb0d..9d653bf 100644
75553 --- a/scripts/Makefile.clean
75554 +++ b/scripts/Makefile.clean
75555 @@ -43,7 +43,8 @@ subdir-ymn := $(addprefix $(obj)/,$(subdir-ymn))
75556 __clean-files := $(extra-y) $(always) \
75557 $(targets) $(clean-files) \
75558 $(host-progs) \
75559 - $(hostprogs-y) $(hostprogs-m) $(hostprogs-)
75560 + $(hostprogs-y) $(hostprogs-m) $(hostprogs-) \
75561 + $(hostlibs-y) $(hostlibs-m) $(hostlibs-)
75562
75563 __clean-files := $(filter-out $(no-clean-files), $(__clean-files))
75564
75565 diff --git a/scripts/Makefile.host b/scripts/Makefile.host
75566 index 1ac414f..a1c1451 100644
75567 --- a/scripts/Makefile.host
75568 +++ b/scripts/Makefile.host
75569 @@ -31,6 +31,7 @@
75570 # Note: Shared libraries consisting of C++ files are not supported
75571
75572 __hostprogs := $(sort $(hostprogs-y) $(hostprogs-m))
75573 +__hostlibs := $(sort $(hostlibs-y) $(hostlibs-m))
75574
75575 # C code
75576 # Executables compiled from a single .c file
75577 @@ -54,6 +55,7 @@ host-cxxobjs := $(sort $(foreach m,$(host-cxxmulti),$($(m)-cxxobjs)))
75578 # Shared libaries (only .c supported)
75579 # Shared libraries (.so) - all .so files referenced in "xxx-objs"
75580 host-cshlib := $(sort $(filter %.so, $(host-cobjs)))
75581 +host-cshlib += $(sort $(filter %.so, $(__hostlibs)))
75582 # Remove .so files from "xxx-objs"
75583 host-cobjs := $(filter-out %.so,$(host-cobjs))
75584
75585 diff --git a/scripts/basic/fixdep.c b/scripts/basic/fixdep.c
75586 index cb1f50c..cef2a7c 100644
75587 --- a/scripts/basic/fixdep.c
75588 +++ b/scripts/basic/fixdep.c
75589 @@ -161,7 +161,7 @@ static unsigned int strhash(const char *str, unsigned int sz)
75590 /*
75591 * Lookup a value in the configuration string.
75592 */
75593 -static int is_defined_config(const char *name, int len, unsigned int hash)
75594 +static int is_defined_config(const char *name, unsigned int len, unsigned int hash)
75595 {
75596 struct item *aux;
75597
75598 @@ -211,10 +211,10 @@ static void clear_config(void)
75599 /*
75600 * Record the use of a CONFIG_* word.
75601 */
75602 -static void use_config(const char *m, int slen)
75603 +static void use_config(const char *m, unsigned int slen)
75604 {
75605 unsigned int hash = strhash(m, slen);
75606 - int c, i;
75607 + unsigned int c, i;
75608
75609 if (is_defined_config(m, slen, hash))
75610 return;
75611 @@ -235,9 +235,9 @@ static void use_config(const char *m, int slen)
75612
75613 static void parse_config_file(const char *map, size_t len)
75614 {
75615 - const int *end = (const int *) (map + len);
75616 + const unsigned int *end = (const unsigned int *) (map + len);
75617 /* start at +1, so that p can never be < map */
75618 - const int *m = (const int *) map + 1;
75619 + const unsigned int *m = (const unsigned int *) map + 1;
75620 const char *p, *q;
75621
75622 for (; m < end; m++) {
75623 @@ -406,7 +406,7 @@ static void print_deps(void)
75624 static void traps(void)
75625 {
75626 static char test[] __attribute__((aligned(sizeof(int)))) = "CONF";
75627 - int *p = (int *)test;
75628 + unsigned int *p = (unsigned int *)test;
75629
75630 if (*p != INT_CONF) {
75631 fprintf(stderr, "fixdep: sizeof(int) != 4 or wrong endianess? %#x\n",
75632 diff --git a/scripts/gcc-plugin.sh b/scripts/gcc-plugin.sh
75633 new file mode 100644
75634 index 0000000..8729101
75635 --- /dev/null
75636 +++ b/scripts/gcc-plugin.sh
75637 @@ -0,0 +1,2 @@
75638 +#!/bin/sh
75639 +echo -e "#include \"gcc-plugin.h\"\n#include \"tree.h\"\n#include \"tm.h\"\n#include \"rtl.h\"" | $1 -x c -shared - -o /dev/null -I`$2 -print-file-name=plugin`/include >/dev/null 2>&1 && echo "y"
75640 diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
75641 index f936d1f..a66d95f 100644
75642 --- a/scripts/mod/file2alias.c
75643 +++ b/scripts/mod/file2alias.c
75644 @@ -72,7 +72,7 @@ static void device_id_check(const char *modname, const char *device_id,
75645 unsigned long size, unsigned long id_size,
75646 void *symval)
75647 {
75648 - int i;
75649 + unsigned int i;
75650
75651 if (size % id_size || size < id_size) {
75652 if (cross_build != 0)
75653 @@ -102,7 +102,7 @@ static void device_id_check(const char *modname, const char *device_id,
75654 /* USB is special because the bcdDevice can be matched against a numeric range */
75655 /* Looks like "usb:vNpNdNdcNdscNdpNicNiscNipN" */
75656 static void do_usb_entry(struct usb_device_id *id,
75657 - unsigned int bcdDevice_initial, int bcdDevice_initial_digits,
75658 + unsigned int bcdDevice_initial, unsigned int bcdDevice_initial_digits,
75659 unsigned char range_lo, unsigned char range_hi,
75660 unsigned char max, struct module *mod)
75661 {
75662 @@ -203,7 +203,7 @@ static void do_usb_entry_multi(struct usb_device_id *id, struct module *mod)
75663 {
75664 unsigned int devlo, devhi;
75665 unsigned char chi, clo, max;
75666 - int ndigits;
75667 + unsigned int ndigits;
75668
75669 id->match_flags = TO_NATIVE(id->match_flags);
75670 id->idVendor = TO_NATIVE(id->idVendor);
75671 @@ -437,7 +437,7 @@ static void do_pnp_device_entry(void *symval, unsigned long size,
75672 for (i = 0; i < count; i++) {
75673 const char *id = (char *)devs[i].id;
75674 char acpi_id[sizeof(devs[0].id)];
75675 - int j;
75676 + unsigned int j;
75677
75678 buf_printf(&mod->dev_table_buf,
75679 "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
75680 @@ -467,7 +467,7 @@ static void do_pnp_card_entries(void *symval, unsigned long size,
75681
75682 for (j = 0; j < PNP_MAX_DEVICES; j++) {
75683 const char *id = (char *)card->devs[j].id;
75684 - int i2, j2;
75685 + unsigned int i2, j2;
75686 int dup = 0;
75687
75688 if (!id[0])
75689 @@ -493,7 +493,7 @@ static void do_pnp_card_entries(void *symval, unsigned long size,
75690 /* add an individual alias for every device entry */
75691 if (!dup) {
75692 char acpi_id[sizeof(card->devs[0].id)];
75693 - int k;
75694 + unsigned int k;
75695
75696 buf_printf(&mod->dev_table_buf,
75697 "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
75698 @@ -807,7 +807,7 @@ static void dmi_ascii_filter(char *d, const char *s)
75699 static int do_dmi_entry(const char *filename, struct dmi_system_id *id,
75700 char *alias)
75701 {
75702 - int i, j;
75703 + unsigned int i, j;
75704
75705 sprintf(alias, "dmi*");
75706
75707 diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
75708 index 2bd594e..d43245e 100644
75709 --- a/scripts/mod/modpost.c
75710 +++ b/scripts/mod/modpost.c
75711 @@ -919,6 +919,7 @@ enum mismatch {
75712 ANY_INIT_TO_ANY_EXIT,
75713 ANY_EXIT_TO_ANY_INIT,
75714 EXPORT_TO_INIT_EXIT,
75715 + DATA_TO_TEXT
75716 };
75717
75718 struct sectioncheck {
75719 @@ -1027,6 +1028,12 @@ const struct sectioncheck sectioncheck[] = {
75720 .tosec = { INIT_SECTIONS, EXIT_SECTIONS, NULL },
75721 .mismatch = EXPORT_TO_INIT_EXIT,
75722 .symbol_white_list = { DEFAULT_SYMBOL_WHITE_LIST, NULL },
75723 +},
75724 +/* Do not reference code from writable data */
75725 +{
75726 + .fromsec = { DATA_SECTIONS, NULL },
75727 + .tosec = { TEXT_SECTIONS, NULL },
75728 + .mismatch = DATA_TO_TEXT
75729 }
75730 };
75731
75732 @@ -1149,10 +1156,10 @@ static Elf_Sym *find_elf_symbol(struct elf_info *elf, Elf64_Sword addr,
75733 continue;
75734 if (ELF_ST_TYPE(sym->st_info) == STT_SECTION)
75735 continue;
75736 - if (sym->st_value == addr)
75737 - return sym;
75738 /* Find a symbol nearby - addr are maybe negative */
75739 d = sym->st_value - addr;
75740 + if (d == 0)
75741 + return sym;
75742 if (d < 0)
75743 d = addr - sym->st_value;
75744 if (d < distance) {
75745 @@ -1431,6 +1438,14 @@ static void report_sec_mismatch(const char *modname,
75746 tosym, prl_to, prl_to, tosym);
75747 free(prl_to);
75748 break;
75749 + case DATA_TO_TEXT:
75750 +/*
75751 + fprintf(stderr,
75752 + "The variable %s references\n"
75753 + "the %s %s%s%s\n",
75754 + fromsym, to, sec2annotation(tosec), tosym, to_p);
75755 +*/
75756 + break;
75757 }
75758 fprintf(stderr, "\n");
75759 }
75760 @@ -1656,7 +1671,7 @@ static void section_rel(const char *modname, struct elf_info *elf,
75761 static void check_sec_ref(struct module *mod, const char *modname,
75762 struct elf_info *elf)
75763 {
75764 - int i;
75765 + unsigned int i;
75766 Elf_Shdr *sechdrs = elf->sechdrs;
75767
75768 /* Walk through all sections */
75769 @@ -1754,7 +1769,7 @@ void __attribute__((format(printf, 2, 3))) buf_printf(struct buffer *buf,
75770 va_end(ap);
75771 }
75772
75773 -void buf_write(struct buffer *buf, const char *s, int len)
75774 +void buf_write(struct buffer *buf, const char *s, unsigned int len)
75775 {
75776 if (buf->size - buf->pos < len) {
75777 buf->size += len + SZ;
75778 @@ -1972,7 +1987,7 @@ static void write_if_changed(struct buffer *b, const char *fname)
75779 if (fstat(fileno(file), &st) < 0)
75780 goto close_write;
75781
75782 - if (st.st_size != b->pos)
75783 + if (st.st_size != (off_t)b->pos)
75784 goto close_write;
75785
75786 tmp = NOFAIL(malloc(b->pos));
75787 diff --git a/scripts/mod/modpost.h b/scripts/mod/modpost.h
75788 index 2031119..b5433af 100644
75789 --- a/scripts/mod/modpost.h
75790 +++ b/scripts/mod/modpost.h
75791 @@ -92,15 +92,15 @@ void *do_nofail(void *ptr, const char *expr);
75792
75793 struct buffer {
75794 char *p;
75795 - int pos;
75796 - int size;
75797 + unsigned int pos;
75798 + unsigned int size;
75799 };
75800
75801 void __attribute__((format(printf, 2, 3)))
75802 buf_printf(struct buffer *buf, const char *fmt, ...);
75803
75804 void
75805 -buf_write(struct buffer *buf, const char *s, int len);
75806 +buf_write(struct buffer *buf, const char *s, unsigned int len);
75807
75808 struct module {
75809 struct module *next;
75810 diff --git a/scripts/mod/sumversion.c b/scripts/mod/sumversion.c
75811 index 9dfcd6d..099068e 100644
75812 --- a/scripts/mod/sumversion.c
75813 +++ b/scripts/mod/sumversion.c
75814 @@ -470,7 +470,7 @@ static void write_version(const char *filename, const char *sum,
75815 goto out;
75816 }
75817
75818 - if (write(fd, sum, strlen(sum)+1) != strlen(sum)+1) {
75819 + if (write(fd, sum, strlen(sum)+1) != (ssize_t)strlen(sum)+1) {
75820 warn("writing sum in %s failed: %s\n",
75821 filename, strerror(errno));
75822 goto out;
75823 diff --git a/scripts/pnmtologo.c b/scripts/pnmtologo.c
75824 index 5c11312..72742b5 100644
75825 --- a/scripts/pnmtologo.c
75826 +++ b/scripts/pnmtologo.c
75827 @@ -237,14 +237,14 @@ static void write_header(void)
75828 fprintf(out, " * Linux logo %s\n", logoname);
75829 fputs(" */\n\n", out);
75830 fputs("#include <linux/linux_logo.h>\n\n", out);
75831 - fprintf(out, "static unsigned char %s_data[] __initdata = {\n",
75832 + fprintf(out, "static unsigned char %s_data[] = {\n",
75833 logoname);
75834 }
75835
75836 static void write_footer(void)
75837 {
75838 fputs("\n};\n\n", out);
75839 - fprintf(out, "const struct linux_logo %s __initconst = {\n", logoname);
75840 + fprintf(out, "const struct linux_logo %s = {\n", logoname);
75841 fprintf(out, "\t.type\t\t= %s,\n", logo_types[logo_type]);
75842 fprintf(out, "\t.width\t\t= %d,\n", logo_width);
75843 fprintf(out, "\t.height\t\t= %d,\n", logo_height);
75844 @@ -374,7 +374,7 @@ static void write_logo_clut224(void)
75845 fputs("\n};\n\n", out);
75846
75847 /* write logo clut */
75848 - fprintf(out, "static unsigned char %s_clut[] __initdata = {\n",
75849 + fprintf(out, "static unsigned char %s_clut[] = {\n",
75850 logoname);
75851 write_hex_cnt = 0;
75852 for (i = 0; i < logo_clutsize; i++) {
75853 diff --git a/security/Kconfig b/security/Kconfig
75854 index 51bd5a0..8465ae6 100644
75855 --- a/security/Kconfig
75856 +++ b/security/Kconfig
75857 @@ -4,6 +4,626 @@
75858
75859 menu "Security options"
75860
75861 +source grsecurity/Kconfig
75862 +
75863 +menu "PaX"
75864 +
75865 + config ARCH_TRACK_EXEC_LIMIT
75866 + bool
75867 +
75868 + config PAX_KERNEXEC_PLUGIN
75869 + bool
75870 +
75871 + config PAX_PER_CPU_PGD
75872 + bool
75873 +
75874 + config TASK_SIZE_MAX_SHIFT
75875 + int
75876 + depends on X86_64
75877 + default 47 if !PAX_PER_CPU_PGD
75878 + default 42 if PAX_PER_CPU_PGD
75879 +
75880 + config PAX_ENABLE_PAE
75881 + bool
75882 + default y if (X86_32 && (MPENTIUM4 || MK8 || MPSC || MCORE2 || MATOM))
75883 +
75884 +config PAX
75885 + bool "Enable various PaX features"
75886 + depends on GRKERNSEC && (ALPHA || ARM || AVR32 || IA64 || MIPS || PARISC || PPC || SPARC || X86)
75887 + help
75888 + This allows you to enable various PaX features. PaX adds
75889 + intrusion prevention mechanisms to the kernel that reduce
75890 + the risks posed by exploitable memory corruption bugs.
75891 +
75892 +menu "PaX Control"
75893 + depends on PAX
75894 +
75895 +config PAX_SOFTMODE
75896 + bool 'Support soft mode'
75897 + help
75898 + Enabling this option will allow you to run PaX in soft mode, that
75899 + is, PaX features will not be enforced by default, only on executables
75900 + marked explicitly. You must also enable PT_PAX_FLAGS or XATTR_PAX_FLAGS
75901 + support as they are the only way to mark executables for soft mode use.
75902 +
75903 + Soft mode can be activated by using the "pax_softmode=1" kernel command
75904 + line option on boot. Furthermore you can control various PaX features
75905 + at runtime via the entries in /proc/sys/kernel/pax.
75906 +
75907 +config PAX_EI_PAX
75908 + bool 'Use legacy ELF header marking'
75909 + help
75910 + Enabling this option will allow you to control PaX features on
75911 + a per executable basis via the 'chpax' utility available at
75912 + http://pax.grsecurity.net/. The control flags will be read from
75913 + an otherwise reserved part of the ELF header. This marking has
75914 + numerous drawbacks (no support for soft-mode, toolchain does not
75915 + know about the non-standard use of the ELF header) therefore it
75916 + has been deprecated in favour of PT_PAX_FLAGS and XATTR_PAX_FLAGS
75917 + support.
75918 +
75919 + If you have applications not marked by the PT_PAX_FLAGS ELF program
75920 + header and you cannot use XATTR_PAX_FLAGS then you MUST enable this
75921 + option otherwise they will not get any protection.
75922 +
75923 + Note that if you enable PT_PAX_FLAGS or XATTR_PAX_FLAGS marking
75924 + support as well, they will override the legacy EI_PAX marks.
75925 +
75926 +config PAX_PT_PAX_FLAGS
75927 + bool 'Use ELF program header marking'
75928 + help
75929 + Enabling this option will allow you to control PaX features on
75930 + a per executable basis via the 'paxctl' utility available at
75931 + http://pax.grsecurity.net/. The control flags will be read from
75932 + a PaX specific ELF program header (PT_PAX_FLAGS). This marking
75933 + has the benefits of supporting both soft mode and being fully
75934 + integrated into the toolchain (the binutils patch is available
75935 + from http://pax.grsecurity.net).
75936 +
75937 + If you have applications not marked by the PT_PAX_FLAGS ELF program
75938 + header then you MUST enable either XATTR_PAX_FLAGS or EI_PAX marking
75939 + support otherwise they will not get any protection.
75940 +
75941 + If you enable both PT_PAX_FLAGS and XATTR_PAX_FLAGS support then you
75942 + must make sure that the marks are the same if a binary has both marks.
75943 +
75944 + Note that if you enable the legacy EI_PAX marking support as well,
75945 + the EI_PAX marks will be overridden by the PT_PAX_FLAGS marks.
75946 +
75947 +config PAX_XATTR_PAX_FLAGS
75948 + bool 'Use filesystem extended attributes marking'
75949 + depends on EXPERT
75950 + select CIFS_XATTR if CIFS
75951 + select EXT2_FS_XATTR if EXT2_FS
75952 + select EXT3_FS_XATTR if EXT3_FS
75953 + select EXT4_FS_XATTR if EXT4_FS
75954 + select JFFS2_FS_XATTR if JFFS2_FS
75955 + select REISERFS_FS_XATTR if REISERFS_FS
75956 + select SQUASHFS_XATTR if SQUASHFS
75957 + select TMPFS_XATTR if TMPFS
75958 + select UBIFS_FS_XATTR if UBIFS_FS
75959 + help
75960 + Enabling this option will allow you to control PaX features on
75961 + a per executable basis via the 'setfattr' utility. The control
75962 + flags will be read from the user.pax.flags extended attribute of
75963 + the file. This marking has the benefit of supporting binary-only
75964 + applications that self-check themselves (e.g., skype) and would
75965 + not tolerate chpax/paxctl changes. The main drawback is that
75966 + extended attributes are not supported by some filesystems (e.g.,
75967 + isofs, udf, vfat) so copying files through such filesystems will
75968 + lose the extended attributes and these PaX markings.
75969 +
75970 + If you have applications not marked by the PT_PAX_FLAGS ELF program
75971 + header then you MUST enable either XATTR_PAX_FLAGS or EI_PAX marking
75972 + support otherwise they will not get any protection.
75973 +
75974 + If you enable both PT_PAX_FLAGS and XATTR_PAX_FLAGS support then you
75975 + must make sure that the marks are the same if a binary has both marks.
75976 +
75977 + Note that if you enable the legacy EI_PAX marking support as well,
75978 + the EI_PAX marks will be overridden by the XATTR_PAX_FLAGS marks.
75979 +
75980 +choice
75981 + prompt 'MAC system integration'
75982 + default PAX_HAVE_ACL_FLAGS
75983 + help
75984 + Mandatory Access Control systems have the option of controlling
75985 + PaX flags on a per executable basis, choose the method supported
75986 + by your particular system.
75987 +
75988 + - "none": if your MAC system does not interact with PaX,
75989 + - "direct": if your MAC system defines pax_set_initial_flags() itself,
75990 + - "hook": if your MAC system uses the pax_set_initial_flags_func callback.
75991 +
75992 + NOTE: this option is for developers/integrators only.
75993 +
75994 + config PAX_NO_ACL_FLAGS
75995 + bool 'none'
75996 +
75997 + config PAX_HAVE_ACL_FLAGS
75998 + bool 'direct'
75999 +
76000 + config PAX_HOOK_ACL_FLAGS
76001 + bool 'hook'
76002 +endchoice
76003 +
76004 +endmenu
76005 +
76006 +menu "Non-executable pages"
76007 + depends on PAX
76008 +
76009 +config PAX_NOEXEC
76010 + bool "Enforce non-executable pages"
76011 + depends on ALPHA || (ARM && (CPU_V6 || CPU_V7)) || IA64 || MIPS || PARISC || PPC || S390 || SPARC || X86
76012 + help
76013 + By design some architectures do not allow for protecting memory
76014 + pages against execution or even if they do, Linux does not make
76015 + use of this feature. In practice this means that if a page is
76016 + readable (such as the stack or heap) it is also executable.
76017 +
76018 + There is a well known exploit technique that makes use of this
76019 + fact and a common programming mistake where an attacker can
76020 + introduce code of his choice somewhere in the attacked program's
76021 + memory (typically the stack or the heap) and then execute it.
76022 +
76023 + If the attacked program was running with different (typically
76024 + higher) privileges than that of the attacker, then he can elevate
76025 + his own privilege level (e.g. get a root shell, write to files for
76026 + which he does not have write access to, etc).
76027 +
76028 + Enabling this option will let you choose from various features
76029 + that prevent the injection and execution of 'foreign' code in
76030 + a program.
76031 +
76032 + This will also break programs that rely on the old behaviour and
76033 + expect that dynamically allocated memory via the malloc() family
76034 + of functions is executable (which it is not). Notable examples
76035 + are the XFree86 4.x server, the java runtime and wine.
76036 +
76037 +config PAX_PAGEEXEC
76038 + bool "Paging based non-executable pages"
76039 + depends on PAX_NOEXEC && (!X86_32 || M586 || M586TSC || M586MMX || M686 || MPENTIUMII || MPENTIUMIII || MPENTIUMM || MCORE2 || MATOM || MPENTIUM4 || MPSC || MK7 || MK8 || MWINCHIPC6 || MWINCHIP2 || MWINCHIP3D || MVIAC3_2 || MVIAC7)
76040 + select S390_SWITCH_AMODE if S390
76041 + select S390_EXEC_PROTECT if S390
76042 + select ARCH_TRACK_EXEC_LIMIT if X86_32
76043 + help
76044 + This implementation is based on the paging feature of the CPU.
76045 + On i386 without hardware non-executable bit support there is a
76046 + variable but usually low performance impact, however on Intel's
76047 + P4 core based CPUs it is very high so you should not enable this
76048 + for kernels meant to be used on such CPUs.
76049 +
76050 + On alpha, avr32, ia64, parisc, sparc, sparc64, x86_64 and i386
76051 + with hardware non-executable bit support there is no performance
76052 + impact, on ppc the impact is negligible.
76053 +
76054 + Note that several architectures require various emulations due to
76055 + badly designed userland ABIs, this will cause a performance impact
76056 + but will disappear as soon as userland is fixed. For example, ppc
76057 + userland MUST have been built with secure-plt by a recent toolchain.
76058 +
76059 +config PAX_SEGMEXEC
76060 + bool "Segmentation based non-executable pages"
76061 + depends on PAX_NOEXEC && X86_32
76062 + help
76063 + This implementation is based on the segmentation feature of the
76064 + CPU and has a very small performance impact, however applications
76065 + will be limited to a 1.5 GB address space instead of the normal
76066 + 3 GB.
76067 +
76068 +config PAX_EMUTRAMP
76069 + bool "Emulate trampolines" if (PAX_PAGEEXEC || PAX_SEGMEXEC) && (PARISC || X86)
76070 + default y if PARISC
76071 + help
76072 + There are some programs and libraries that for one reason or
76073 + another attempt to execute special small code snippets from
76074 + non-executable memory pages. Most notable examples are the
76075 + signal handler return code generated by the kernel itself and
76076 + the GCC trampolines.
76077 +
76078 + If you enabled CONFIG_PAX_PAGEEXEC or CONFIG_PAX_SEGMEXEC then
76079 + such programs will no longer work under your kernel.
76080 +
76081 + As a remedy you can say Y here and use the 'chpax' or 'paxctl'
76082 + utilities to enable trampoline emulation for the affected programs
76083 + yet still have the protection provided by the non-executable pages.
76084 +
76085 + On parisc you MUST enable this option and EMUSIGRT as well, otherwise
76086 + your system will not even boot.
76087 +
76088 + Alternatively you can say N here and use the 'chpax' or 'paxctl'
76089 + utilities to disable CONFIG_PAX_PAGEEXEC and CONFIG_PAX_SEGMEXEC
76090 + for the affected files.
76091 +
76092 + NOTE: enabling this feature *may* open up a loophole in the
76093 + protection provided by non-executable pages that an attacker
76094 + could abuse. Therefore the best solution is to not have any
76095 + files on your system that would require this option. This can
76096 + be achieved by not using libc5 (which relies on the kernel
76097 + signal handler return code) and not using or rewriting programs
76098 + that make use of the nested function implementation of GCC.
76099 + Skilled users can just fix GCC itself so that it implements
76100 + nested function calls in a way that does not interfere with PaX.
76101 +
76102 +config PAX_EMUSIGRT
76103 + bool "Automatically emulate sigreturn trampolines"
76104 + depends on PAX_EMUTRAMP && PARISC
76105 + default y
76106 + help
76107 + Enabling this option will have the kernel automatically detect
76108 + and emulate signal return trampolines executing on the stack
76109 + that would otherwise lead to task termination.
76110 +
76111 + This solution is intended as a temporary one for users with
76112 + legacy versions of libc (libc5, glibc 2.0, uClibc before 0.9.17,
76113 + Modula-3 runtime, etc) or executables linked to such, basically
76114 + everything that does not specify its own SA_RESTORER function in
76115 + normal executable memory like glibc 2.1+ does.
76116 +
76117 + On parisc you MUST enable this option, otherwise your system will
76118 + not even boot.
76119 +
76120 + NOTE: this feature cannot be disabled on a per executable basis
76121 + and since it *does* open up a loophole in the protection provided
76122 + by non-executable pages, the best solution is to not have any
76123 + files on your system that would require this option.
76124 +
76125 +config PAX_MPROTECT
76126 + bool "Restrict mprotect()"
76127 + depends on (PAX_PAGEEXEC || PAX_SEGMEXEC)
76128 + help
76129 + Enabling this option will prevent programs from
76130 + - changing the executable status of memory pages that were
76131 + not originally created as executable,
76132 + - making read-only executable pages writable again,
76133 + - creating executable pages from anonymous memory,
76134 + - making read-only-after-relocations (RELRO) data pages writable again.
76135 +
76136 + You should say Y here to complete the protection provided by
76137 + the enforcement of non-executable pages.
76138 +
76139 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control
76140 + this feature on a per file basis.
76141 +
76142 +config PAX_MPROTECT_COMPAT
76143 + bool "Use legacy/compat protection demoting (read help)"
76144 + depends on PAX_MPROTECT
76145 + default n
76146 + help
76147 + The current implementation of PAX_MPROTECT denies RWX allocations/mprotects
76148 + by sending the proper error code to the application. For some broken
76149 + userland, this can cause problems with Python or other applications. The
76150 + current implementation however allows for applications like clamav to
76151 + detect if JIT compilation/execution is allowed and to fall back gracefully
76152 + to an interpreter-based mode if it does not. While we encourage everyone
76153 + to use the current implementation as-is and push upstream to fix broken
76154 + userland (note that the RWX logging option can assist with this), in some
76155 + environments this may not be possible. Having to disable MPROTECT
76156 + completely on certain binaries reduces the security benefit of PaX,
76157 + so this option is provided for those environments to revert to the old
76158 + behavior.
76159 +
76160 +config PAX_ELFRELOCS
76161 + bool "Allow ELF text relocations (read help)"
76162 + depends on PAX_MPROTECT
76163 + default n
76164 + help
76165 + Non-executable pages and mprotect() restrictions are effective
76166 + in preventing the introduction of new executable code into an
76167 + attacked task's address space. There remain only two venues
76168 + for this kind of attack: if the attacker can execute already
76169 + existing code in the attacked task then he can either have it
76170 + create and mmap() a file containing his code or have it mmap()
76171 + an already existing ELF library that does not have position
76172 + independent code in it and use mprotect() on it to make it
76173 + writable and copy his code there. While protecting against
76174 + the former approach is beyond PaX, the latter can be prevented
76175 + by having only PIC ELF libraries on one's system (which do not
76176 + need to relocate their code). If you are sure this is your case,
76177 + as is the case with all modern Linux distributions, then leave
76178 + this option disabled. You should say 'n' here.
76179 +
76180 +config PAX_ETEXECRELOCS
76181 + bool "Allow ELF ET_EXEC text relocations"
76182 + depends on PAX_MPROTECT && (ALPHA || IA64 || PARISC)
76183 + select PAX_ELFRELOCS
76184 + default y
76185 + help
76186 + On some architectures there are incorrectly created applications
76187 + that require text relocations and would not work without enabling
76188 + this option. If you are an alpha, ia64 or parisc user, you should
76189 + enable this option and disable it once you have made sure that
76190 + none of your applications need it.
76191 +
76192 +config PAX_EMUPLT
76193 + bool "Automatically emulate ELF PLT"
76194 + depends on PAX_MPROTECT && (ALPHA || PARISC || SPARC)
76195 + default y
76196 + help
76197 + Enabling this option will have the kernel automatically detect
76198 + and emulate the Procedure Linkage Table entries in ELF files.
76199 + On some architectures such entries are in writable memory, and
76200 + become non-executable leading to task termination. Therefore
76201 + it is mandatory that you enable this option on alpha, parisc,
76202 + sparc and sparc64, otherwise your system would not even boot.
76203 +
76204 + NOTE: this feature *does* open up a loophole in the protection
76205 + provided by the non-executable pages, therefore the proper
76206 + solution is to modify the toolchain to produce a PLT that does
76207 + not need to be writable.
76208 +
76209 +config PAX_DLRESOLVE
76210 + bool 'Emulate old glibc resolver stub'
76211 + depends on PAX_EMUPLT && SPARC
76212 + default n
76213 + help
76214 + This option is needed if userland has an old glibc (before 2.4)
76215 + that puts a 'save' instruction into the runtime generated resolver
76216 + stub that needs special emulation.
76217 +
76218 +config PAX_KERNEXEC
76219 + bool "Enforce non-executable kernel pages"
76220 + depends on (PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN
76221 + select PAX_PER_CPU_PGD if X86_64 || (X86_32 && X86_PAE)
76222 + select PAX_KERNEXEC_PLUGIN if X86_64
76223 + help
76224 + This is the kernel land equivalent of PAGEEXEC and MPROTECT,
76225 + that is, enabling this option will make it harder to inject
76226 + and execute 'foreign' code in kernel memory itself.
76227 +
76228 + Note that on x86_64 kernels there is a known regression when
76229 + this feature and KVM/VMX are both enabled in the host kernel.
76230 +
76231 +choice
76232 + prompt "Return Address Instrumentation Method"
76233 + default PAX_KERNEXEC_PLUGIN_METHOD_BTS
76234 + depends on PAX_KERNEXEC_PLUGIN
76235 + help
76236 + Select the method used to instrument function pointer dereferences.
76237 + Note that binary modules cannot be instrumented by this approach.
76238 +
76239 + config PAX_KERNEXEC_PLUGIN_METHOD_BTS
76240 + bool "bts"
76241 + help
76242 + This method is compatible with binary only modules but has
76243 + a higher runtime overhead.
76244 +
76245 + config PAX_KERNEXEC_PLUGIN_METHOD_OR
76246 + bool "or"
76247 + depends on !PARAVIRT
76248 + help
76249 + This method is incompatible with binary only modules but has
76250 + a lower runtime overhead.
76251 +endchoice
76252 +
76253 +config PAX_KERNEXEC_PLUGIN_METHOD
76254 + string
76255 + default "bts" if PAX_KERNEXEC_PLUGIN_METHOD_BTS
76256 + default "or" if PAX_KERNEXEC_PLUGIN_METHOD_OR
76257 + default ""
76258 +
76259 +config PAX_KERNEXEC_MODULE_TEXT
76260 + int "Minimum amount of memory reserved for module code"
76261 + default "4"
76262 + depends on PAX_KERNEXEC && X86_32 && MODULES
76263 + help
76264 + Due to implementation details the kernel must reserve a fixed
76265 + amount of memory for module code at compile time that cannot be
76266 + changed at runtime. Here you can specify the minimum amount
76267 + in MB that will be reserved. Due to the same implementation
76268 + details this size will always be rounded up to the next 2/4 MB
76269 + boundary (depends on PAE) so the actually available memory for
76270 + module code will usually be more than this minimum.
76271 +
76272 + The default 4 MB should be enough for most users but if you have
76273 + an excessive number of modules (e.g., most distribution configs
76274 + compile many drivers as modules) or use huge modules such as
76275 + nvidia's kernel driver, you will need to adjust this amount.
76276 + A good rule of thumb is to look at your currently loaded kernel
76277 + modules and add up their sizes.
76278 +
76279 +endmenu
76280 +
76281 +menu "Address Space Layout Randomization"
76282 + depends on PAX
76283 +
76284 +config PAX_ASLR
76285 + bool "Address Space Layout Randomization"
76286 + help
76287 + Many if not most exploit techniques rely on the knowledge of
76288 + certain addresses in the attacked program. The following options
76289 + will allow the kernel to apply a certain amount of randomization
76290 + to specific parts of the program thereby forcing an attacker to
76291 + guess them in most cases. Any failed guess will most likely crash
76292 + the attacked program which allows the kernel to detect such attempts
76293 + and react on them. PaX itself provides no reaction mechanisms,
76294 + instead it is strongly encouraged that you make use of Nergal's
76295 + segvguard (ftp://ftp.pl.openwall.com/misc/segvguard/) or grsecurity's
76296 + (http://www.grsecurity.net/) built-in crash detection features or
76297 + develop one yourself.
76298 +
76299 + By saying Y here you can choose to randomize the following areas:
76300 + - top of the task's kernel stack
76301 + - top of the task's userland stack
76302 + - base address for mmap() requests that do not specify one
76303 + (this includes all libraries)
76304 + - base address of the main executable
76305 +
76306 + It is strongly recommended to say Y here as address space layout
76307 + randomization has negligible impact on performance yet it provides
76308 + a very effective protection.
76309 +
76310 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control
76311 + this feature on a per file basis.
76312 +
76313 +config PAX_RANDKSTACK
76314 + bool "Randomize kernel stack base"
76315 + depends on X86_TSC && X86
76316 + help
76317 + By saying Y here the kernel will randomize every task's kernel
76318 + stack on every system call. This will not only force an attacker
76319 + to guess it but also prevent him from making use of possible
76320 + leaked information about it.
76321 +
76322 + Since the kernel stack is a rather scarce resource, randomization
76323 + may cause unexpected stack overflows, therefore you should very
76324 + carefully test your system. Note that once enabled in the kernel
76325 + configuration, this feature cannot be disabled on a per file basis.
76326 +
76327 +config PAX_RANDUSTACK
76328 + bool "Randomize user stack base"
76329 + depends on PAX_ASLR
76330 + help
76331 + By saying Y here the kernel will randomize every task's userland
76332 + stack. The randomization is done in two steps where the second
76333 + one may apply a big amount of shift to the top of the stack and
76334 + cause problems for programs that want to use lots of memory (more
76335 + than 2.5 GB if SEGMEXEC is not active, or 1.25 GB when it is).
76336 + For this reason the second step can be controlled by 'chpax' or
76337 + 'paxctl' on a per file basis.
76338 +
76339 +config PAX_RANDMMAP
76340 + bool "Randomize mmap() base"
76341 + depends on PAX_ASLR
76342 + help
76343 + By saying Y here the kernel will use a randomized base address for
76344 + mmap() requests that do not specify one themselves. As a result
76345 + all dynamically loaded libraries will appear at random addresses
76346 + and therefore be harder to exploit by a technique where an attacker
76347 + attempts to execute library code for his purposes (e.g. spawn a
76348 + shell from an exploited program that is running at an elevated
76349 + privilege level).
76350 +
76351 + Furthermore, if a program is relinked as a dynamic ELF file, its
76352 + base address will be randomized as well, completing the full
76353 + randomization of the address space layout. Attacking such programs
76354 + becomes a guess game. You can find an example of doing this at
76355 + http://pax.grsecurity.net/et_dyn.tar.gz and practical samples at
76356 + http://www.grsecurity.net/grsec-gcc-specs.tar.gz .
76357 +
76358 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control this
76359 + feature on a per file basis.
76360 +
76361 +endmenu
76362 +
76363 +menu "Miscellaneous hardening features"
76364 +
76365 +config PAX_MEMORY_SANITIZE
76366 + bool "Sanitize all freed memory"
76367 + help
76368 + By saying Y here the kernel will erase memory pages as soon as they
76369 + are freed. This in turn reduces the lifetime of data stored in the
76370 + pages, making it less likely that sensitive information such as
76371 + passwords, cryptographic secrets, etc stay in memory for too long.
76372 +
76373 + This is especially useful for programs whose runtime is short, long
76374 + lived processes and the kernel itself benefit from this as long as
76375 + they operate on whole memory pages and ensure timely freeing of pages
76376 + that may hold sensitive information.
76377 +
76378 + The tradeoff is performance impact, on a single CPU system kernel
76379 + compilation sees a 3% slowdown, other systems and workloads may vary
76380 + and you are advised to test this feature on your expected workload
76381 + before deploying it.
76382 +
76383 + Note that this feature does not protect data stored in live pages,
76384 + e.g., process memory swapped to disk may stay there for a long time.
76385 +
76386 +config PAX_MEMORY_STACKLEAK
76387 + bool "Sanitize kernel stack"
76388 + depends on X86
76389 + help
76390 + By saying Y here the kernel will erase the kernel stack before it
76391 + returns from a system call. This in turn reduces the information
76392 + that a kernel stack leak bug can reveal.
76393 +
76394 + Note that such a bug can still leak information that was put on
76395 + the stack by the current system call (the one eventually triggering
76396 + the bug) but traces of earlier system calls on the kernel stack
76397 + cannot leak anymore.
76398 +
76399 + The tradeoff is performance impact: on a single CPU system kernel
76400 + compilation sees a 1% slowdown, other systems and workloads may vary
76401 + and you are advised to test this feature on your expected workload
76402 + before deploying it.
76403 +
76404 + Note: full support for this feature requires gcc with plugin support
76405 + so make sure your compiler is at least gcc 4.5.0. Using older gcc
76406 + versions means that functions with large enough stack frames may
76407 + leave uninitialized memory behind that may be exposed to a later
76408 + syscall leaking the stack.
76409 +
76410 +config PAX_MEMORY_UDEREF
76411 + bool "Prevent invalid userland pointer dereference"
76412 + depends on X86 && !UML_X86 && !XEN
76413 + select PAX_PER_CPU_PGD if X86_64
76414 + help
76415 + By saying Y here the kernel will be prevented from dereferencing
76416 + userland pointers in contexts where the kernel expects only kernel
76417 + pointers. This is both a useful runtime debugging feature and a
76418 + security measure that prevents exploiting a class of kernel bugs.
76419 +
76420 + The tradeoff is that some virtualization solutions may experience
76421 + a huge slowdown and therefore you should not enable this feature
76422 + for kernels meant to run in such environments. Whether a given VM
76423 + solution is affected or not is best determined by simply trying it
76424 + out, the performance impact will be obvious right on boot as this
76425 + mechanism engages from very early on. A good rule of thumb is that
76426 + VMs running on CPUs without hardware virtualization support (i.e.,
76427 + the majority of IA-32 CPUs) will likely experience the slowdown.
76428 +
76429 +config PAX_REFCOUNT
76430 + bool "Prevent various kernel object reference counter overflows"
76431 + depends on GRKERNSEC && (X86 || SPARC64)
76432 + help
76433 + By saying Y here the kernel will detect and prevent overflowing
76434 + various (but not all) kinds of object reference counters. Such
76435 + overflows can normally occur due to bugs only and are often, if
76436 + not always, exploitable.
76437 +
76438 + The tradeoff is that data structures protected by an overflowed
76439 + refcount will never be freed and therefore will leak memory. Note
76440 + that this leak also happens even without this protection but in
76441 + that case the overflow can eventually trigger the freeing of the
76442 + data structure while it is still being used elsewhere, resulting
76443 + in the exploitable situation that this feature prevents.
76444 +
76445 + Since this has a negligible performance impact, you should enable
76446 + this feature.
76447 +
76448 +config PAX_USERCOPY
76449 + bool "Harden heap object copies between kernel and userland"
76450 + depends on X86 || PPC || SPARC || ARM
76451 + depends on GRKERNSEC && (SLAB || SLUB || SLOB)
76452 + help
76453 + By saying Y here the kernel will enforce the size of heap objects
76454 + when they are copied in either direction between the kernel and
76455 + userland, even if only a part of the heap object is copied.
76456 +
76457 + Specifically, this checking prevents information leaking from the
76458 + kernel heap during kernel to userland copies (if the kernel heap
76459 + object is otherwise fully initialized) and prevents kernel heap
76460 + overflows during userland to kernel copies.
76461 +
76462 + Note that the current implementation provides the strictest bounds
76463 + checks for the SLUB allocator.
76464 +
76465 + Enabling this option also enables per-slab cache protection against
76466 + data in a given cache being copied into/out of via userland
76467 + accessors. Though the whitelist of regions will be reduced over
76468 + time, it notably protects important data structures like task structs.
76469 +
76470 + If frame pointers are enabled on x86, this option will also restrict
76471 + copies into and out of the kernel stack to local variables within a
76472 + single frame.
76473 +
76474 + Since this has a negligible performance impact, you should enable
76475 + this feature.
76476 +
76477 +endmenu
76478 +
76479 +endmenu
76480 +
76481 config KEYS
76482 bool "Enable access key retention support"
76483 help
76484 @@ -169,7 +789,7 @@ config INTEL_TXT
76485 config LSM_MMAP_MIN_ADDR
76486 int "Low address space for LSM to protect from user allocation"
76487 depends on SECURITY && SECURITY_SELINUX
76488 - default 32768 if ARM
76489 + default 32768 if ALPHA || ARM || PARISC || SPARC32
76490 default 65536
76491 help
76492 This is the portion of low virtual memory which should be protected
76493 diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
76494 index 3783202..1852837 100644
76495 --- a/security/apparmor/lsm.c
76496 +++ b/security/apparmor/lsm.c
76497 @@ -621,7 +621,7 @@ static int apparmor_task_setrlimit(struct task_struct *task,
76498 return error;
76499 }
76500
76501 -static struct security_operations apparmor_ops = {
76502 +static struct security_operations apparmor_ops __read_only = {
76503 .name = "apparmor",
76504
76505 .ptrace_access_check = apparmor_ptrace_access_check,
76506 diff --git a/security/commoncap.c b/security/commoncap.c
76507 index ee4f848..a320c64 100644
76508 --- a/security/commoncap.c
76509 +++ b/security/commoncap.c
76510 @@ -28,6 +28,7 @@
76511 #include <linux/prctl.h>
76512 #include <linux/securebits.h>
76513 #include <linux/user_namespace.h>
76514 +#include <net/sock.h>
76515
76516 /*
76517 * If a non-root user executes a setuid-root binary in
76518 @@ -58,7 +59,7 @@ int cap_netlink_send(struct sock *sk, struct sk_buff *skb)
76519
76520 int cap_netlink_recv(struct sk_buff *skb, int cap)
76521 {
76522 - if (!cap_raised(current_cap(), cap))
76523 + if (!cap_raised(current_cap(), cap) || !gr_is_capable(cap))
76524 return -EPERM;
76525 return 0;
76526 }
76527 @@ -579,6 +580,9 @@ int cap_bprm_secureexec(struct linux_binprm *bprm)
76528 {
76529 const struct cred *cred = current_cred();
76530
76531 + if (gr_acl_enable_at_secure())
76532 + return 1;
76533 +
76534 if (cred->uid != 0) {
76535 if (bprm->cap_effective)
76536 return 1;
76537 diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h
76538 index 3ccf7ac..d73ad64 100644
76539 --- a/security/integrity/ima/ima.h
76540 +++ b/security/integrity/ima/ima.h
76541 @@ -86,8 +86,8 @@ void ima_add_violation(struct inode *inode, const unsigned char *filename,
76542 extern spinlock_t ima_queue_lock;
76543
76544 struct ima_h_table {
76545 - atomic_long_t len; /* number of stored measurements in the list */
76546 - atomic_long_t violations;
76547 + atomic_long_unchecked_t len; /* number of stored measurements in the list */
76548 + atomic_long_unchecked_t violations;
76549 struct hlist_head queue[IMA_MEASURE_HTABLE_SIZE];
76550 };
76551 extern struct ima_h_table ima_htable;
76552 diff --git a/security/integrity/ima/ima_api.c b/security/integrity/ima/ima_api.c
76553 index 88a2788..581ab92 100644
76554 --- a/security/integrity/ima/ima_api.c
76555 +++ b/security/integrity/ima/ima_api.c
76556 @@ -75,7 +75,7 @@ void ima_add_violation(struct inode *inode, const unsigned char *filename,
76557 int result;
76558
76559 /* can overflow, only indicator */
76560 - atomic_long_inc(&ima_htable.violations);
76561 + atomic_long_inc_unchecked(&ima_htable.violations);
76562
76563 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
76564 if (!entry) {
76565 diff --git a/security/integrity/ima/ima_audit.c b/security/integrity/ima/ima_audit.c
76566 index c5c5a72..2ad942f 100644
76567 --- a/security/integrity/ima/ima_audit.c
76568 +++ b/security/integrity/ima/ima_audit.c
76569 @@ -56,9 +56,11 @@ void integrity_audit_msg(int audit_msgno, struct inode *inode,
76570 audit_log_format(ab, " name=");
76571 audit_log_untrustedstring(ab, fname);
76572 }
76573 - if (inode)
76574 - audit_log_format(ab, " dev=%s ino=%lu",
76575 - inode->i_sb->s_id, inode->i_ino);
76576 + if (inode) {
76577 + audit_log_format(ab, " dev=");
76578 + audit_log_untrustedstring(ab, inode->i_sb->s_id);
76579 + audit_log_format(ab, " ino=%lu", inode->i_ino);
76580 + }
76581 audit_log_format(ab, " res=%d", !result ? 0 : 1);
76582 audit_log_end(ab);
76583 }
76584 diff --git a/security/integrity/ima/ima_fs.c b/security/integrity/ima/ima_fs.c
76585 index e1aa2b4..52027bf 100644
76586 --- a/security/integrity/ima/ima_fs.c
76587 +++ b/security/integrity/ima/ima_fs.c
76588 @@ -28,12 +28,12 @@
76589 static int valid_policy = 1;
76590 #define TMPBUFLEN 12
76591 static ssize_t ima_show_htable_value(char __user *buf, size_t count,
76592 - loff_t *ppos, atomic_long_t *val)
76593 + loff_t *ppos, atomic_long_unchecked_t *val)
76594 {
76595 char tmpbuf[TMPBUFLEN];
76596 ssize_t len;
76597
76598 - len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read(val));
76599 + len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read_unchecked(val));
76600 return simple_read_from_buffer(buf, count, ppos, tmpbuf, len);
76601 }
76602
76603 diff --git a/security/integrity/ima/ima_queue.c b/security/integrity/ima/ima_queue.c
76604 index 55a6271..ad829c3 100644
76605 --- a/security/integrity/ima/ima_queue.c
76606 +++ b/security/integrity/ima/ima_queue.c
76607 @@ -81,7 +81,7 @@ static int ima_add_digest_entry(struct ima_template_entry *entry)
76608 INIT_LIST_HEAD(&qe->later);
76609 list_add_tail_rcu(&qe->later, &ima_measurements);
76610
76611 - atomic_long_inc(&ima_htable.len);
76612 + atomic_long_inc_unchecked(&ima_htable.len);
76613 key = ima_hash_key(entry->digest);
76614 hlist_add_head_rcu(&qe->hnext, &ima_htable.queue[key]);
76615 return 0;
76616 diff --git a/security/keys/compat.c b/security/keys/compat.c
76617 index 4c48e13..7abdac9 100644
76618 --- a/security/keys/compat.c
76619 +++ b/security/keys/compat.c
76620 @@ -44,7 +44,7 @@ long compat_keyctl_instantiate_key_iov(
76621 if (ret == 0)
76622 goto no_payload_free;
76623
76624 - ret = keyctl_instantiate_key_common(id, iov, ioc, ret, ringid);
76625 + ret = keyctl_instantiate_key_common(id, (const struct iovec __force_user *)iov, ioc, ret, ringid);
76626
76627 if (iov != iovstack)
76628 kfree(iov);
76629 diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
76630 index 0b3f5d7..892c8a6 100644
76631 --- a/security/keys/keyctl.c
76632 +++ b/security/keys/keyctl.c
76633 @@ -921,7 +921,7 @@ static int keyctl_change_reqkey_auth(struct key *key)
76634 /*
76635 * Copy the iovec data from userspace
76636 */
76637 -static long copy_from_user_iovec(void *buffer, const struct iovec *iov,
76638 +static long copy_from_user_iovec(void *buffer, const struct iovec __user *iov,
76639 unsigned ioc)
76640 {
76641 for (; ioc > 0; ioc--) {
76642 @@ -943,7 +943,7 @@ static long copy_from_user_iovec(void *buffer, const struct iovec *iov,
76643 * If successful, 0 will be returned.
76644 */
76645 long keyctl_instantiate_key_common(key_serial_t id,
76646 - const struct iovec *payload_iov,
76647 + const struct iovec __user *payload_iov,
76648 unsigned ioc,
76649 size_t plen,
76650 key_serial_t ringid)
76651 @@ -1038,7 +1038,7 @@ long keyctl_instantiate_key(key_serial_t id,
76652 [0].iov_len = plen
76653 };
76654
76655 - return keyctl_instantiate_key_common(id, iov, 1, plen, ringid);
76656 + return keyctl_instantiate_key_common(id, (const struct iovec __force_user *)iov, 1, plen, ringid);
76657 }
76658
76659 return keyctl_instantiate_key_common(id, NULL, 0, 0, ringid);
76660 @@ -1071,7 +1071,7 @@ long keyctl_instantiate_key_iov(key_serial_t id,
76661 if (ret == 0)
76662 goto no_payload_free;
76663
76664 - ret = keyctl_instantiate_key_common(id, iov, ioc, ret, ringid);
76665 + ret = keyctl_instantiate_key_common(id, (const struct iovec __force_user *)iov, ioc, ret, ringid);
76666
76667 if (iov != iovstack)
76668 kfree(iov);
76669 diff --git a/security/keys/keyring.c b/security/keys/keyring.c
76670 index 37a7f3b..86dc19f 100644
76671 --- a/security/keys/keyring.c
76672 +++ b/security/keys/keyring.c
76673 @@ -214,15 +214,15 @@ static long keyring_read(const struct key *keyring,
76674 ret = -EFAULT;
76675
76676 for (loop = 0; loop < klist->nkeys; loop++) {
76677 + key_serial_t serial;
76678 key = klist->keys[loop];
76679 + serial = key->serial;
76680
76681 tmp = sizeof(key_serial_t);
76682 if (tmp > buflen)
76683 tmp = buflen;
76684
76685 - if (copy_to_user(buffer,
76686 - &key->serial,
76687 - tmp) != 0)
76688 + if (copy_to_user(buffer, &serial, tmp))
76689 goto error;
76690
76691 buflen -= tmp;
76692 diff --git a/security/lsm_audit.c b/security/lsm_audit.c
76693 index 893af8a..ba9237c 100644
76694 --- a/security/lsm_audit.c
76695 +++ b/security/lsm_audit.c
76696 @@ -234,10 +234,11 @@ static void dump_common_audit_data(struct audit_buffer *ab,
76697 audit_log_d_path(ab, "path=", &a->u.path);
76698
76699 inode = a->u.path.dentry->d_inode;
76700 - if (inode)
76701 - audit_log_format(ab, " dev=%s ino=%lu",
76702 - inode->i_sb->s_id,
76703 - inode->i_ino);
76704 + if (inode) {
76705 + audit_log_format(ab, " dev=");
76706 + audit_log_untrustedstring(ab, inode->i_sb->s_id);
76707 + audit_log_format(ab, " ino=%lu", inode->i_ino);
76708 + }
76709 break;
76710 }
76711 case LSM_AUDIT_DATA_DENTRY: {
76712 @@ -247,10 +248,11 @@ static void dump_common_audit_data(struct audit_buffer *ab,
76713 audit_log_untrustedstring(ab, a->u.dentry->d_name.name);
76714
76715 inode = a->u.dentry->d_inode;
76716 - if (inode)
76717 - audit_log_format(ab, " dev=%s ino=%lu",
76718 - inode->i_sb->s_id,
76719 - inode->i_ino);
76720 + if (inode) {
76721 + audit_log_format(ab, " dev=");
76722 + audit_log_untrustedstring(ab, inode->i_sb->s_id);
76723 + audit_log_format(ab, " ino=%lu", inode->i_ino);
76724 + }
76725 break;
76726 }
76727 case LSM_AUDIT_DATA_INODE: {
76728 @@ -265,8 +267,9 @@ static void dump_common_audit_data(struct audit_buffer *ab,
76729 dentry->d_name.name);
76730 dput(dentry);
76731 }
76732 - audit_log_format(ab, " dev=%s ino=%lu", inode->i_sb->s_id,
76733 - inode->i_ino);
76734 + audit_log_format(ab, " dev=");
76735 + audit_log_untrustedstring(ab, inode->i_sb->s_id);
76736 + audit_log_format(ab, " ino=%lu", inode->i_ino);
76737 break;
76738 }
76739 case LSM_AUDIT_DATA_TASK:
76740 diff --git a/security/min_addr.c b/security/min_addr.c
76741 index f728728..6457a0c 100644
76742 --- a/security/min_addr.c
76743 +++ b/security/min_addr.c
76744 @@ -14,6 +14,7 @@ unsigned long dac_mmap_min_addr = CONFIG_DEFAULT_MMAP_MIN_ADDR;
76745 */
76746 static void update_mmap_min_addr(void)
76747 {
76748 +#ifndef SPARC
76749 #ifdef CONFIG_LSM_MMAP_MIN_ADDR
76750 if (dac_mmap_min_addr > CONFIG_LSM_MMAP_MIN_ADDR)
76751 mmap_min_addr = dac_mmap_min_addr;
76752 @@ -22,6 +23,7 @@ static void update_mmap_min_addr(void)
76753 #else
76754 mmap_min_addr = dac_mmap_min_addr;
76755 #endif
76756 +#endif
76757 }
76758
76759 /*
76760 diff --git a/security/security.c b/security/security.c
76761 index e2f684a..8d62ef5 100644
76762 --- a/security/security.c
76763 +++ b/security/security.c
76764 @@ -26,8 +26,8 @@
76765 static __initdata char chosen_lsm[SECURITY_NAME_MAX + 1] =
76766 CONFIG_DEFAULT_SECURITY;
76767
76768 -static struct security_operations *security_ops;
76769 -static struct security_operations default_security_ops = {
76770 +static struct security_operations *security_ops __read_only;
76771 +static struct security_operations default_security_ops __read_only = {
76772 .name = "default",
76773 };
76774
76775 @@ -68,7 +68,9 @@ int __init security_init(void)
76776
76777 void reset_security_ops(void)
76778 {
76779 + pax_open_kernel();
76780 security_ops = &default_security_ops;
76781 + pax_close_kernel();
76782 }
76783
76784 /* Save user chosen LSM */
76785 diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
76786 index 1126c10..effb32b 100644
76787 --- a/security/selinux/hooks.c
76788 +++ b/security/selinux/hooks.c
76789 @@ -94,8 +94,6 @@
76790
76791 #define NUM_SEL_MNT_OPTS 5
76792
76793 -extern struct security_operations *security_ops;
76794 -
76795 /* SECMARK reference count */
76796 static atomic_t selinux_secmark_refcount = ATOMIC_INIT(0);
76797
76798 @@ -5449,7 +5447,7 @@ static int selinux_key_getsecurity(struct key *key, char **_buffer)
76799
76800 #endif
76801
76802 -static struct security_operations selinux_ops = {
76803 +static struct security_operations selinux_ops __read_only = {
76804 .name = "selinux",
76805
76806 .ptrace_access_check = selinux_ptrace_access_check,
76807 diff --git a/security/selinux/include/xfrm.h b/security/selinux/include/xfrm.h
76808 index b43813c..74be837 100644
76809 --- a/security/selinux/include/xfrm.h
76810 +++ b/security/selinux/include/xfrm.h
76811 @@ -48,7 +48,7 @@ int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid, int ckall);
76812
76813 static inline void selinux_xfrm_notify_policyload(void)
76814 {
76815 - atomic_inc(&flow_cache_genid);
76816 + atomic_inc_unchecked(&flow_cache_genid);
76817 }
76818 #else
76819 static inline int selinux_xfrm_enabled(void)
76820 diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
76821 index 7db62b4..ee4d949 100644
76822 --- a/security/smack/smack_lsm.c
76823 +++ b/security/smack/smack_lsm.c
76824 @@ -3481,7 +3481,7 @@ static int smack_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen)
76825 return 0;
76826 }
76827
76828 -struct security_operations smack_ops = {
76829 +struct security_operations smack_ops __read_only = {
76830 .name = "smack",
76831
76832 .ptrace_access_check = smack_ptrace_access_check,
76833 diff --git a/security/tomoyo/tomoyo.c b/security/tomoyo/tomoyo.c
76834 index 4b327b6..646c57a 100644
76835 --- a/security/tomoyo/tomoyo.c
76836 +++ b/security/tomoyo/tomoyo.c
76837 @@ -504,7 +504,7 @@ static int tomoyo_socket_sendmsg(struct socket *sock, struct msghdr *msg,
76838 * tomoyo_security_ops is a "struct security_operations" which is used for
76839 * registering TOMOYO.
76840 */
76841 -static struct security_operations tomoyo_security_ops = {
76842 +static struct security_operations tomoyo_security_ops __read_only = {
76843 .name = "tomoyo",
76844 .cred_alloc_blank = tomoyo_cred_alloc_blank,
76845 .cred_prepare = tomoyo_cred_prepare,
76846 diff --git a/sound/aoa/codecs/onyx.c b/sound/aoa/codecs/onyx.c
76847 index 762af68..7103453 100644
76848 --- a/sound/aoa/codecs/onyx.c
76849 +++ b/sound/aoa/codecs/onyx.c
76850 @@ -54,7 +54,7 @@ struct onyx {
76851 spdif_locked:1,
76852 analog_locked:1,
76853 original_mute:2;
76854 - int open_count;
76855 + local_t open_count;
76856 struct codec_info *codec_info;
76857
76858 /* mutex serializes concurrent access to the device
76859 @@ -753,7 +753,7 @@ static int onyx_open(struct codec_info_item *cii,
76860 struct onyx *onyx = cii->codec_data;
76861
76862 mutex_lock(&onyx->mutex);
76863 - onyx->open_count++;
76864 + local_inc(&onyx->open_count);
76865 mutex_unlock(&onyx->mutex);
76866
76867 return 0;
76868 @@ -765,8 +765,7 @@ static int onyx_close(struct codec_info_item *cii,
76869 struct onyx *onyx = cii->codec_data;
76870
76871 mutex_lock(&onyx->mutex);
76872 - onyx->open_count--;
76873 - if (!onyx->open_count)
76874 + if (local_dec_and_test(&onyx->open_count))
76875 onyx->spdif_locked = onyx->analog_locked = 0;
76876 mutex_unlock(&onyx->mutex);
76877
76878 diff --git a/sound/aoa/codecs/onyx.h b/sound/aoa/codecs/onyx.h
76879 index ffd2025..df062c9 100644
76880 --- a/sound/aoa/codecs/onyx.h
76881 +++ b/sound/aoa/codecs/onyx.h
76882 @@ -11,6 +11,7 @@
76883 #include <linux/i2c.h>
76884 #include <asm/pmac_low_i2c.h>
76885 #include <asm/prom.h>
76886 +#include <asm/local.h>
76887
76888 /* PCM3052 register definitions */
76889
76890 diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
76891 index 3cc4b86..af0a951 100644
76892 --- a/sound/core/oss/pcm_oss.c
76893 +++ b/sound/core/oss/pcm_oss.c
76894 @@ -1189,10 +1189,10 @@ snd_pcm_sframes_t snd_pcm_oss_write3(struct snd_pcm_substream *substream, const
76895 if (in_kernel) {
76896 mm_segment_t fs;
76897 fs = snd_enter_user();
76898 - ret = snd_pcm_lib_write(substream, (void __force __user *)ptr, frames);
76899 + ret = snd_pcm_lib_write(substream, (void __force_user *)ptr, frames);
76900 snd_leave_user(fs);
76901 } else {
76902 - ret = snd_pcm_lib_write(substream, (void __force __user *)ptr, frames);
76903 + ret = snd_pcm_lib_write(substream, (void __force_user *)ptr, frames);
76904 }
76905 if (ret != -EPIPE && ret != -ESTRPIPE)
76906 break;
76907 @@ -1234,10 +1234,10 @@ snd_pcm_sframes_t snd_pcm_oss_read3(struct snd_pcm_substream *substream, char *p
76908 if (in_kernel) {
76909 mm_segment_t fs;
76910 fs = snd_enter_user();
76911 - ret = snd_pcm_lib_read(substream, (void __force __user *)ptr, frames);
76912 + ret = snd_pcm_lib_read(substream, (void __force_user *)ptr, frames);
76913 snd_leave_user(fs);
76914 } else {
76915 - ret = snd_pcm_lib_read(substream, (void __force __user *)ptr, frames);
76916 + ret = snd_pcm_lib_read(substream, (void __force_user *)ptr, frames);
76917 }
76918 if (ret == -EPIPE) {
76919 if (runtime->status->state == SNDRV_PCM_STATE_DRAINING) {
76920 @@ -1337,7 +1337,7 @@ static ssize_t snd_pcm_oss_write2(struct snd_pcm_substream *substream, const cha
76921 struct snd_pcm_plugin_channel *channels;
76922 size_t oss_frame_bytes = (runtime->oss.plugin_first->src_width * runtime->oss.plugin_first->src_format.channels) / 8;
76923 if (!in_kernel) {
76924 - if (copy_from_user(runtime->oss.buffer, (const char __force __user *)buf, bytes))
76925 + if (copy_from_user(runtime->oss.buffer, (const char __force_user *)buf, bytes))
76926 return -EFAULT;
76927 buf = runtime->oss.buffer;
76928 }
76929 @@ -1407,7 +1407,7 @@ static ssize_t snd_pcm_oss_write1(struct snd_pcm_substream *substream, const cha
76930 }
76931 } else {
76932 tmp = snd_pcm_oss_write2(substream,
76933 - (const char __force *)buf,
76934 + (const char __force_kernel *)buf,
76935 runtime->oss.period_bytes, 0);
76936 if (tmp <= 0)
76937 goto err;
76938 @@ -1433,7 +1433,7 @@ static ssize_t snd_pcm_oss_read2(struct snd_pcm_substream *substream, char *buf,
76939 struct snd_pcm_runtime *runtime = substream->runtime;
76940 snd_pcm_sframes_t frames, frames1;
76941 #ifdef CONFIG_SND_PCM_OSS_PLUGINS
76942 - char __user *final_dst = (char __force __user *)buf;
76943 + char __user *final_dst = (char __force_user *)buf;
76944 if (runtime->oss.plugin_first) {
76945 struct snd_pcm_plugin_channel *channels;
76946 size_t oss_frame_bytes = (runtime->oss.plugin_last->dst_width * runtime->oss.plugin_last->dst_format.channels) / 8;
76947 @@ -1495,7 +1495,7 @@ static ssize_t snd_pcm_oss_read1(struct snd_pcm_substream *substream, char __use
76948 xfer += tmp;
76949 runtime->oss.buffer_used -= tmp;
76950 } else {
76951 - tmp = snd_pcm_oss_read2(substream, (char __force *)buf,
76952 + tmp = snd_pcm_oss_read2(substream, (char __force_kernel *)buf,
76953 runtime->oss.period_bytes, 0);
76954 if (tmp <= 0)
76955 goto err;
76956 @@ -1663,7 +1663,7 @@ static int snd_pcm_oss_sync(struct snd_pcm_oss_file *pcm_oss_file)
76957 size1);
76958 size1 /= runtime->channels; /* frames */
76959 fs = snd_enter_user();
76960 - snd_pcm_lib_write(substream, (void __force __user *)runtime->oss.buffer, size1);
76961 + snd_pcm_lib_write(substream, (void __force_user *)runtime->oss.buffer, size1);
76962 snd_leave_user(fs);
76963 }
76964 } else if (runtime->access == SNDRV_PCM_ACCESS_RW_NONINTERLEAVED) {
76965 diff --git a/sound/core/pcm_compat.c b/sound/core/pcm_compat.c
76966 index 91cdf94..4085161 100644
76967 --- a/sound/core/pcm_compat.c
76968 +++ b/sound/core/pcm_compat.c
76969 @@ -31,7 +31,7 @@ static int snd_pcm_ioctl_delay_compat(struct snd_pcm_substream *substream,
76970 int err;
76971
76972 fs = snd_enter_user();
76973 - err = snd_pcm_delay(substream, &delay);
76974 + err = snd_pcm_delay(substream, (snd_pcm_sframes_t __force_user *)&delay);
76975 snd_leave_user(fs);
76976 if (err < 0)
76977 return err;
76978 diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
76979 index 25ed9fe..24c46e9 100644
76980 --- a/sound/core/pcm_native.c
76981 +++ b/sound/core/pcm_native.c
76982 @@ -2765,11 +2765,11 @@ int snd_pcm_kernel_ioctl(struct snd_pcm_substream *substream,
76983 switch (substream->stream) {
76984 case SNDRV_PCM_STREAM_PLAYBACK:
76985 result = snd_pcm_playback_ioctl1(NULL, substream, cmd,
76986 - (void __user *)arg);
76987 + (void __force_user *)arg);
76988 break;
76989 case SNDRV_PCM_STREAM_CAPTURE:
76990 result = snd_pcm_capture_ioctl1(NULL, substream, cmd,
76991 - (void __user *)arg);
76992 + (void __force_user *)arg);
76993 break;
76994 default:
76995 result = -EINVAL;
76996 diff --git a/sound/core/seq/seq_device.c b/sound/core/seq/seq_device.c
76997 index 5cf8d65..912a79c 100644
76998 --- a/sound/core/seq/seq_device.c
76999 +++ b/sound/core/seq/seq_device.c
77000 @@ -64,7 +64,7 @@ struct ops_list {
77001 int argsize; /* argument size */
77002
77003 /* operators */
77004 - struct snd_seq_dev_ops ops;
77005 + struct snd_seq_dev_ops *ops;
77006
77007 /* registred devices */
77008 struct list_head dev_list; /* list of devices */
77009 @@ -333,7 +333,7 @@ int snd_seq_device_register_driver(char *id, struct snd_seq_dev_ops *entry,
77010
77011 mutex_lock(&ops->reg_mutex);
77012 /* copy driver operators */
77013 - ops->ops = *entry;
77014 + ops->ops = entry;
77015 ops->driver |= DRIVER_LOADED;
77016 ops->argsize = argsize;
77017
77018 @@ -463,7 +463,7 @@ static int init_device(struct snd_seq_device *dev, struct ops_list *ops)
77019 dev->name, ops->id, ops->argsize, dev->argsize);
77020 return -EINVAL;
77021 }
77022 - if (ops->ops.init_device(dev) >= 0) {
77023 + if (ops->ops->init_device(dev) >= 0) {
77024 dev->status = SNDRV_SEQ_DEVICE_REGISTERED;
77025 ops->num_init_devices++;
77026 } else {
77027 @@ -490,7 +490,7 @@ static int free_device(struct snd_seq_device *dev, struct ops_list *ops)
77028 dev->name, ops->id, ops->argsize, dev->argsize);
77029 return -EINVAL;
77030 }
77031 - if ((result = ops->ops.free_device(dev)) >= 0 || result == -ENXIO) {
77032 + if ((result = ops->ops->free_device(dev)) >= 0 || result == -ENXIO) {
77033 dev->status = SNDRV_SEQ_DEVICE_FREE;
77034 dev->driver_data = NULL;
77035 ops->num_init_devices--;
77036 diff --git a/sound/drivers/mts64.c b/sound/drivers/mts64.c
77037 index f24bf9a..1f7b67c 100644
77038 --- a/sound/drivers/mts64.c
77039 +++ b/sound/drivers/mts64.c
77040 @@ -29,6 +29,7 @@
77041 #include <sound/initval.h>
77042 #include <sound/rawmidi.h>
77043 #include <sound/control.h>
77044 +#include <asm/local.h>
77045
77046 #define CARD_NAME "Miditerminal 4140"
77047 #define DRIVER_NAME "MTS64"
77048 @@ -67,7 +68,7 @@ struct mts64 {
77049 struct pardevice *pardev;
77050 int pardev_claimed;
77051
77052 - int open_count;
77053 + local_t open_count;
77054 int current_midi_output_port;
77055 int current_midi_input_port;
77056 u8 mode[MTS64_NUM_INPUT_PORTS];
77057 @@ -697,7 +698,7 @@ static int snd_mts64_rawmidi_open(struct snd_rawmidi_substream *substream)
77058 {
77059 struct mts64 *mts = substream->rmidi->private_data;
77060
77061 - if (mts->open_count == 0) {
77062 + if (local_read(&mts->open_count) == 0) {
77063 /* We don't need a spinlock here, because this is just called
77064 if the device has not been opened before.
77065 So there aren't any IRQs from the device */
77066 @@ -705,7 +706,7 @@ static int snd_mts64_rawmidi_open(struct snd_rawmidi_substream *substream)
77067
77068 msleep(50);
77069 }
77070 - ++(mts->open_count);
77071 + local_inc(&mts->open_count);
77072
77073 return 0;
77074 }
77075 @@ -715,8 +716,7 @@ static int snd_mts64_rawmidi_close(struct snd_rawmidi_substream *substream)
77076 struct mts64 *mts = substream->rmidi->private_data;
77077 unsigned long flags;
77078
77079 - --(mts->open_count);
77080 - if (mts->open_count == 0) {
77081 + if (local_dec_return(&mts->open_count) == 0) {
77082 /* We need the spinlock_irqsave here because we can still
77083 have IRQs at this point */
77084 spin_lock_irqsave(&mts->lock, flags);
77085 @@ -725,8 +725,8 @@ static int snd_mts64_rawmidi_close(struct snd_rawmidi_substream *substream)
77086
77087 msleep(500);
77088
77089 - } else if (mts->open_count < 0)
77090 - mts->open_count = 0;
77091 + } else if (local_read(&mts->open_count) < 0)
77092 + local_set(&mts->open_count, 0);
77093
77094 return 0;
77095 }
77096 diff --git a/sound/drivers/opl4/opl4_lib.c b/sound/drivers/opl4/opl4_lib.c
77097 index b953fb4..1999c01 100644
77098 --- a/sound/drivers/opl4/opl4_lib.c
77099 +++ b/sound/drivers/opl4/opl4_lib.c
77100 @@ -29,7 +29,7 @@ MODULE_AUTHOR("Clemens Ladisch <clemens@ladisch.de>");
77101 MODULE_DESCRIPTION("OPL4 driver");
77102 MODULE_LICENSE("GPL");
77103
77104 -static void inline snd_opl4_wait(struct snd_opl4 *opl4)
77105 +static inline void snd_opl4_wait(struct snd_opl4 *opl4)
77106 {
77107 int timeout = 10;
77108 while ((inb(opl4->fm_port) & OPL4_STATUS_BUSY) && --timeout > 0)
77109 diff --git a/sound/drivers/portman2x4.c b/sound/drivers/portman2x4.c
77110 index f664823..590c745 100644
77111 --- a/sound/drivers/portman2x4.c
77112 +++ b/sound/drivers/portman2x4.c
77113 @@ -48,6 +48,7 @@
77114 #include <sound/initval.h>
77115 #include <sound/rawmidi.h>
77116 #include <sound/control.h>
77117 +#include <asm/local.h>
77118
77119 #define CARD_NAME "Portman 2x4"
77120 #define DRIVER_NAME "portman"
77121 @@ -85,7 +86,7 @@ struct portman {
77122 struct pardevice *pardev;
77123 int pardev_claimed;
77124
77125 - int open_count;
77126 + local_t open_count;
77127 int mode[PORTMAN_NUM_INPUT_PORTS];
77128 struct snd_rawmidi_substream *midi_input[PORTMAN_NUM_INPUT_PORTS];
77129 };
77130 diff --git a/sound/firewire/amdtp.c b/sound/firewire/amdtp.c
77131 index 87657dd..a8268d4 100644
77132 --- a/sound/firewire/amdtp.c
77133 +++ b/sound/firewire/amdtp.c
77134 @@ -371,7 +371,7 @@ static void queue_out_packet(struct amdtp_out_stream *s, unsigned int cycle)
77135 ptr = s->pcm_buffer_pointer + data_blocks;
77136 if (ptr >= pcm->runtime->buffer_size)
77137 ptr -= pcm->runtime->buffer_size;
77138 - ACCESS_ONCE(s->pcm_buffer_pointer) = ptr;
77139 + ACCESS_ONCE_RW(s->pcm_buffer_pointer) = ptr;
77140
77141 s->pcm_period_pointer += data_blocks;
77142 if (s->pcm_period_pointer >= pcm->runtime->period_size) {
77143 @@ -511,7 +511,7 @@ EXPORT_SYMBOL(amdtp_out_stream_start);
77144 */
77145 void amdtp_out_stream_update(struct amdtp_out_stream *s)
77146 {
77147 - ACCESS_ONCE(s->source_node_id_field) =
77148 + ACCESS_ONCE_RW(s->source_node_id_field) =
77149 (fw_parent_device(s->unit)->card->node_id & 0x3f) << 24;
77150 }
77151 EXPORT_SYMBOL(amdtp_out_stream_update);
77152 diff --git a/sound/firewire/amdtp.h b/sound/firewire/amdtp.h
77153 index 537a9cb..8e8c8e9 100644
77154 --- a/sound/firewire/amdtp.h
77155 +++ b/sound/firewire/amdtp.h
77156 @@ -146,7 +146,7 @@ static inline void amdtp_out_stream_pcm_prepare(struct amdtp_out_stream *s)
77157 static inline void amdtp_out_stream_pcm_trigger(struct amdtp_out_stream *s,
77158 struct snd_pcm_substream *pcm)
77159 {
77160 - ACCESS_ONCE(s->pcm) = pcm;
77161 + ACCESS_ONCE_RW(s->pcm) = pcm;
77162 }
77163
77164 /**
77165 diff --git a/sound/firewire/isight.c b/sound/firewire/isight.c
77166 index cd094ec..eca1277 100644
77167 --- a/sound/firewire/isight.c
77168 +++ b/sound/firewire/isight.c
77169 @@ -96,7 +96,7 @@ static void isight_update_pointers(struct isight *isight, unsigned int count)
77170 ptr += count;
77171 if (ptr >= runtime->buffer_size)
77172 ptr -= runtime->buffer_size;
77173 - ACCESS_ONCE(isight->buffer_pointer) = ptr;
77174 + ACCESS_ONCE_RW(isight->buffer_pointer) = ptr;
77175
77176 isight->period_counter += count;
77177 if (isight->period_counter >= runtime->period_size) {
77178 @@ -307,7 +307,7 @@ static int isight_hw_params(struct snd_pcm_substream *substream,
77179 if (err < 0)
77180 return err;
77181
77182 - ACCESS_ONCE(isight->pcm_active) = true;
77183 + ACCESS_ONCE_RW(isight->pcm_active) = true;
77184
77185 return 0;
77186 }
77187 @@ -340,7 +340,7 @@ static int isight_hw_free(struct snd_pcm_substream *substream)
77188 {
77189 struct isight *isight = substream->private_data;
77190
77191 - ACCESS_ONCE(isight->pcm_active) = false;
77192 + ACCESS_ONCE_RW(isight->pcm_active) = false;
77193
77194 mutex_lock(&isight->mutex);
77195 isight_stop_streaming(isight);
77196 @@ -433,10 +433,10 @@ static int isight_trigger(struct snd_pcm_substream *substream, int cmd)
77197
77198 switch (cmd) {
77199 case SNDRV_PCM_TRIGGER_START:
77200 - ACCESS_ONCE(isight->pcm_running) = true;
77201 + ACCESS_ONCE_RW(isight->pcm_running) = true;
77202 break;
77203 case SNDRV_PCM_TRIGGER_STOP:
77204 - ACCESS_ONCE(isight->pcm_running) = false;
77205 + ACCESS_ONCE_RW(isight->pcm_running) = false;
77206 break;
77207 default:
77208 return -EINVAL;
77209 diff --git a/sound/isa/cmi8330.c b/sound/isa/cmi8330.c
77210 index c94578d..0794ac1 100644
77211 --- a/sound/isa/cmi8330.c
77212 +++ b/sound/isa/cmi8330.c
77213 @@ -172,7 +172,7 @@ struct snd_cmi8330 {
77214
77215 struct snd_pcm *pcm;
77216 struct snd_cmi8330_stream {
77217 - struct snd_pcm_ops ops;
77218 + snd_pcm_ops_no_const ops;
77219 snd_pcm_open_callback_t open;
77220 void *private_data; /* sb or wss */
77221 } streams[2];
77222 diff --git a/sound/oss/sb_audio.c b/sound/oss/sb_audio.c
77223 index 733b014..56ce96f 100644
77224 --- a/sound/oss/sb_audio.c
77225 +++ b/sound/oss/sb_audio.c
77226 @@ -901,7 +901,7 @@ sb16_copy_from_user(int dev,
77227 buf16 = (signed short *)(localbuf + localoffs);
77228 while (c)
77229 {
77230 - locallen = (c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
77231 + locallen = ((unsigned)c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
77232 if (copy_from_user(lbuf8,
77233 userbuf+useroffs + p,
77234 locallen))
77235 diff --git a/sound/oss/swarm_cs4297a.c b/sound/oss/swarm_cs4297a.c
77236 index 09d4648..cf234c7 100644
77237 --- a/sound/oss/swarm_cs4297a.c
77238 +++ b/sound/oss/swarm_cs4297a.c
77239 @@ -2606,7 +2606,6 @@ static int __init cs4297a_init(void)
77240 {
77241 struct cs4297a_state *s;
77242 u32 pwr, id;
77243 - mm_segment_t fs;
77244 int rval;
77245 #ifndef CONFIG_BCM_CS4297A_CSWARM
77246 u64 cfg;
77247 @@ -2696,22 +2695,23 @@ static int __init cs4297a_init(void)
77248 if (!rval) {
77249 char *sb1250_duart_present;
77250
77251 +#if 0
77252 + mm_segment_t fs;
77253 fs = get_fs();
77254 set_fs(KERNEL_DS);
77255 -#if 0
77256 val = SOUND_MASK_LINE;
77257 mixer_ioctl(s, SOUND_MIXER_WRITE_RECSRC, (unsigned long) &val);
77258 for (i = 0; i < ARRAY_SIZE(initvol); i++) {
77259 val = initvol[i].vol;
77260 mixer_ioctl(s, initvol[i].mixch, (unsigned long) &val);
77261 }
77262 + set_fs(fs);
77263 // cs4297a_write_ac97(s, 0x18, 0x0808);
77264 #else
77265 // cs4297a_write_ac97(s, 0x5e, 0x180);
77266 cs4297a_write_ac97(s, 0x02, 0x0808);
77267 cs4297a_write_ac97(s, 0x18, 0x0808);
77268 #endif
77269 - set_fs(fs);
77270
77271 list_add(&s->list, &cs4297a_devs);
77272
77273 diff --git a/sound/pci/hda/hda_codec.h b/sound/pci/hda/hda_codec.h
77274 index 5644711..a2aebc1 100644
77275 --- a/sound/pci/hda/hda_codec.h
77276 +++ b/sound/pci/hda/hda_codec.h
77277 @@ -611,7 +611,7 @@ struct hda_bus_ops {
77278 /* notify power-up/down from codec to controller */
77279 void (*pm_notify)(struct hda_bus *bus);
77280 #endif
77281 -};
77282 +} __no_const;
77283
77284 /* template to pass to the bus constructor */
77285 struct hda_bus_template {
77286 @@ -713,6 +713,7 @@ struct hda_codec_ops {
77287 #endif
77288 void (*reboot_notify)(struct hda_codec *codec);
77289 };
77290 +typedef struct hda_codec_ops __no_const hda_codec_ops_no_const;
77291
77292 /* record for amp information cache */
77293 struct hda_cache_head {
77294 @@ -743,7 +744,7 @@ struct hda_pcm_ops {
77295 struct snd_pcm_substream *substream);
77296 int (*cleanup)(struct hda_pcm_stream *info, struct hda_codec *codec,
77297 struct snd_pcm_substream *substream);
77298 -};
77299 +} __no_const;
77300
77301 /* PCM information for each substream */
77302 struct hda_pcm_stream {
77303 @@ -801,7 +802,7 @@ struct hda_codec {
77304 const char *modelname; /* model name for preset */
77305
77306 /* set by patch */
77307 - struct hda_codec_ops patch_ops;
77308 + hda_codec_ops_no_const patch_ops;
77309
77310 /* PCM to create, set by patch_ops.build_pcms callback */
77311 unsigned int num_pcms;
77312 diff --git a/sound/pci/ice1712/ice1712.h b/sound/pci/ice1712/ice1712.h
77313 index 0da778a..bc38b84 100644
77314 --- a/sound/pci/ice1712/ice1712.h
77315 +++ b/sound/pci/ice1712/ice1712.h
77316 @@ -269,7 +269,7 @@ struct snd_ak4xxx_private {
77317 unsigned int mask_flags; /* total mask bits */
77318 struct snd_akm4xxx_ops {
77319 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
77320 - } ops;
77321 + } __no_const ops;
77322 };
77323
77324 struct snd_ice1712_spdif {
77325 @@ -285,7 +285,7 @@ struct snd_ice1712_spdif {
77326 int (*default_put)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
77327 void (*stream_get)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
77328 int (*stream_put)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
77329 - } ops;
77330 + } __no_const ops;
77331 };
77332
77333
77334 diff --git a/sound/pci/ymfpci/ymfpci_main.c b/sound/pci/ymfpci/ymfpci_main.c
77335 index 03ee4e3..be86b46 100644
77336 --- a/sound/pci/ymfpci/ymfpci_main.c
77337 +++ b/sound/pci/ymfpci/ymfpci_main.c
77338 @@ -203,8 +203,8 @@ static void snd_ymfpci_hw_stop(struct snd_ymfpci *chip)
77339 if ((snd_ymfpci_readl(chip, YDSXGR_STATUS) & 2) == 0)
77340 break;
77341 }
77342 - if (atomic_read(&chip->interrupt_sleep_count)) {
77343 - atomic_set(&chip->interrupt_sleep_count, 0);
77344 + if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
77345 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
77346 wake_up(&chip->interrupt_sleep);
77347 }
77348 __end:
77349 @@ -788,7 +788,7 @@ static void snd_ymfpci_irq_wait(struct snd_ymfpci *chip)
77350 continue;
77351 init_waitqueue_entry(&wait, current);
77352 add_wait_queue(&chip->interrupt_sleep, &wait);
77353 - atomic_inc(&chip->interrupt_sleep_count);
77354 + atomic_inc_unchecked(&chip->interrupt_sleep_count);
77355 schedule_timeout_uninterruptible(msecs_to_jiffies(50));
77356 remove_wait_queue(&chip->interrupt_sleep, &wait);
77357 }
77358 @@ -826,8 +826,8 @@ static irqreturn_t snd_ymfpci_interrupt(int irq, void *dev_id)
77359 snd_ymfpci_writel(chip, YDSXGR_MODE, mode);
77360 spin_unlock(&chip->reg_lock);
77361
77362 - if (atomic_read(&chip->interrupt_sleep_count)) {
77363 - atomic_set(&chip->interrupt_sleep_count, 0);
77364 + if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
77365 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
77366 wake_up(&chip->interrupt_sleep);
77367 }
77368 }
77369 @@ -2382,7 +2382,7 @@ int __devinit snd_ymfpci_create(struct snd_card *card,
77370 spin_lock_init(&chip->reg_lock);
77371 spin_lock_init(&chip->voice_lock);
77372 init_waitqueue_head(&chip->interrupt_sleep);
77373 - atomic_set(&chip->interrupt_sleep_count, 0);
77374 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
77375 chip->card = card;
77376 chip->pci = pci;
77377 chip->irq = -1;
77378 diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
77379 index ee15337..e2187a6 100644
77380 --- a/sound/soc/soc-pcm.c
77381 +++ b/sound/soc/soc-pcm.c
77382 @@ -583,7 +583,7 @@ static snd_pcm_uframes_t soc_pcm_pointer(struct snd_pcm_substream *substream)
77383 }
77384
77385 /* ASoC PCM operations */
77386 -static struct snd_pcm_ops soc_pcm_ops = {
77387 +static snd_pcm_ops_no_const soc_pcm_ops = {
77388 .open = soc_pcm_open,
77389 .close = soc_pcm_close,
77390 .hw_params = soc_pcm_hw_params,
77391 diff --git a/sound/usb/card.h b/sound/usb/card.h
77392 index a39edcc..1014050 100644
77393 --- a/sound/usb/card.h
77394 +++ b/sound/usb/card.h
77395 @@ -44,6 +44,7 @@ struct snd_urb_ops {
77396 int (*prepare_sync)(struct snd_usb_substream *subs, struct snd_pcm_runtime *runtime, struct urb *u);
77397 int (*retire_sync)(struct snd_usb_substream *subs, struct snd_pcm_runtime *runtime, struct urb *u);
77398 };
77399 +typedef struct snd_urb_ops __no_const snd_urb_ops_no_const;
77400
77401 struct snd_usb_substream {
77402 struct snd_usb_stream *stream;
77403 @@ -93,7 +94,7 @@ struct snd_usb_substream {
77404 struct snd_pcm_hw_constraint_list rate_list; /* limited rates */
77405 spinlock_t lock;
77406
77407 - struct snd_urb_ops ops; /* callbacks (must be filled at init) */
77408 + snd_urb_ops_no_const ops; /* callbacks (must be filled at init) */
77409 int last_frame_number; /* stored frame number */
77410 int last_delay; /* stored delay */
77411 };
77412 diff --git a/tools/gcc/Makefile b/tools/gcc/Makefile
77413 new file mode 100644
77414 index 0000000..b044b80
77415 --- /dev/null
77416 +++ b/tools/gcc/Makefile
77417 @@ -0,0 +1,21 @@
77418 +#CC := gcc
77419 +#PLUGIN_SOURCE_FILES := pax_plugin.c
77420 +#PLUGIN_OBJECT_FILES := $(patsubst %.c,%.o,$(PLUGIN_SOURCE_FILES))
77421 +GCCPLUGINS_DIR := $(shell $(CC) -print-file-name=plugin)
77422 +#CFLAGS += -I$(GCCPLUGINS_DIR)/include -fPIC -O2 -Wall -W
77423 +
77424 +HOST_EXTRACFLAGS += -I$(GCCPLUGINS_DIR)/include
77425 +
77426 +hostlibs-y := constify_plugin.so
77427 +hostlibs-$(CONFIG_PAX_MEMORY_STACKLEAK) += stackleak_plugin.so
77428 +hostlibs-$(CONFIG_KALLOCSTAT_PLUGIN) += kallocstat_plugin.so
77429 +hostlibs-$(CONFIG_PAX_KERNEXEC_PLUGIN) += kernexec_plugin.so
77430 +hostlibs-$(CONFIG_CHECKER_PLUGIN) += checker_plugin.so
77431 +
77432 +always := $(hostlibs-y)
77433 +
77434 +constify_plugin-objs := constify_plugin.o
77435 +stackleak_plugin-objs := stackleak_plugin.o
77436 +kallocstat_plugin-objs := kallocstat_plugin.o
77437 +kernexec_plugin-objs := kernexec_plugin.o
77438 +checker_plugin-objs := checker_plugin.o
77439 diff --git a/tools/gcc/checker_plugin.c b/tools/gcc/checker_plugin.c
77440 new file mode 100644
77441 index 0000000..d41b5af
77442 --- /dev/null
77443 +++ b/tools/gcc/checker_plugin.c
77444 @@ -0,0 +1,171 @@
77445 +/*
77446 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
77447 + * Licensed under the GPL v2
77448 + *
77449 + * Note: the choice of the license means that the compilation process is
77450 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
77451 + * but for the kernel it doesn't matter since it doesn't link against
77452 + * any of the gcc libraries
77453 + *
77454 + * gcc plugin to implement various sparse (source code checker) features
77455 + *
77456 + * TODO:
77457 + * - define separate __iomem, __percpu and __rcu address spaces (lots of code to patch)
77458 + *
77459 + * BUGS:
77460 + * - none known
77461 + */
77462 +#include "gcc-plugin.h"
77463 +#include "config.h"
77464 +#include "system.h"
77465 +#include "coretypes.h"
77466 +#include "tree.h"
77467 +#include "tree-pass.h"
77468 +#include "flags.h"
77469 +#include "intl.h"
77470 +#include "toplev.h"
77471 +#include "plugin.h"
77472 +//#include "expr.h" where are you...
77473 +#include "diagnostic.h"
77474 +#include "plugin-version.h"
77475 +#include "tm.h"
77476 +#include "function.h"
77477 +#include "basic-block.h"
77478 +#include "gimple.h"
77479 +#include "rtl.h"
77480 +#include "emit-rtl.h"
77481 +#include "tree-flow.h"
77482 +#include "target.h"
77483 +
77484 +extern void c_register_addr_space (const char *str, addr_space_t as);
77485 +extern enum machine_mode default_addr_space_pointer_mode (addr_space_t);
77486 +extern enum machine_mode default_addr_space_address_mode (addr_space_t);
77487 +extern bool default_addr_space_valid_pointer_mode(enum machine_mode mode, addr_space_t as);
77488 +extern bool default_addr_space_legitimate_address_p(enum machine_mode mode, rtx mem, bool strict, addr_space_t as);
77489 +extern rtx default_addr_space_legitimize_address(rtx x, rtx oldx, enum machine_mode mode, addr_space_t as);
77490 +
77491 +extern void print_gimple_stmt(FILE *, gimple, int, int);
77492 +extern rtx emit_move_insn(rtx x, rtx y);
77493 +
77494 +int plugin_is_GPL_compatible;
77495 +
77496 +static struct plugin_info checker_plugin_info = {
77497 + .version = "201111150100",
77498 +};
77499 +
77500 +#define ADDR_SPACE_KERNEL 0
77501 +#define ADDR_SPACE_FORCE_KERNEL 1
77502 +#define ADDR_SPACE_USER 2
77503 +#define ADDR_SPACE_FORCE_USER 3
77504 +#define ADDR_SPACE_IOMEM 0
77505 +#define ADDR_SPACE_FORCE_IOMEM 0
77506 +#define ADDR_SPACE_PERCPU 0
77507 +#define ADDR_SPACE_FORCE_PERCPU 0
77508 +#define ADDR_SPACE_RCU 0
77509 +#define ADDR_SPACE_FORCE_RCU 0
77510 +
77511 +static enum machine_mode checker_addr_space_pointer_mode(addr_space_t addrspace)
77512 +{
77513 + return default_addr_space_pointer_mode(ADDR_SPACE_GENERIC);
77514 +}
77515 +
77516 +static enum machine_mode checker_addr_space_address_mode(addr_space_t addrspace)
77517 +{
77518 + return default_addr_space_address_mode(ADDR_SPACE_GENERIC);
77519 +}
77520 +
77521 +static bool checker_addr_space_valid_pointer_mode(enum machine_mode mode, addr_space_t as)
77522 +{
77523 + return default_addr_space_valid_pointer_mode(mode, as);
77524 +}
77525 +
77526 +static bool checker_addr_space_legitimate_address_p(enum machine_mode mode, rtx mem, bool strict, addr_space_t as)
77527 +{
77528 + return default_addr_space_legitimate_address_p(mode, mem, strict, ADDR_SPACE_GENERIC);
77529 +}
77530 +
77531 +static rtx checker_addr_space_legitimize_address(rtx x, rtx oldx, enum machine_mode mode, addr_space_t as)
77532 +{
77533 + return default_addr_space_legitimize_address(x, oldx, mode, as);
77534 +}
77535 +
77536 +static bool checker_addr_space_subset_p(addr_space_t subset, addr_space_t superset)
77537 +{
77538 + if (subset == ADDR_SPACE_FORCE_KERNEL && superset == ADDR_SPACE_KERNEL)
77539 + return true;
77540 +
77541 + if (subset == ADDR_SPACE_FORCE_USER && superset == ADDR_SPACE_USER)
77542 + return true;
77543 +
77544 + if (subset == ADDR_SPACE_FORCE_IOMEM && superset == ADDR_SPACE_IOMEM)
77545 + return true;
77546 +
77547 + if (subset == ADDR_SPACE_KERNEL && superset == ADDR_SPACE_FORCE_USER)
77548 + return true;
77549 +
77550 + if (subset == ADDR_SPACE_KERNEL && superset == ADDR_SPACE_FORCE_IOMEM)
77551 + return true;
77552 +
77553 + if (subset == ADDR_SPACE_USER && superset == ADDR_SPACE_FORCE_KERNEL)
77554 + return true;
77555 +
77556 + if (subset == ADDR_SPACE_IOMEM && superset == ADDR_SPACE_FORCE_KERNEL)
77557 + return true;
77558 +
77559 + return subset == superset;
77560 +}
77561 +
77562 +static rtx checker_addr_space_convert(rtx op, tree from_type, tree to_type)
77563 +{
77564 +// addr_space_t from_as = TYPE_ADDR_SPACE(TREE_TYPE(from_type));
77565 +// addr_space_t to_as = TYPE_ADDR_SPACE(TREE_TYPE(to_type));
77566 +
77567 + return op;
77568 +}
77569 +
77570 +static void register_checker_address_spaces(void *event_data, void *data)
77571 +{
77572 + c_register_addr_space("__kernel", ADDR_SPACE_KERNEL);
77573 + c_register_addr_space("__force_kernel", ADDR_SPACE_FORCE_KERNEL);
77574 + c_register_addr_space("__user", ADDR_SPACE_USER);
77575 + c_register_addr_space("__force_user", ADDR_SPACE_FORCE_USER);
77576 +// c_register_addr_space("__iomem", ADDR_SPACE_IOMEM);
77577 +// c_register_addr_space("__force_iomem", ADDR_SPACE_FORCE_IOMEM);
77578 +// c_register_addr_space("__percpu", ADDR_SPACE_PERCPU);
77579 +// c_register_addr_space("__force_percpu", ADDR_SPACE_FORCE_PERCPU);
77580 +// c_register_addr_space("__rcu", ADDR_SPACE_RCU);
77581 +// c_register_addr_space("__force_rcu", ADDR_SPACE_FORCE_RCU);
77582 +
77583 + targetm.addr_space.pointer_mode = checker_addr_space_pointer_mode;
77584 + targetm.addr_space.address_mode = checker_addr_space_address_mode;
77585 + targetm.addr_space.valid_pointer_mode = checker_addr_space_valid_pointer_mode;
77586 + targetm.addr_space.legitimate_address_p = checker_addr_space_legitimate_address_p;
77587 +// targetm.addr_space.legitimize_address = checker_addr_space_legitimize_address;
77588 + targetm.addr_space.subset_p = checker_addr_space_subset_p;
77589 + targetm.addr_space.convert = checker_addr_space_convert;
77590 +}
77591 +
77592 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
77593 +{
77594 + const char * const plugin_name = plugin_info->base_name;
77595 + const int argc = plugin_info->argc;
77596 + const struct plugin_argument * const argv = plugin_info->argv;
77597 + int i;
77598 +
77599 + if (!plugin_default_version_check(version, &gcc_version)) {
77600 + error(G_("incompatible gcc/plugin versions"));
77601 + return 1;
77602 + }
77603 +
77604 + register_callback(plugin_name, PLUGIN_INFO, NULL, &checker_plugin_info);
77605 +
77606 + for (i = 0; i < argc; ++i)
77607 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
77608 +
77609 + if (TARGET_64BIT == 0)
77610 + return 0;
77611 +
77612 + register_callback(plugin_name, PLUGIN_PRAGMAS, register_checker_address_spaces, NULL);
77613 +
77614 + return 0;
77615 +}
77616 diff --git a/tools/gcc/constify_plugin.c b/tools/gcc/constify_plugin.c
77617 new file mode 100644
77618 index 0000000..704a564
77619 --- /dev/null
77620 +++ b/tools/gcc/constify_plugin.c
77621 @@ -0,0 +1,303 @@
77622 +/*
77623 + * Copyright 2011 by Emese Revfy <re.emese@gmail.com>
77624 + * Copyright 2011 by PaX Team <pageexec@freemail.hu>
77625 + * Licensed under the GPL v2, or (at your option) v3
77626 + *
77627 + * This gcc plugin constifies all structures which contain only function pointers or are explicitly marked for constification.
77628 + *
77629 + * Homepage:
77630 + * http://www.grsecurity.net/~ephox/const_plugin/
77631 + *
77632 + * Usage:
77633 + * $ gcc -I`gcc -print-file-name=plugin`/include -fPIC -shared -O2 -o constify_plugin.so constify_plugin.c
77634 + * $ gcc -fplugin=constify_plugin.so test.c -O2
77635 + */
77636 +
77637 +#include "gcc-plugin.h"
77638 +#include "config.h"
77639 +#include "system.h"
77640 +#include "coretypes.h"
77641 +#include "tree.h"
77642 +#include "tree-pass.h"
77643 +#include "flags.h"
77644 +#include "intl.h"
77645 +#include "toplev.h"
77646 +#include "plugin.h"
77647 +#include "diagnostic.h"
77648 +#include "plugin-version.h"
77649 +#include "tm.h"
77650 +#include "function.h"
77651 +#include "basic-block.h"
77652 +#include "gimple.h"
77653 +#include "rtl.h"
77654 +#include "emit-rtl.h"
77655 +#include "tree-flow.h"
77656 +
77657 +#define C_TYPE_FIELDS_READONLY(TYPE) TREE_LANG_FLAG_1(TYPE)
77658 +
77659 +int plugin_is_GPL_compatible;
77660 +
77661 +static struct plugin_info const_plugin_info = {
77662 + .version = "201111150100",
77663 + .help = "no-constify\tturn off constification\n",
77664 +};
77665 +
77666 +static void constify_type(tree type);
77667 +static bool walk_struct(tree node);
77668 +
77669 +static tree deconstify_type(tree old_type)
77670 +{
77671 + tree new_type, field;
77672 +
77673 + new_type = build_qualified_type(old_type, TYPE_QUALS(old_type) & ~TYPE_QUAL_CONST);
77674 + TYPE_FIELDS(new_type) = copy_list(TYPE_FIELDS(new_type));
77675 + for (field = TYPE_FIELDS(new_type); field; field = TREE_CHAIN(field))
77676 + DECL_FIELD_CONTEXT(field) = new_type;
77677 + TYPE_READONLY(new_type) = 0;
77678 + C_TYPE_FIELDS_READONLY(new_type) = 0;
77679 + return new_type;
77680 +}
77681 +
77682 +static tree handle_no_const_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
77683 +{
77684 + tree type;
77685 +
77686 + *no_add_attrs = true;
77687 + if (TREE_CODE(*node) == FUNCTION_DECL) {
77688 + error("%qE attribute does not apply to functions", name);
77689 + return NULL_TREE;
77690 + }
77691 +
77692 + if (TREE_CODE(*node) == VAR_DECL) {
77693 + error("%qE attribute does not apply to variables", name);
77694 + return NULL_TREE;
77695 + }
77696 +
77697 + if (TYPE_P(*node)) {
77698 + if (TREE_CODE(*node) == RECORD_TYPE || TREE_CODE(*node) == UNION_TYPE)
77699 + *no_add_attrs = false;
77700 + else
77701 + error("%qE attribute applies to struct and union types only", name);
77702 + return NULL_TREE;
77703 + }
77704 +
77705 + type = TREE_TYPE(*node);
77706 +
77707 + if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE) {
77708 + error("%qE attribute applies to struct and union types only", name);
77709 + return NULL_TREE;
77710 + }
77711 +
77712 + if (lookup_attribute(IDENTIFIER_POINTER(name), TYPE_ATTRIBUTES(type))) {
77713 + error("%qE attribute is already applied to the type", name);
77714 + return NULL_TREE;
77715 + }
77716 +
77717 + if (TREE_CODE(*node) == TYPE_DECL && !TYPE_READONLY(type)) {
77718 + error("%qE attribute used on type that is not constified", name);
77719 + return NULL_TREE;
77720 + }
77721 +
77722 + if (TREE_CODE(*node) == TYPE_DECL) {
77723 + TREE_TYPE(*node) = deconstify_type(type);
77724 + TREE_READONLY(*node) = 0;
77725 + return NULL_TREE;
77726 + }
77727 +
77728 + return NULL_TREE;
77729 +}
77730 +
77731 +static tree handle_do_const_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
77732 +{
77733 + *no_add_attrs = true;
77734 + if (!TYPE_P(*node)) {
77735 + error("%qE attribute applies to types only", name);
77736 + return NULL_TREE;
77737 + }
77738 +
77739 + if (TREE_CODE(*node) != RECORD_TYPE && TREE_CODE(*node) != UNION_TYPE) {
77740 + error("%qE attribute applies to struct and union types only", name);
77741 + return NULL_TREE;
77742 + }
77743 +
77744 + *no_add_attrs = false;
77745 + constify_type(*node);
77746 + return NULL_TREE;
77747 +}
77748 +
77749 +static struct attribute_spec no_const_attr = {
77750 + .name = "no_const",
77751 + .min_length = 0,
77752 + .max_length = 0,
77753 + .decl_required = false,
77754 + .type_required = false,
77755 + .function_type_required = false,
77756 + .handler = handle_no_const_attribute,
77757 +#if BUILDING_GCC_VERSION >= 4007
77758 + .affects_type_identity = true
77759 +#endif
77760 +};
77761 +
77762 +static struct attribute_spec do_const_attr = {
77763 + .name = "do_const",
77764 + .min_length = 0,
77765 + .max_length = 0,
77766 + .decl_required = false,
77767 + .type_required = false,
77768 + .function_type_required = false,
77769 + .handler = handle_do_const_attribute,
77770 +#if BUILDING_GCC_VERSION >= 4007
77771 + .affects_type_identity = true
77772 +#endif
77773 +};
77774 +
77775 +static void register_attributes(void *event_data, void *data)
77776 +{
77777 + register_attribute(&no_const_attr);
77778 + register_attribute(&do_const_attr);
77779 +}
77780 +
77781 +static void constify_type(tree type)
77782 +{
77783 + TYPE_READONLY(type) = 1;
77784 + C_TYPE_FIELDS_READONLY(type) = 1;
77785 +}
77786 +
77787 +static bool is_fptr(tree field)
77788 +{
77789 + tree ptr = TREE_TYPE(field);
77790 +
77791 + if (TREE_CODE(ptr) != POINTER_TYPE)
77792 + return false;
77793 +
77794 + return TREE_CODE(TREE_TYPE(ptr)) == FUNCTION_TYPE;
77795 +}
77796 +
77797 +static bool walk_struct(tree node)
77798 +{
77799 + tree field;
77800 +
77801 + if (lookup_attribute("no_const", TYPE_ATTRIBUTES(node)))
77802 + return false;
77803 +
77804 + if (TYPE_FIELDS(node) == NULL_TREE)
77805 + return false;
77806 +
77807 + for (field = TYPE_FIELDS(node); field; field = TREE_CHAIN(field)) {
77808 + tree type = TREE_TYPE(field);
77809 + enum tree_code code = TREE_CODE(type);
77810 + if (code == RECORD_TYPE || code == UNION_TYPE) {
77811 + if (!(walk_struct(type)))
77812 + return false;
77813 + } else if (!is_fptr(field) && !TREE_READONLY(field))
77814 + return false;
77815 + }
77816 + return true;
77817 +}
77818 +
77819 +static void finish_type(void *event_data, void *data)
77820 +{
77821 + tree type = (tree)event_data;
77822 +
77823 + if (type == NULL_TREE)
77824 + return;
77825 +
77826 + if (TYPE_READONLY(type))
77827 + return;
77828 +
77829 + if (walk_struct(type))
77830 + constify_type(type);
77831 +}
77832 +
77833 +static unsigned int check_local_variables(void);
77834 +
77835 +struct gimple_opt_pass pass_local_variable = {
77836 + {
77837 + .type = GIMPLE_PASS,
77838 + .name = "check_local_variables",
77839 + .gate = NULL,
77840 + .execute = check_local_variables,
77841 + .sub = NULL,
77842 + .next = NULL,
77843 + .static_pass_number = 0,
77844 + .tv_id = TV_NONE,
77845 + .properties_required = 0,
77846 + .properties_provided = 0,
77847 + .properties_destroyed = 0,
77848 + .todo_flags_start = 0,
77849 + .todo_flags_finish = 0
77850 + }
77851 +};
77852 +
77853 +static unsigned int check_local_variables(void)
77854 +{
77855 + tree var;
77856 + referenced_var_iterator rvi;
77857 +
77858 +#if BUILDING_GCC_VERSION == 4005
77859 + FOR_EACH_REFERENCED_VAR(var, rvi) {
77860 +#else
77861 + FOR_EACH_REFERENCED_VAR(cfun, var, rvi) {
77862 +#endif
77863 + tree type = TREE_TYPE(var);
77864 +
77865 + if (!DECL_P(var) || TREE_STATIC(var) || DECL_EXTERNAL(var))
77866 + continue;
77867 +
77868 + if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE)
77869 + continue;
77870 +
77871 + if (!TYPE_READONLY(type))
77872 + continue;
77873 +
77874 +// if (lookup_attribute("no_const", DECL_ATTRIBUTES(var)))
77875 +// continue;
77876 +
77877 +// if (lookup_attribute("no_const", TYPE_ATTRIBUTES(type)))
77878 +// continue;
77879 +
77880 + if (walk_struct(type)) {
77881 + error("constified variable %qE cannot be local", var);
77882 + return 1;
77883 + }
77884 + }
77885 + return 0;
77886 +}
77887 +
77888 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
77889 +{
77890 + const char * const plugin_name = plugin_info->base_name;
77891 + const int argc = plugin_info->argc;
77892 + const struct plugin_argument * const argv = plugin_info->argv;
77893 + int i;
77894 + bool constify = true;
77895 +
77896 + struct register_pass_info local_variable_pass_info = {
77897 + .pass = &pass_local_variable.pass,
77898 + .reference_pass_name = "*referenced_vars",
77899 + .ref_pass_instance_number = 0,
77900 + .pos_op = PASS_POS_INSERT_AFTER
77901 + };
77902 +
77903 + if (!plugin_default_version_check(version, &gcc_version)) {
77904 + error(G_("incompatible gcc/plugin versions"));
77905 + return 1;
77906 + }
77907 +
77908 + for (i = 0; i < argc; ++i) {
77909 + if (!(strcmp(argv[i].key, "no-constify"))) {
77910 + constify = false;
77911 + continue;
77912 + }
77913 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
77914 + }
77915 +
77916 + register_callback(plugin_name, PLUGIN_INFO, NULL, &const_plugin_info);
77917 + if (constify) {
77918 + register_callback(plugin_name, PLUGIN_FINISH_TYPE, finish_type, NULL);
77919 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &local_variable_pass_info);
77920 + }
77921 + register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL);
77922 +
77923 + return 0;
77924 +}
77925 diff --git a/tools/gcc/kallocstat_plugin.c b/tools/gcc/kallocstat_plugin.c
77926 new file mode 100644
77927 index 0000000..a5eabce
77928 --- /dev/null
77929 +++ b/tools/gcc/kallocstat_plugin.c
77930 @@ -0,0 +1,167 @@
77931 +/*
77932 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
77933 + * Licensed under the GPL v2
77934 + *
77935 + * Note: the choice of the license means that the compilation process is
77936 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
77937 + * but for the kernel it doesn't matter since it doesn't link against
77938 + * any of the gcc libraries
77939 + *
77940 + * gcc plugin to find the distribution of k*alloc sizes
77941 + *
77942 + * TODO:
77943 + *
77944 + * BUGS:
77945 + * - none known
77946 + */
77947 +#include "gcc-plugin.h"
77948 +#include "config.h"
77949 +#include "system.h"
77950 +#include "coretypes.h"
77951 +#include "tree.h"
77952 +#include "tree-pass.h"
77953 +#include "flags.h"
77954 +#include "intl.h"
77955 +#include "toplev.h"
77956 +#include "plugin.h"
77957 +//#include "expr.h" where are you...
77958 +#include "diagnostic.h"
77959 +#include "plugin-version.h"
77960 +#include "tm.h"
77961 +#include "function.h"
77962 +#include "basic-block.h"
77963 +#include "gimple.h"
77964 +#include "rtl.h"
77965 +#include "emit-rtl.h"
77966 +
77967 +extern void print_gimple_stmt(FILE *, gimple, int, int);
77968 +
77969 +int plugin_is_GPL_compatible;
77970 +
77971 +static const char * const kalloc_functions[] = {
77972 + "__kmalloc",
77973 + "kmalloc",
77974 + "kmalloc_large",
77975 + "kmalloc_node",
77976 + "kmalloc_order",
77977 + "kmalloc_order_trace",
77978 + "kmalloc_slab",
77979 + "kzalloc",
77980 + "kzalloc_node",
77981 +};
77982 +
77983 +static struct plugin_info kallocstat_plugin_info = {
77984 + .version = "201111150100",
77985 +};
77986 +
77987 +static unsigned int execute_kallocstat(void);
77988 +
77989 +static struct gimple_opt_pass kallocstat_pass = {
77990 + .pass = {
77991 + .type = GIMPLE_PASS,
77992 + .name = "kallocstat",
77993 + .gate = NULL,
77994 + .execute = execute_kallocstat,
77995 + .sub = NULL,
77996 + .next = NULL,
77997 + .static_pass_number = 0,
77998 + .tv_id = TV_NONE,
77999 + .properties_required = 0,
78000 + .properties_provided = 0,
78001 + .properties_destroyed = 0,
78002 + .todo_flags_start = 0,
78003 + .todo_flags_finish = 0
78004 + }
78005 +};
78006 +
78007 +static bool is_kalloc(const char *fnname)
78008 +{
78009 + size_t i;
78010 +
78011 + for (i = 0; i < ARRAY_SIZE(kalloc_functions); i++)
78012 + if (!strcmp(fnname, kalloc_functions[i]))
78013 + return true;
78014 + return false;
78015 +}
78016 +
78017 +static unsigned int execute_kallocstat(void)
78018 +{
78019 + basic_block bb;
78020 +
78021 + // 1. loop through BBs and GIMPLE statements
78022 + FOR_EACH_BB(bb) {
78023 + gimple_stmt_iterator gsi;
78024 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
78025 + // gimple match:
78026 + tree fndecl, size;
78027 + gimple call_stmt;
78028 + const char *fnname;
78029 +
78030 + // is it a call
78031 + call_stmt = gsi_stmt(gsi);
78032 + if (!is_gimple_call(call_stmt))
78033 + continue;
78034 + fndecl = gimple_call_fndecl(call_stmt);
78035 + if (fndecl == NULL_TREE)
78036 + continue;
78037 + if (TREE_CODE(fndecl) != FUNCTION_DECL)
78038 + continue;
78039 +
78040 + // is it a call to k*alloc
78041 + fnname = IDENTIFIER_POINTER(DECL_NAME(fndecl));
78042 + if (!is_kalloc(fnname))
78043 + continue;
78044 +
78045 + // is the size arg the result of a simple const assignment
78046 + size = gimple_call_arg(call_stmt, 0);
78047 + while (true) {
78048 + gimple def_stmt;
78049 + expanded_location xloc;
78050 + size_t size_val;
78051 +
78052 + if (TREE_CODE(size) != SSA_NAME)
78053 + break;
78054 + def_stmt = SSA_NAME_DEF_STMT(size);
78055 + if (!def_stmt || !is_gimple_assign(def_stmt))
78056 + break;
78057 + if (gimple_num_ops(def_stmt) != 2)
78058 + break;
78059 + size = gimple_assign_rhs1(def_stmt);
78060 + if (!TREE_CONSTANT(size))
78061 + continue;
78062 + xloc = expand_location(gimple_location(def_stmt));
78063 + if (!xloc.file)
78064 + xloc = expand_location(DECL_SOURCE_LOCATION(current_function_decl));
78065 + size_val = TREE_INT_CST_LOW(size);
78066 + fprintf(stderr, "kallocsize: %8zu %8zx %s %s:%u\n", size_val, size_val, fnname, xloc.file, xloc.line);
78067 + break;
78068 + }
78069 +//print_gimple_stmt(stderr, call_stmt, 0, TDF_LINENO);
78070 +//debug_tree(gimple_call_fn(call_stmt));
78071 +//print_node(stderr, "pax", fndecl, 4);
78072 + }
78073 + }
78074 +
78075 + return 0;
78076 +}
78077 +
78078 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
78079 +{
78080 + const char * const plugin_name = plugin_info->base_name;
78081 + struct register_pass_info kallocstat_pass_info = {
78082 + .pass = &kallocstat_pass.pass,
78083 + .reference_pass_name = "ssa",
78084 + .ref_pass_instance_number = 0,
78085 + .pos_op = PASS_POS_INSERT_AFTER
78086 + };
78087 +
78088 + if (!plugin_default_version_check(version, &gcc_version)) {
78089 + error(G_("incompatible gcc/plugin versions"));
78090 + return 1;
78091 + }
78092 +
78093 + register_callback(plugin_name, PLUGIN_INFO, NULL, &kallocstat_plugin_info);
78094 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kallocstat_pass_info);
78095 +
78096 + return 0;
78097 +}
78098 diff --git a/tools/gcc/kernexec_plugin.c b/tools/gcc/kernexec_plugin.c
78099 new file mode 100644
78100 index 0000000..51f747e
78101 --- /dev/null
78102 +++ b/tools/gcc/kernexec_plugin.c
78103 @@ -0,0 +1,348 @@
78104 +/*
78105 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
78106 + * Licensed under the GPL v2
78107 + *
78108 + * Note: the choice of the license means that the compilation process is
78109 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
78110 + * but for the kernel it doesn't matter since it doesn't link against
78111 + * any of the gcc libraries
78112 + *
78113 + * gcc plugin to make KERNEXEC/amd64 almost as good as it is on i386
78114 + *
78115 + * TODO:
78116 + *
78117 + * BUGS:
78118 + * - none known
78119 + */
78120 +#include "gcc-plugin.h"
78121 +#include "config.h"
78122 +#include "system.h"
78123 +#include "coretypes.h"
78124 +#include "tree.h"
78125 +#include "tree-pass.h"
78126 +#include "flags.h"
78127 +#include "intl.h"
78128 +#include "toplev.h"
78129 +#include "plugin.h"
78130 +//#include "expr.h" where are you...
78131 +#include "diagnostic.h"
78132 +#include "plugin-version.h"
78133 +#include "tm.h"
78134 +#include "function.h"
78135 +#include "basic-block.h"
78136 +#include "gimple.h"
78137 +#include "rtl.h"
78138 +#include "emit-rtl.h"
78139 +#include "tree-flow.h"
78140 +
78141 +extern void print_gimple_stmt(FILE *, gimple, int, int);
78142 +extern rtx emit_move_insn(rtx x, rtx y);
78143 +
78144 +int plugin_is_GPL_compatible;
78145 +
78146 +static struct plugin_info kernexec_plugin_info = {
78147 + .version = "201111291120",
78148 + .help = "method=[bts|or]\tinstrumentation method\n"
78149 +};
78150 +
78151 +static unsigned int execute_kernexec_fptr(void);
78152 +static unsigned int execute_kernexec_retaddr(void);
78153 +static bool kernexec_cmodel_check(void);
78154 +
78155 +static void (*kernexec_instrument_fptr)(gimple_stmt_iterator);
78156 +static void (*kernexec_instrument_retaddr)(rtx);
78157 +
78158 +static struct gimple_opt_pass kernexec_fptr_pass = {
78159 + .pass = {
78160 + .type = GIMPLE_PASS,
78161 + .name = "kernexec_fptr",
78162 + .gate = kernexec_cmodel_check,
78163 + .execute = execute_kernexec_fptr,
78164 + .sub = NULL,
78165 + .next = NULL,
78166 + .static_pass_number = 0,
78167 + .tv_id = TV_NONE,
78168 + .properties_required = 0,
78169 + .properties_provided = 0,
78170 + .properties_destroyed = 0,
78171 + .todo_flags_start = 0,
78172 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_remove_unused_locals | TODO_update_ssa_no_phi
78173 + }
78174 +};
78175 +
78176 +static struct rtl_opt_pass kernexec_retaddr_pass = {
78177 + .pass = {
78178 + .type = RTL_PASS,
78179 + .name = "kernexec_retaddr",
78180 + .gate = kernexec_cmodel_check,
78181 + .execute = execute_kernexec_retaddr,
78182 + .sub = NULL,
78183 + .next = NULL,
78184 + .static_pass_number = 0,
78185 + .tv_id = TV_NONE,
78186 + .properties_required = 0,
78187 + .properties_provided = 0,
78188 + .properties_destroyed = 0,
78189 + .todo_flags_start = 0,
78190 + .todo_flags_finish = TODO_dump_func | TODO_ggc_collect
78191 + }
78192 +};
78193 +
78194 +static bool kernexec_cmodel_check(void)
78195 +{
78196 + tree section;
78197 +
78198 + if (ix86_cmodel != CM_KERNEL)
78199 + return false;
78200 +
78201 + section = lookup_attribute("section", DECL_ATTRIBUTES(current_function_decl));
78202 + if (!section || !TREE_VALUE(section))
78203 + return true;
78204 +
78205 + section = TREE_VALUE(TREE_VALUE(section));
78206 + if (strncmp(TREE_STRING_POINTER(section), ".vsyscall_", 10))
78207 + return true;
78208 +
78209 + return false;
78210 +}
78211 +
78212 +/*
78213 + * add special KERNEXEC instrumentation: force MSB of fptr to 1, which will produce
78214 + * a non-canonical address from a userland ptr and will just trigger a GPF on dereference
78215 + */
78216 +static void kernexec_instrument_fptr_bts(gimple_stmt_iterator gsi)
78217 +{
78218 + gimple assign_intptr, assign_new_fptr, call_stmt;
78219 + tree intptr, old_fptr, new_fptr, kernexec_mask;
78220 +
78221 + call_stmt = gsi_stmt(gsi);
78222 + old_fptr = gimple_call_fn(call_stmt);
78223 +
78224 + // create temporary unsigned long variable used for bitops and cast fptr to it
78225 + intptr = create_tmp_var(long_unsigned_type_node, "kernexec_bts");
78226 + add_referenced_var(intptr);
78227 + mark_sym_for_renaming(intptr);
78228 + assign_intptr = gimple_build_assign(intptr, fold_convert(long_unsigned_type_node, old_fptr));
78229 + gsi_insert_before(&gsi, assign_intptr, GSI_SAME_STMT);
78230 + update_stmt(assign_intptr);
78231 +
78232 + // apply logical or to temporary unsigned long and bitmask
78233 + kernexec_mask = build_int_cstu(long_long_unsigned_type_node, 0x8000000000000000LL);
78234 +// kernexec_mask = build_int_cstu(long_long_unsigned_type_node, 0xffffffff80000000LL);
78235 + assign_intptr = gimple_build_assign(intptr, fold_build2(BIT_IOR_EXPR, long_long_unsigned_type_node, intptr, kernexec_mask));
78236 + gsi_insert_before(&gsi, assign_intptr, GSI_SAME_STMT);
78237 + update_stmt(assign_intptr);
78238 +
78239 + // cast temporary unsigned long back to a temporary fptr variable
78240 + new_fptr = create_tmp_var(TREE_TYPE(old_fptr), "kernexec");
78241 + add_referenced_var(new_fptr);
78242 + mark_sym_for_renaming(new_fptr);
78243 + assign_new_fptr = gimple_build_assign(new_fptr, fold_convert(TREE_TYPE(old_fptr), intptr));
78244 + gsi_insert_before(&gsi, assign_new_fptr, GSI_SAME_STMT);
78245 + update_stmt(assign_new_fptr);
78246 +
78247 + // replace call stmt fn with the new fptr
78248 + gimple_call_set_fn(call_stmt, new_fptr);
78249 + update_stmt(call_stmt);
78250 +}
78251 +
78252 +static void kernexec_instrument_fptr_or(gimple_stmt_iterator gsi)
78253 +{
78254 + gimple asm_or_stmt, call_stmt;
78255 + tree old_fptr, new_fptr, input, output;
78256 + VEC(tree, gc) *inputs = NULL;
78257 + VEC(tree, gc) *outputs = NULL;
78258 +
78259 + call_stmt = gsi_stmt(gsi);
78260 + old_fptr = gimple_call_fn(call_stmt);
78261 +
78262 + // create temporary fptr variable
78263 + new_fptr = create_tmp_var(TREE_TYPE(old_fptr), "kernexec_or");
78264 + add_referenced_var(new_fptr);
78265 + mark_sym_for_renaming(new_fptr);
78266 +
78267 + // build asm volatile("orq %%r10, %0\n\t" : "=r"(new_fptr) : "0"(old_fptr));
78268 + input = build_tree_list(NULL_TREE, build_string(2, "0"));
78269 + input = chainon(NULL_TREE, build_tree_list(input, old_fptr));
78270 + output = build_tree_list(NULL_TREE, build_string(3, "=r"));
78271 + output = chainon(NULL_TREE, build_tree_list(output, new_fptr));
78272 + VEC_safe_push(tree, gc, inputs, input);
78273 + VEC_safe_push(tree, gc, outputs, output);
78274 + asm_or_stmt = gimple_build_asm_vec("orq %%r10, %0\n\t", inputs, outputs, NULL, NULL);
78275 + gimple_asm_set_volatile(asm_or_stmt, true);
78276 + gsi_insert_before(&gsi, asm_or_stmt, GSI_SAME_STMT);
78277 + update_stmt(asm_or_stmt);
78278 +
78279 + // replace call stmt fn with the new fptr
78280 + gimple_call_set_fn(call_stmt, new_fptr);
78281 + update_stmt(call_stmt);
78282 +}
78283 +
78284 +/*
78285 + * find all C level function pointer dereferences and forcibly set the highest bit of the pointer
78286 + */
78287 +static unsigned int execute_kernexec_fptr(void)
78288 +{
78289 + basic_block bb;
78290 + gimple_stmt_iterator gsi;
78291 +
78292 + // 1. loop through BBs and GIMPLE statements
78293 + FOR_EACH_BB(bb) {
78294 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
78295 + // gimple match: h_1 = get_fptr (); D.2709_3 = h_1 (x_2(D));
78296 + tree fn;
78297 + gimple call_stmt;
78298 +
78299 + // is it a call ...
78300 + call_stmt = gsi_stmt(gsi);
78301 + if (!is_gimple_call(call_stmt))
78302 + continue;
78303 + fn = gimple_call_fn(call_stmt);
78304 + if (TREE_CODE(fn) == ADDR_EXPR)
78305 + continue;
78306 + if (TREE_CODE(fn) != SSA_NAME)
78307 + gcc_unreachable();
78308 +
78309 + // ... through a function pointer
78310 + fn = SSA_NAME_VAR(fn);
78311 + if (TREE_CODE(fn) != VAR_DECL && TREE_CODE(fn) != PARM_DECL)
78312 + continue;
78313 + fn = TREE_TYPE(fn);
78314 + if (TREE_CODE(fn) != POINTER_TYPE)
78315 + continue;
78316 + fn = TREE_TYPE(fn);
78317 + if (TREE_CODE(fn) != FUNCTION_TYPE)
78318 + continue;
78319 +
78320 + kernexec_instrument_fptr(gsi);
78321 +
78322 +//debug_tree(gimple_call_fn(call_stmt));
78323 +//print_gimple_stmt(stderr, call_stmt, 0, TDF_LINENO);
78324 + }
78325 + }
78326 +
78327 + return 0;
78328 +}
78329 +
78330 +// add special KERNEXEC instrumentation: btsq $63,(%rsp) just before retn
78331 +static void kernexec_instrument_retaddr_bts(rtx insn)
78332 +{
78333 + rtx btsq;
78334 + rtvec argvec, constraintvec, labelvec;
78335 + int line;
78336 +
78337 + // create asm volatile("btsq $63,(%%rsp)":::)
78338 + argvec = rtvec_alloc(0);
78339 + constraintvec = rtvec_alloc(0);
78340 + labelvec = rtvec_alloc(0);
78341 + line = expand_location(RTL_LOCATION(insn)).line;
78342 + btsq = gen_rtx_ASM_OPERANDS(VOIDmode, "btsq $63,(%%rsp)", empty_string, 0, argvec, constraintvec, labelvec, line);
78343 + MEM_VOLATILE_P(btsq) = 1;
78344 +// RTX_FRAME_RELATED_P(btsq) = 1; // not for ASM_OPERANDS
78345 + emit_insn_before(btsq, insn);
78346 +}
78347 +
78348 +// add special KERNEXEC instrumentation: orq %r10,(%rsp) just before retn
78349 +static void kernexec_instrument_retaddr_or(rtx insn)
78350 +{
78351 + rtx orq;
78352 + rtvec argvec, constraintvec, labelvec;
78353 + int line;
78354 +
78355 + // create asm volatile("orq %%r10,(%%rsp)":::)
78356 + argvec = rtvec_alloc(0);
78357 + constraintvec = rtvec_alloc(0);
78358 + labelvec = rtvec_alloc(0);
78359 + line = expand_location(RTL_LOCATION(insn)).line;
78360 + orq = gen_rtx_ASM_OPERANDS(VOIDmode, "orq %%r10,(%%rsp)", empty_string, 0, argvec, constraintvec, labelvec, line);
78361 + MEM_VOLATILE_P(orq) = 1;
78362 +// RTX_FRAME_RELATED_P(orq) = 1; // not for ASM_OPERANDS
78363 + emit_insn_before(orq, insn);
78364 +}
78365 +
78366 +/*
78367 + * find all asm level function returns and forcibly set the highest bit of the return address
78368 + */
78369 +static unsigned int execute_kernexec_retaddr(void)
78370 +{
78371 + rtx insn;
78372 +
78373 + // 1. find function returns
78374 + for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) {
78375 + // rtl match: (jump_insn 41 40 42 2 (return) fptr.c:42 634 {return_internal} (nil))
78376 + // (jump_insn 12 9 11 2 (parallel [ (return) (unspec [ (0) ] UNSPEC_REP) ]) fptr.c:46 635 {return_internal_long} (nil))
78377 + rtx body;
78378 +
78379 + // is it a retn
78380 + if (!JUMP_P(insn))
78381 + continue;
78382 + body = PATTERN(insn);
78383 + if (GET_CODE(body) == PARALLEL)
78384 + body = XVECEXP(body, 0, 0);
78385 + if (GET_CODE(body) != RETURN)
78386 + continue;
78387 + kernexec_instrument_retaddr(insn);
78388 + }
78389 +
78390 +// print_simple_rtl(stderr, get_insns());
78391 +// print_rtl(stderr, get_insns());
78392 +
78393 + return 0;
78394 +}
78395 +
78396 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
78397 +{
78398 + const char * const plugin_name = plugin_info->base_name;
78399 + const int argc = plugin_info->argc;
78400 + const struct plugin_argument * const argv = plugin_info->argv;
78401 + int i;
78402 + struct register_pass_info kernexec_fptr_pass_info = {
78403 + .pass = &kernexec_fptr_pass.pass,
78404 + .reference_pass_name = "ssa",
78405 + .ref_pass_instance_number = 0,
78406 + .pos_op = PASS_POS_INSERT_AFTER
78407 + };
78408 + struct register_pass_info kernexec_retaddr_pass_info = {
78409 + .pass = &kernexec_retaddr_pass.pass,
78410 + .reference_pass_name = "pro_and_epilogue",
78411 + .ref_pass_instance_number = 0,
78412 + .pos_op = PASS_POS_INSERT_AFTER
78413 + };
78414 +
78415 + if (!plugin_default_version_check(version, &gcc_version)) {
78416 + error(G_("incompatible gcc/plugin versions"));
78417 + return 1;
78418 + }
78419 +
78420 + register_callback(plugin_name, PLUGIN_INFO, NULL, &kernexec_plugin_info);
78421 +
78422 + if (TARGET_64BIT == 0)
78423 + return 0;
78424 +
78425 + for (i = 0; i < argc; ++i) {
78426 + if (!strcmp(argv[i].key, "method")) {
78427 + if (!argv[i].value) {
78428 + error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
78429 + continue;
78430 + }
78431 + if (!strcmp(argv[i].value, "bts")) {
78432 + kernexec_instrument_fptr = kernexec_instrument_fptr_bts;
78433 + kernexec_instrument_retaddr = kernexec_instrument_retaddr_bts;
78434 + } else if (!strcmp(argv[i].value, "or")) {
78435 + kernexec_instrument_fptr = kernexec_instrument_fptr_or;
78436 + kernexec_instrument_retaddr = kernexec_instrument_retaddr_or;
78437 + fix_register("r10", 1, 1);
78438 + } else
78439 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
78440 + continue;
78441 + }
78442 +		error(G_("unknown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
78443 + }
78444 + if (!kernexec_instrument_fptr || !kernexec_instrument_retaddr)
78445 + error(G_("no instrumentation method was selected via '-fplugin-arg-%s-method'"), plugin_name);
78446 +
78447 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_fptr_pass_info);
78448 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_retaddr_pass_info);
78449 +
78450 + return 0;
78451 +}
78452 diff --git a/tools/gcc/stackleak_plugin.c b/tools/gcc/stackleak_plugin.c
78453 new file mode 100644
78454 index 0000000..d44f37c
78455 --- /dev/null
78456 +++ b/tools/gcc/stackleak_plugin.c
78457 @@ -0,0 +1,291 @@
78458 +/*
78459 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
78460 + * Licensed under the GPL v2
78461 + *
78462 + * Note: the choice of the license means that the compilation process is
78463 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
78464 + * but for the kernel it doesn't matter since it doesn't link against
78465 + * any of the gcc libraries
78466 + *
78467 + * gcc plugin to help implement various PaX features
78468 + *
78469 + * - track lowest stack pointer
78470 + *
78471 + * TODO:
78472 + * - initialize all local variables
78473 + *
78474 + * BUGS:
78475 + * - none known
78476 + */
78477 +#include "gcc-plugin.h"
78478 +#include "config.h"
78479 +#include "system.h"
78480 +#include "coretypes.h"
78481 +#include "tree.h"
78482 +#include "tree-pass.h"
78483 +#include "flags.h"
78484 +#include "intl.h"
78485 +#include "toplev.h"
78486 +#include "plugin.h"
78487 +//#include "expr.h" where are you...
78488 +#include "diagnostic.h"
78489 +#include "plugin-version.h"
78490 +#include "tm.h"
78491 +#include "function.h"
78492 +#include "basic-block.h"
78493 +#include "gimple.h"
78494 +#include "rtl.h"
78495 +#include "emit-rtl.h"
78496 +
78497 +extern void print_gimple_stmt(FILE *, gimple, int, int);
78498 +
78499 +int plugin_is_GPL_compatible;
78500 +
78501 +static int track_frame_size = -1;
78502 +static const char track_function[] = "pax_track_stack";
78503 +static const char check_function[] = "pax_check_alloca";
78504 +static bool init_locals;
78505 +
78506 +static struct plugin_info stackleak_plugin_info = {
78507 + .version = "201111150100",
78508 + .help = "track-lowest-sp=nn\ttrack sp in functions whose frame size is at least nn bytes\n"
78509 +// "initialize-locals\t\tforcibly initialize all stack frames\n"
78510 +};
78511 +
78512 +static bool gate_stackleak_track_stack(void);
78513 +static unsigned int execute_stackleak_tree_instrument(void);
78514 +static unsigned int execute_stackleak_final(void);
78515 +
78516 +static struct gimple_opt_pass stackleak_tree_instrument_pass = {
78517 + .pass = {
78518 + .type = GIMPLE_PASS,
78519 + .name = "stackleak_tree_instrument",
78520 + .gate = gate_stackleak_track_stack,
78521 + .execute = execute_stackleak_tree_instrument,
78522 + .sub = NULL,
78523 + .next = NULL,
78524 + .static_pass_number = 0,
78525 + .tv_id = TV_NONE,
78526 + .properties_required = PROP_gimple_leh | PROP_cfg,
78527 + .properties_provided = 0,
78528 + .properties_destroyed = 0,
78529 + .todo_flags_start = 0, //TODO_verify_ssa | TODO_verify_flow | TODO_verify_stmts,
78530 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_update_ssa
78531 + }
78532 +};
78533 +
78534 +static struct rtl_opt_pass stackleak_final_rtl_opt_pass = {
78535 + .pass = {
78536 + .type = RTL_PASS,
78537 + .name = "stackleak_final",
78538 + .gate = gate_stackleak_track_stack,
78539 + .execute = execute_stackleak_final,
78540 + .sub = NULL,
78541 + .next = NULL,
78542 + .static_pass_number = 0,
78543 + .tv_id = TV_NONE,
78544 + .properties_required = 0,
78545 + .properties_provided = 0,
78546 + .properties_destroyed = 0,
78547 + .todo_flags_start = 0,
78548 + .todo_flags_finish = TODO_dump_func
78549 + }
78550 +};
78551 +
78552 +static bool gate_stackleak_track_stack(void)
78553 +{
78554 + return track_frame_size >= 0;
78555 +}
78556 +
78557 +static void stackleak_check_alloca(gimple_stmt_iterator gsi)
78558 +{
78559 + gimple check_alloca;
78560 + tree fndecl, fntype, alloca_size;
78561 +
78562 + // insert call to void pax_check_alloca(unsigned long size)
78563 + fntype = build_function_type_list(void_type_node, long_unsigned_type_node, NULL_TREE);
78564 + fndecl = build_fn_decl(check_function, fntype);
78565 + DECL_ASSEMBLER_NAME(fndecl); // for LTO
78566 + alloca_size = gimple_call_arg(gsi_stmt(gsi), 0);
78567 + check_alloca = gimple_build_call(fndecl, 1, alloca_size);
78568 + gsi_insert_before(&gsi, check_alloca, GSI_CONTINUE_LINKING);
78569 +}
78570 +
78571 +static void stackleak_add_instrumentation(gimple_stmt_iterator gsi)
78572 +{
78573 + gimple track_stack;
78574 + tree fndecl, fntype;
78575 +
78576 + // insert call to void pax_track_stack(void)
78577 + fntype = build_function_type_list(void_type_node, NULL_TREE);
78578 + fndecl = build_fn_decl(track_function, fntype);
78579 + DECL_ASSEMBLER_NAME(fndecl); // for LTO
78580 + track_stack = gimple_build_call(fndecl, 0);
78581 + gsi_insert_after(&gsi, track_stack, GSI_CONTINUE_LINKING);
78582 +}
78583 +
78584 +#if BUILDING_GCC_VERSION == 4005
78585 +static bool gimple_call_builtin_p(gimple stmt, enum built_in_function code)
78586 +{
78587 + tree fndecl;
78588 +
78589 + if (!is_gimple_call(stmt))
78590 + return false;
78591 + fndecl = gimple_call_fndecl(stmt);
78592 + if (!fndecl)
78593 + return false;
78594 + if (DECL_BUILT_IN_CLASS(fndecl) != BUILT_IN_NORMAL)
78595 + return false;
78596 +// print_node(stderr, "pax", fndecl, 4);
78597 + return DECL_FUNCTION_CODE(fndecl) == code;
78598 +}
78599 +#endif
78600 +
78601 +static bool is_alloca(gimple stmt)
78602 +{
78603 + if (gimple_call_builtin_p(stmt, BUILT_IN_ALLOCA))
78604 + return true;
78605 +
78606 +#if BUILDING_GCC_VERSION >= 4007
78607 + if (gimple_call_builtin_p(stmt, BUILT_IN_ALLOCA_WITH_ALIGN))
78608 + return true;
78609 +#endif
78610 +
78611 + return false;
78612 +}
78613 +
78614 +static unsigned int execute_stackleak_tree_instrument(void)
78615 +{
78616 + basic_block bb, entry_bb;
78617 + bool prologue_instrumented = false;
78618 +
78619 + entry_bb = ENTRY_BLOCK_PTR_FOR_FUNCTION(cfun)->next_bb;
78620 +
78621 + // 1. loop through BBs and GIMPLE statements
78622 + FOR_EACH_BB(bb) {
78623 + gimple_stmt_iterator gsi;
78624 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
78625 + // gimple match: align 8 built-in BUILT_IN_NORMAL:BUILT_IN_ALLOCA attributes <tree_list 0xb7576450>
78626 + if (!is_alloca(gsi_stmt(gsi)))
78627 + continue;
78628 +
78629 + // 2. insert stack overflow check before each __builtin_alloca call
78630 + stackleak_check_alloca(gsi);
78631 +
78632 + // 3. insert track call after each __builtin_alloca call
78633 + stackleak_add_instrumentation(gsi);
78634 + if (bb == entry_bb)
78635 + prologue_instrumented = true;
78636 + }
78637 + }
78638 +
78639 + // 4. insert track call at the beginning
78640 + if (!prologue_instrumented) {
78641 + bb = split_block_after_labels(ENTRY_BLOCK_PTR)->dest;
78642 + if (dom_info_available_p(CDI_DOMINATORS))
78643 + set_immediate_dominator(CDI_DOMINATORS, bb, ENTRY_BLOCK_PTR);
78644 + stackleak_add_instrumentation(gsi_start_bb(bb));
78645 + }
78646 +
78647 + return 0;
78648 +}
78649 +
78650 +static unsigned int execute_stackleak_final(void)
78651 +{
78652 + rtx insn;
78653 +
78654 + if (cfun->calls_alloca)
78655 + return 0;
78656 +
78657 + // keep calls only if function frame is big enough
78658 + if (get_frame_size() >= track_frame_size)
78659 + return 0;
78660 +
78661 + // 1. find pax_track_stack calls
78662 + for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) {
78663 + // rtl match: (call_insn 8 7 9 3 (call (mem (symbol_ref ("pax_track_stack") [flags 0x41] <function_decl 0xb7470e80 pax_track_stack>) [0 S1 A8]) (4)) -1 (nil) (nil))
78664 + rtx body;
78665 +
78666 + if (!CALL_P(insn))
78667 + continue;
78668 + body = PATTERN(insn);
78669 + if (GET_CODE(body) != CALL)
78670 + continue;
78671 + body = XEXP(body, 0);
78672 + if (GET_CODE(body) != MEM)
78673 + continue;
78674 + body = XEXP(body, 0);
78675 + if (GET_CODE(body) != SYMBOL_REF)
78676 + continue;
78677 + if (strcmp(XSTR(body, 0), track_function))
78678 + continue;
78679 +// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
78680 + // 2. delete call
78681 + insn = delete_insn_and_edges(insn);
78682 +#if BUILDING_GCC_VERSION >= 4007
78683 + if (GET_CODE(insn) == NOTE && NOTE_KIND(insn) == NOTE_INSN_CALL_ARG_LOCATION)
78684 + insn = delete_insn_and_edges(insn);
78685 +#endif
78686 + }
78687 +
78688 +// print_simple_rtl(stderr, get_insns());
78689 +// print_rtl(stderr, get_insns());
78690 +// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
78691 +
78692 + return 0;
78693 +}
78694 +
78695 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
78696 +{
78697 + const char * const plugin_name = plugin_info->base_name;
78698 + const int argc = plugin_info->argc;
78699 + const struct plugin_argument * const argv = plugin_info->argv;
78700 + int i;
78701 + struct register_pass_info stackleak_tree_instrument_pass_info = {
78702 + .pass = &stackleak_tree_instrument_pass.pass,
78703 +// .reference_pass_name = "tree_profile",
78704 + .reference_pass_name = "optimized",
78705 + .ref_pass_instance_number = 0,
78706 + .pos_op = PASS_POS_INSERT_AFTER
78707 + };
78708 + struct register_pass_info stackleak_final_pass_info = {
78709 + .pass = &stackleak_final_rtl_opt_pass.pass,
78710 + .reference_pass_name = "final",
78711 + .ref_pass_instance_number = 0,
78712 + .pos_op = PASS_POS_INSERT_BEFORE
78713 + };
78714 +
78715 + if (!plugin_default_version_check(version, &gcc_version)) {
78716 + error(G_("incompatible gcc/plugin versions"));
78717 + return 1;
78718 + }
78719 +
78720 + register_callback(plugin_name, PLUGIN_INFO, NULL, &stackleak_plugin_info);
78721 +
78722 + for (i = 0; i < argc; ++i) {
78723 + if (!strcmp(argv[i].key, "track-lowest-sp")) {
78724 + if (!argv[i].value) {
78725 + error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
78726 + continue;
78727 + }
78728 + track_frame_size = atoi(argv[i].value);
78729 + if (argv[i].value[0] < '0' || argv[i].value[0] > '9' || track_frame_size < 0)
78730 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
78731 + continue;
78732 + }
78733 + if (!strcmp(argv[i].key, "initialize-locals")) {
78734 + if (argv[i].value) {
78735 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
78736 + continue;
78737 + }
78738 + init_locals = true;
78739 + continue;
78740 + }
78741 +		error(G_("unknown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
78742 + }
78743 +
78744 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_tree_instrument_pass_info);
78745 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_final_pass_info);
78746 +
78747 + return 0;
78748 +}
78749 diff --git a/tools/perf/util/include/asm/alternative-asm.h b/tools/perf/util/include/asm/alternative-asm.h
78750 index 6789d78..4afd019 100644
78751 --- a/tools/perf/util/include/asm/alternative-asm.h
78752 +++ b/tools/perf/util/include/asm/alternative-asm.h
78753 @@ -5,4 +5,7 @@
78754
78755 #define altinstruction_entry #
78756
78757 + .macro pax_force_retaddr rip=0, reload=0
78758 + .endm
78759 +
78760 #endif
78761 diff --git a/usr/gen_init_cpio.c b/usr/gen_init_cpio.c
78762 index af0f22f..9a7d479 100644
78763 --- a/usr/gen_init_cpio.c
78764 +++ b/usr/gen_init_cpio.c
78765 @@ -303,7 +303,7 @@ static int cpio_mkfile(const char *name, const char *location,
78766 int retval;
78767 int rc = -1;
78768 int namesize;
78769 - int i;
78770 + unsigned int i;
78771
78772 mode |= S_IFREG;
78773
78774 @@ -392,9 +392,10 @@ static char *cpio_replace_env(char *new_location)
78775 *env_var = *expanded = '\0';
78776 strncat(env_var, start + 2, end - start - 2);
78777 strncat(expanded, new_location, start - new_location);
78778 - strncat(expanded, getenv(env_var), PATH_MAX);
78779 - strncat(expanded, end + 1, PATH_MAX);
78780 + strncat(expanded, getenv(env_var), PATH_MAX - strlen(expanded));
78781 + strncat(expanded, end + 1, PATH_MAX - strlen(expanded));
78782 strncpy(new_location, expanded, PATH_MAX);
78783 + new_location[PATH_MAX] = 0;
78784 } else
78785 break;
78786 }
78787 diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
78788 index d9cfb78..4f27c10 100644
78789 --- a/virt/kvm/kvm_main.c
78790 +++ b/virt/kvm/kvm_main.c
78791 @@ -75,7 +75,7 @@ LIST_HEAD(vm_list);
78792
78793 static cpumask_var_t cpus_hardware_enabled;
78794 static int kvm_usage_count = 0;
78795 -static atomic_t hardware_enable_failed;
78796 +static atomic_unchecked_t hardware_enable_failed;
78797
78798 struct kmem_cache *kvm_vcpu_cache;
78799 EXPORT_SYMBOL_GPL(kvm_vcpu_cache);
78800 @@ -2268,7 +2268,7 @@ static void hardware_enable_nolock(void *junk)
78801
78802 if (r) {
78803 cpumask_clear_cpu(cpu, cpus_hardware_enabled);
78804 - atomic_inc(&hardware_enable_failed);
78805 + atomic_inc_unchecked(&hardware_enable_failed);
78806 printk(KERN_INFO "kvm: enabling virtualization on "
78807 "CPU%d failed\n", cpu);
78808 }
78809 @@ -2322,10 +2322,10 @@ static int hardware_enable_all(void)
78810
78811 kvm_usage_count++;
78812 if (kvm_usage_count == 1) {
78813 - atomic_set(&hardware_enable_failed, 0);
78814 + atomic_set_unchecked(&hardware_enable_failed, 0);
78815 on_each_cpu(hardware_enable_nolock, NULL, 1);
78816
78817 - if (atomic_read(&hardware_enable_failed)) {
78818 + if (atomic_read_unchecked(&hardware_enable_failed)) {
78819 hardware_disable_all_nolock();
78820 r = -EBUSY;
78821 }
78822 @@ -2676,7 +2676,7 @@ static void kvm_sched_out(struct preempt_notifier *pn,
78823 kvm_arch_vcpu_put(vcpu);
78824 }
78825
78826 -int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
78827 +int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
78828 struct module *module)
78829 {
78830 int r;
78831 @@ -2739,7 +2739,7 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
78832 if (!vcpu_align)
78833 vcpu_align = __alignof__(struct kvm_vcpu);
78834 kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size, vcpu_align,
78835 - 0, NULL);
78836 + SLAB_USERCOPY, NULL);
78837 if (!kvm_vcpu_cache) {
78838 r = -ENOMEM;
78839 goto out_free_3;
78840 @@ -2749,9 +2749,11 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
78841 if (r)
78842 goto out_free;
78843
78844 - kvm_chardev_ops.owner = module;
78845 - kvm_vm_fops.owner = module;
78846 - kvm_vcpu_fops.owner = module;
78847 + pax_open_kernel();
78848 + *(void **)&kvm_chardev_ops.owner = module;
78849 + *(void **)&kvm_vm_fops.owner = module;
78850 + *(void **)&kvm_vcpu_fops.owner = module;
78851 + pax_close_kernel();
78852
78853 r = misc_register(&kvm_dev);
78854 if (r) {