]> git.ipfire.org Git - thirdparty/grsecurity-scrape.git/blob - test/grsecurity-2.9-3.2.7-201202232125.patch
Auto commit, 1 new patch{es}.
[thirdparty/grsecurity-scrape.git] / test / grsecurity-2.9-3.2.7-201202232125.patch
1 diff --git a/Documentation/dontdiff b/Documentation/dontdiff
2 index dfa6fc6..0095943 100644
3 --- a/Documentation/dontdiff
4 +++ b/Documentation/dontdiff
5 @@ -5,6 +5,7 @@
6 *.cis
7 *.cpio
8 *.csp
9 +*.dbg
10 *.dsp
11 *.dvi
12 *.elf
13 @@ -14,6 +15,7 @@
14 *.gcov
15 *.gen.S
16 *.gif
17 +*.gmo
18 *.grep
19 *.grp
20 *.gz
21 @@ -48,9 +50,11 @@
22 *.tab.h
23 *.tex
24 *.ver
25 +*.vim
26 *.xml
27 *.xz
28 *_MODULES
29 +*_reg_safe.h
30 *_vga16.c
31 *~
32 \#*#
33 @@ -70,6 +74,7 @@ Kerntypes
34 Module.markers
35 Module.symvers
36 PENDING
37 +PERF*
38 SCCS
39 System.map*
40 TAGS
41 @@ -93,19 +98,24 @@ bounds.h
42 bsetup
43 btfixupprep
44 build
45 +builtin-policy.h
46 bvmlinux
47 bzImage*
48 capability_names.h
49 capflags.c
50 classlist.h*
51 +clut_vga16.c
52 +common-cmds.h
53 comp*.log
54 compile.h*
55 conf
56 config
57 config-*
58 config_data.h*
59 +config.c
60 config.mak
61 config.mak.autogen
62 +config.tmp
63 conmakehash
64 consolemap_deftbl.c*
65 cpustr.h
66 @@ -119,6 +129,7 @@ dslm
67 elf2ecoff
68 elfconfig.h*
69 evergreen_reg_safe.h
70 +exception_policy.conf
71 fixdep
72 flask.h
73 fore200e_mkfirm
74 @@ -126,12 +137,15 @@ fore200e_pca_fw.c*
75 gconf
76 gconf.glade.h
77 gen-devlist
78 +gen-kdb_cmds.c
79 gen_crc32table
80 gen_init_cpio
81 generated
82 genheaders
83 genksyms
84 *_gray256.c
85 +hash
86 +hid-example
87 hpet_example
88 hugepage-mmap
89 hugepage-shm
90 @@ -146,7 +160,7 @@ int32.c
91 int4.c
92 int8.c
93 kallsyms
94 -kconfig
95 +kern_constants.h
96 keywords.c
97 ksym.c*
98 ksym.h*
99 @@ -154,7 +168,6 @@ kxgettext
100 lkc_defs.h
101 lex.c
102 lex.*.c
103 -linux
104 logo_*.c
105 logo_*_clut224.c
106 logo_*_mono.c
107 @@ -166,14 +179,15 @@ machtypes.h
108 map
109 map_hugetlb
110 maui_boot.h
111 -media
112 mconf
113 +mdp
114 miboot*
115 mk_elfconfig
116 mkboot
117 mkbugboot
118 mkcpustr
119 mkdep
120 +mkpiggy
121 mkprep
122 mkregtable
123 mktables
124 @@ -209,6 +223,7 @@ r300_reg_safe.h
125 r420_reg_safe.h
126 r600_reg_safe.h
127 recordmcount
128 +regdb.c
129 relocs
130 rlim_names.h
131 rn50_reg_safe.h
132 @@ -219,6 +234,7 @@ setup
133 setup.bin
134 setup.elf
135 sImage
136 +slabinfo
137 sm_tbl*
138 split-include
139 syscalltab.h
140 @@ -229,6 +245,7 @@ tftpboot.img
141 timeconst.h
142 times.h*
143 trix_boot.h
144 +user_constants.h
145 utsrelease.h*
146 vdso-syms.lds
147 vdso.lds
148 @@ -246,7 +263,9 @@ vmlinux
149 vmlinux-*
150 vmlinux.aout
151 vmlinux.bin.all
152 +vmlinux.bin.bz2
153 vmlinux.lds
154 +vmlinux.relocs
155 vmlinuz
156 voffset.h
157 vsyscall.lds
158 @@ -254,9 +273,11 @@ vsyscall_32.lds
159 wanxlfw.inc
160 uImage
161 unifdef
162 +utsrelease.h
163 wakeup.bin
164 wakeup.elf
165 wakeup.lds
166 zImage*
167 zconf.hash.c
168 +zconf.lex.c
169 zoffset.h
170 diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
171 index 81c287f..d456d02 100644
172 --- a/Documentation/kernel-parameters.txt
173 +++ b/Documentation/kernel-parameters.txt
174 @@ -1935,6 +1935,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
175 the specified number of seconds. This is to be used if
176 your oopses keep scrolling off the screen.
177
178 + pax_nouderef [X86] disables UDEREF. Most likely needed under certain
179 + virtualization environments that don't cope well with the
180 + expand down segment used by UDEREF on X86-32 or the frequent
181 + page table updates on X86-64.
182 +
183 + pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
184 +
185 pcbit= [HW,ISDN]
186
187 pcd. [PARIDE]
188 diff --git a/Makefile b/Makefile
189 index d1bdc90..e95fe1a 100644
190 --- a/Makefile
191 +++ b/Makefile
192 @@ -245,8 +245,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
193
194 HOSTCC = gcc
195 HOSTCXX = g++
196 -HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
197 -HOSTCXXFLAGS = -O2
198 +HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
199 +HOSTCLFAGS += $(call cc-option, -Wno-empty-body)
200 +HOSTCXXFLAGS = -O2 -Wall -W -fno-delete-null-pointer-checks
201
202 # Decide whether to build built-in, modular, or both.
203 # Normally, just do built-in.
204 @@ -407,8 +408,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn --exc
205 # Rules shared between *config targets and build targets
206
207 # Basic helpers built in scripts/
208 -PHONY += scripts_basic
209 -scripts_basic:
210 +PHONY += scripts_basic gcc-plugins
211 +scripts_basic: gcc-plugins
212 $(Q)$(MAKE) $(build)=scripts/basic
213 $(Q)rm -f .tmp_quiet_recordmcount
214
215 @@ -564,6 +565,46 @@ else
216 KBUILD_CFLAGS += -O2
217 endif
218
219 +ifndef DISABLE_PAX_PLUGINS
220 +ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(CC)"), y)
221 +ifndef DISABLE_PAX_CONSTIFY_PLUGIN
222 +CONSTIFY_PLUGIN := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
223 +endif
224 +ifdef CONFIG_PAX_MEMORY_STACKLEAK
225 +STACKLEAK_PLUGIN := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN
226 +STACKLEAK_PLUGIN += -fplugin-arg-stackleak_plugin-track-lowest-sp=100
227 +endif
228 +ifdef CONFIG_KALLOCSTAT_PLUGIN
229 +KALLOCSTAT_PLUGIN := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
230 +endif
231 +ifdef CONFIG_PAX_KERNEXEC_PLUGIN
232 +KERNEXEC_PLUGIN := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
233 +KERNEXEC_PLUGIN += -fplugin-arg-kernexec_plugin-method=$(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD)
234 +endif
235 +ifdef CONFIG_CHECKER_PLUGIN
236 +ifeq ($(call cc-ifversion, -ge, 0406, y), y)
237 +CHECKER_PLUGIN := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
238 +endif
239 +endif
240 +GCC_PLUGINS := $(CONSTIFY_PLUGIN) $(STACKLEAK_PLUGIN) $(KALLOCSTAT_PLUGIN) $(KERNEXEC_PLUGIN) $(CHECKER_PLUGIN)
241 +export CONSTIFY_PLUGIN STACKLEAK_PLUGIN KERNEXEC_PLUGIN CHECKER_PLUGIN
242 +ifeq ($(KBUILD_EXTMOD),)
243 +gcc-plugins:
244 + $(Q)$(MAKE) $(build)=tools/gcc
245 +else
246 +gcc-plugins: ;
247 +endif
248 +else
249 +gcc-plugins:
250 +ifeq ($(call cc-ifversion, -ge, 0405, y), y)
251 + $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev. If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.))
252 +else
253 + $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
254 +endif
255 + $(Q)echo "PAX_MEMORY_STACKLEAK and other features will be less secure"
256 +endif
257 +endif
258 +
259 include $(srctree)/arch/$(SRCARCH)/Makefile
260
261 ifneq ($(CONFIG_FRAME_WARN),0)
262 @@ -708,7 +749,7 @@ export mod_strip_cmd
263
264
265 ifeq ($(KBUILD_EXTMOD),)
266 -core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
267 +core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
268
269 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
270 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
271 @@ -932,6 +973,7 @@ vmlinux.o: $(modpost-init) $(vmlinux-main) FORCE
272
273 # The actual objects are generated when descending,
274 # make sure no implicit rule kicks in
275 +$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): KBUILD_CFLAGS += $(GCC_PLUGINS)
276 $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
277
278 # Handle descending into subdirectories listed in $(vmlinux-dirs)
279 @@ -941,7 +983,7 @@ $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
280 # Error messages still appears in the original language
281
282 PHONY += $(vmlinux-dirs)
283 -$(vmlinux-dirs): prepare scripts
284 +$(vmlinux-dirs): gcc-plugins prepare scripts
285 $(Q)$(MAKE) $(build)=$@
286
287 # Store (new) KERNELRELASE string in include/config/kernel.release
288 @@ -985,6 +1027,7 @@ prepare0: archprepare FORCE
289 $(Q)$(MAKE) $(build)=.
290
291 # All the preparing..
292 +prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS),$(KBUILD_CFLAGS))
293 prepare: prepare0
294
295 # Generate some files
296 @@ -1086,6 +1129,7 @@ all: modules
297 # using awk while concatenating to the final file.
298
299 PHONY += modules
300 +modules: KBUILD_CFLAGS += $(GCC_PLUGINS)
301 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
302 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
303 @$(kecho) ' Building modules, stage 2.';
304 @@ -1101,7 +1145,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modules.builtin)
305
306 # Target to prepare building external modules
307 PHONY += modules_prepare
308 -modules_prepare: prepare scripts
309 +modules_prepare: gcc-plugins prepare scripts
310
311 # Target to install modules
312 PHONY += modules_install
313 @@ -1198,6 +1242,7 @@ distclean: mrproper
314 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
315 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
316 -o -name '.*.rej' \
317 + -o -name '.*.rej' -o -name '*.so' \
318 -o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
319 -type f -print | xargs rm -f
320
321 @@ -1358,6 +1403,7 @@ PHONY += $(module-dirs) modules
322 $(module-dirs): crmodverdir $(objtree)/Module.symvers
323 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
324
325 +modules: KBUILD_CFLAGS += $(GCC_PLUGINS)
326 modules: $(module-dirs)
327 @$(kecho) ' Building modules, stage 2.';
328 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
329 @@ -1484,17 +1530,19 @@ else
330 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
331 endif
332
333 -%.s: %.c prepare scripts FORCE
334 +%.s: KBUILD_CFLAGS += $(GCC_PLUGINS)
335 +%.s: %.c gcc-plugins prepare scripts FORCE
336 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
337 %.i: %.c prepare scripts FORCE
338 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
339 -%.o: %.c prepare scripts FORCE
340 +%.o: KBUILD_CFLAGS += $(GCC_PLUGINS)
341 +%.o: %.c gcc-plugins prepare scripts FORCE
342 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
343 %.lst: %.c prepare scripts FORCE
344 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
345 -%.s: %.S prepare scripts FORCE
346 +%.s: %.S gcc-plugins prepare scripts FORCE
347 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
348 -%.o: %.S prepare scripts FORCE
349 +%.o: %.S gcc-plugins prepare scripts FORCE
350 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
351 %.symtypes: %.c prepare scripts FORCE
352 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
353 @@ -1504,11 +1552,13 @@ endif
354 $(cmd_crmodverdir)
355 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
356 $(build)=$(build-dir)
357 -%/: prepare scripts FORCE
358 +%/: KBUILD_CFLAGS += $(GCC_PLUGINS)
359 +%/: gcc-plugins prepare scripts FORCE
360 $(cmd_crmodverdir)
361 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
362 $(build)=$(build-dir)
363 -%.ko: prepare scripts FORCE
364 +%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS)
365 +%.ko: gcc-plugins prepare scripts FORCE
366 $(cmd_crmodverdir)
367 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
368 $(build)=$(build-dir) $(@:.ko=.o)
369 diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
370 index da5449e..7418343 100644
371 --- a/arch/alpha/include/asm/elf.h
372 +++ b/arch/alpha/include/asm/elf.h
373 @@ -90,6 +90,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
374
375 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
376
377 +#ifdef CONFIG_PAX_ASLR
378 +#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
379 +
380 +#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
381 +#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
382 +#endif
383 +
384 /* $0 is set by ld.so to a pointer to a function which might be
385 registered using atexit. This provides a mean for the dynamic
386 linker to call DT_FINI functions for shared libraries that have
387 diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
388 index de98a73..bd4f1f8 100644
389 --- a/arch/alpha/include/asm/pgtable.h
390 +++ b/arch/alpha/include/asm/pgtable.h
391 @@ -101,6 +101,17 @@ struct vm_area_struct;
392 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
393 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
394 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
395 +
396 +#ifdef CONFIG_PAX_PAGEEXEC
397 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
398 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
399 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
400 +#else
401 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
402 +# define PAGE_COPY_NOEXEC PAGE_COPY
403 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
404 +#endif
405 +
406 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
407
408 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
409 diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
410 index 2fd00b7..cfd5069 100644
411 --- a/arch/alpha/kernel/module.c
412 +++ b/arch/alpha/kernel/module.c
413 @@ -160,7 +160,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
414
415 /* The small sections were sorted to the end of the segment.
416 The following should definitely cover them. */
417 - gp = (u64)me->module_core + me->core_size - 0x8000;
418 + gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
419 got = sechdrs[me->arch.gotsecindex].sh_addr;
420
421 for (i = 0; i < n; i++) {
422 diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
423 index 01e8715..be0e80f 100644
424 --- a/arch/alpha/kernel/osf_sys.c
425 +++ b/arch/alpha/kernel/osf_sys.c
426 @@ -1147,7 +1147,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
427 /* At this point: (!vma || addr < vma->vm_end). */
428 if (limit - len < addr)
429 return -ENOMEM;
430 - if (!vma || addr + len <= vma->vm_start)
431 + if (check_heap_stack_gap(vma, addr, len))
432 return addr;
433 addr = vma->vm_end;
434 vma = vma->vm_next;
435 @@ -1183,6 +1183,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
436 merely specific addresses, but regions of memory -- perhaps
437 this feature should be incorporated into all ports? */
438
439 +#ifdef CONFIG_PAX_RANDMMAP
440 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
441 +#endif
442 +
443 if (addr) {
444 addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
445 if (addr != (unsigned long) -ENOMEM)
446 @@ -1190,8 +1194,8 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
447 }
448
449 /* Next, try allocating at TASK_UNMAPPED_BASE. */
450 - addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
451 - len, limit);
452 + addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit);
453 +
454 if (addr != (unsigned long) -ENOMEM)
455 return addr;
456
457 diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
458 index fadd5f8..904e73a 100644
459 --- a/arch/alpha/mm/fault.c
460 +++ b/arch/alpha/mm/fault.c
461 @@ -54,6 +54,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
462 __reload_thread(pcb);
463 }
464
465 +#ifdef CONFIG_PAX_PAGEEXEC
466 +/*
467 + * PaX: decide what to do with offenders (regs->pc = fault address)
468 + *
469 + * returns 1 when task should be killed
470 + * 2 when patched PLT trampoline was detected
471 + * 3 when unpatched PLT trampoline was detected
472 + */
473 +static int pax_handle_fetch_fault(struct pt_regs *regs)
474 +{
475 +
476 +#ifdef CONFIG_PAX_EMUPLT
477 + int err;
478 +
479 + do { /* PaX: patched PLT emulation #1 */
480 + unsigned int ldah, ldq, jmp;
481 +
482 + err = get_user(ldah, (unsigned int *)regs->pc);
483 + err |= get_user(ldq, (unsigned int *)(regs->pc+4));
484 + err |= get_user(jmp, (unsigned int *)(regs->pc+8));
485 +
486 + if (err)
487 + break;
488 +
489 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
490 + (ldq & 0xFFFF0000U) == 0xA77B0000U &&
491 + jmp == 0x6BFB0000U)
492 + {
493 + unsigned long r27, addr;
494 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
495 + unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
496 +
497 + addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
498 + err = get_user(r27, (unsigned long *)addr);
499 + if (err)
500 + break;
501 +
502 + regs->r27 = r27;
503 + regs->pc = r27;
504 + return 2;
505 + }
506 + } while (0);
507 +
508 + do { /* PaX: patched PLT emulation #2 */
509 + unsigned int ldah, lda, br;
510 +
511 + err = get_user(ldah, (unsigned int *)regs->pc);
512 + err |= get_user(lda, (unsigned int *)(regs->pc+4));
513 + err |= get_user(br, (unsigned int *)(regs->pc+8));
514 +
515 + if (err)
516 + break;
517 +
518 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
519 + (lda & 0xFFFF0000U) == 0xA77B0000U &&
520 + (br & 0xFFE00000U) == 0xC3E00000U)
521 + {
522 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
523 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
524 + unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
525 +
526 + regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
527 + regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
528 + return 2;
529 + }
530 + } while (0);
531 +
532 + do { /* PaX: unpatched PLT emulation */
533 + unsigned int br;
534 +
535 + err = get_user(br, (unsigned int *)regs->pc);
536 +
537 + if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
538 + unsigned int br2, ldq, nop, jmp;
539 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
540 +
541 + addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
542 + err = get_user(br2, (unsigned int *)addr);
543 + err |= get_user(ldq, (unsigned int *)(addr+4));
544 + err |= get_user(nop, (unsigned int *)(addr+8));
545 + err |= get_user(jmp, (unsigned int *)(addr+12));
546 + err |= get_user(resolver, (unsigned long *)(addr+16));
547 +
548 + if (err)
549 + break;
550 +
551 + if (br2 == 0xC3600000U &&
552 + ldq == 0xA77B000CU &&
553 + nop == 0x47FF041FU &&
554 + jmp == 0x6B7B0000U)
555 + {
556 + regs->r28 = regs->pc+4;
557 + regs->r27 = addr+16;
558 + regs->pc = resolver;
559 + return 3;
560 + }
561 + }
562 + } while (0);
563 +#endif
564 +
565 + return 1;
566 +}
567 +
568 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
569 +{
570 + unsigned long i;
571 +
572 + printk(KERN_ERR "PAX: bytes at PC: ");
573 + for (i = 0; i < 5; i++) {
574 + unsigned int c;
575 + if (get_user(c, (unsigned int *)pc+i))
576 + printk(KERN_CONT "???????? ");
577 + else
578 + printk(KERN_CONT "%08x ", c);
579 + }
580 + printk("\n");
581 +}
582 +#endif
583
584 /*
585 * This routine handles page faults. It determines the address,
586 @@ -131,8 +249,29 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
587 good_area:
588 si_code = SEGV_ACCERR;
589 if (cause < 0) {
590 - if (!(vma->vm_flags & VM_EXEC))
591 + if (!(vma->vm_flags & VM_EXEC)) {
592 +
593 +#ifdef CONFIG_PAX_PAGEEXEC
594 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
595 + goto bad_area;
596 +
597 + up_read(&mm->mmap_sem);
598 + switch (pax_handle_fetch_fault(regs)) {
599 +
600 +#ifdef CONFIG_PAX_EMUPLT
601 + case 2:
602 + case 3:
603 + return;
604 +#endif
605 +
606 + }
607 + pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
608 + do_group_exit(SIGKILL);
609 +#else
610 goto bad_area;
611 +#endif
612 +
613 + }
614 } else if (!cause) {
615 /* Allow reads even for write-only mappings */
616 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
617 diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
618 index 86976d0..8a57797 100644
619 --- a/arch/arm/include/asm/atomic.h
620 +++ b/arch/arm/include/asm/atomic.h
621 @@ -239,6 +239,14 @@ typedef struct {
622 u64 __aligned(8) counter;
623 } atomic64_t;
624
625 +#ifdef CONFIG_PAX_REFCOUNT
626 +typedef struct {
627 + u64 __aligned(8) counter;
628 +} atomic64_unchecked_t;
629 +#else
630 +typedef atomic64_t atomic64_unchecked_t;
631 +#endif
632 +
633 #define ATOMIC64_INIT(i) { (i) }
634
635 static inline u64 atomic64_read(atomic64_t *v)
636 diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
637 index 0e9ce8d..6ef1e03 100644
638 --- a/arch/arm/include/asm/elf.h
639 +++ b/arch/arm/include/asm/elf.h
640 @@ -116,7 +116,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
641 the loader. We need to make sure that it is out of the way of the program
642 that it will "exec", and that there is sufficient room for the brk. */
643
644 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
645 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
646 +
647 +#ifdef CONFIG_PAX_ASLR
648 +#define PAX_ELF_ET_DYN_BASE 0x00008000UL
649 +
650 +#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
651 +#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
652 +#endif
653
654 /* When the program starts, a1 contains a pointer to a function to be
655 registered with atexit, as per the SVR4 ABI. A value of 0 means we
656 @@ -126,10 +133,6 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
657 extern void elf_set_personality(const struct elf32_hdr *);
658 #define SET_PERSONALITY(ex) elf_set_personality(&(ex))
659
660 -struct mm_struct;
661 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
662 -#define arch_randomize_brk arch_randomize_brk
663 -
664 extern int vectors_user_mapping(void);
665 #define arch_setup_additional_pages(bprm, uses_interp) vectors_user_mapping()
666 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES
667 diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
668 index e51b1e8..32a3113 100644
669 --- a/arch/arm/include/asm/kmap_types.h
670 +++ b/arch/arm/include/asm/kmap_types.h
671 @@ -21,6 +21,7 @@ enum km_type {
672 KM_L1_CACHE,
673 KM_L2_CACHE,
674 KM_KDB,
675 + KM_CLEARPAGE,
676 KM_TYPE_NR
677 };
678
679 diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
680 index b293616..96310e5 100644
681 --- a/arch/arm/include/asm/uaccess.h
682 +++ b/arch/arm/include/asm/uaccess.h
683 @@ -22,6 +22,8 @@
684 #define VERIFY_READ 0
685 #define VERIFY_WRITE 1
686
687 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
688 +
689 /*
690 * The exception table consists of pairs of addresses: the first is the
691 * address of an instruction that is allowed to fault, and the second is
692 @@ -387,8 +389,23 @@ do { \
693
694
695 #ifdef CONFIG_MMU
696 -extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
697 -extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
698 +extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
699 +extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
700 +
701 +static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
702 +{
703 + if (!__builtin_constant_p(n))
704 + check_object_size(to, n, false);
705 + return ___copy_from_user(to, from, n);
706 +}
707 +
708 +static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
709 +{
710 + if (!__builtin_constant_p(n))
711 + check_object_size(from, n, true);
712 + return ___copy_to_user(to, from, n);
713 +}
714 +
715 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
716 extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
717 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
718 @@ -403,6 +420,9 @@ extern unsigned long __must_check __strnlen_user(const char __user *s, long n);
719
720 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
721 {
722 + if ((long)n < 0)
723 + return n;
724 +
725 if (access_ok(VERIFY_READ, from, n))
726 n = __copy_from_user(to, from, n);
727 else /* security hole - plug it */
728 @@ -412,6 +432,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
729
730 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
731 {
732 + if ((long)n < 0)
733 + return n;
734 +
735 if (access_ok(VERIFY_WRITE, to, n))
736 n = __copy_to_user(to, from, n);
737 return n;
738 diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
739 index 5b0bce6..becd81c 100644
740 --- a/arch/arm/kernel/armksyms.c
741 +++ b/arch/arm/kernel/armksyms.c
742 @@ -95,8 +95,8 @@ EXPORT_SYMBOL(__strncpy_from_user);
743 #ifdef CONFIG_MMU
744 EXPORT_SYMBOL(copy_page);
745
746 -EXPORT_SYMBOL(__copy_from_user);
747 -EXPORT_SYMBOL(__copy_to_user);
748 +EXPORT_SYMBOL(___copy_from_user);
749 +EXPORT_SYMBOL(___copy_to_user);
750 EXPORT_SYMBOL(__clear_user);
751
752 EXPORT_SYMBOL(__get_user_1);
753 diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
754 index 3d0c6fb..3dcae52 100644
755 --- a/arch/arm/kernel/process.c
756 +++ b/arch/arm/kernel/process.c
757 @@ -28,7 +28,6 @@
758 #include <linux/tick.h>
759 #include <linux/utsname.h>
760 #include <linux/uaccess.h>
761 -#include <linux/random.h>
762 #include <linux/hw_breakpoint.h>
763 #include <linux/cpuidle.h>
764
765 @@ -484,12 +483,6 @@ unsigned long get_wchan(struct task_struct *p)
766 return 0;
767 }
768
769 -unsigned long arch_randomize_brk(struct mm_struct *mm)
770 -{
771 - unsigned long range_end = mm->brk + 0x02000000;
772 - return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
773 -}
774 -
775 #ifdef CONFIG_MMU
776 /*
777 * The vectors page is always readable from user space for the
778 diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
779 index 99a5727..a3d5bb1 100644
780 --- a/arch/arm/kernel/traps.c
781 +++ b/arch/arm/kernel/traps.c
782 @@ -259,6 +259,8 @@ static int __die(const char *str, int err, struct thread_info *thread, struct pt
783
784 static DEFINE_RAW_SPINLOCK(die_lock);
785
786 +extern void gr_handle_kernel_exploit(void);
787 +
788 /*
789 * This function is protected against re-entrancy.
790 */
791 @@ -288,6 +290,9 @@ void die(const char *str, struct pt_regs *regs, int err)
792 panic("Fatal exception in interrupt");
793 if (panic_on_oops)
794 panic("Fatal exception");
795 +
796 + gr_handle_kernel_exploit();
797 +
798 if (ret != NOTIFY_STOP)
799 do_exit(SIGSEGV);
800 }
801 diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
802 index 66a477a..bee61d3 100644
803 --- a/arch/arm/lib/copy_from_user.S
804 +++ b/arch/arm/lib/copy_from_user.S
805 @@ -16,7 +16,7 @@
806 /*
807 * Prototype:
808 *
809 - * size_t __copy_from_user(void *to, const void *from, size_t n)
810 + * size_t ___copy_from_user(void *to, const void *from, size_t n)
811 *
812 * Purpose:
813 *
814 @@ -84,11 +84,11 @@
815
816 .text
817
818 -ENTRY(__copy_from_user)
819 +ENTRY(___copy_from_user)
820
821 #include "copy_template.S"
822
823 -ENDPROC(__copy_from_user)
824 +ENDPROC(___copy_from_user)
825
826 .pushsection .fixup,"ax"
827 .align 0
828 diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
829 index d066df6..df28194 100644
830 --- a/arch/arm/lib/copy_to_user.S
831 +++ b/arch/arm/lib/copy_to_user.S
832 @@ -16,7 +16,7 @@
833 /*
834 * Prototype:
835 *
836 - * size_t __copy_to_user(void *to, const void *from, size_t n)
837 + * size_t ___copy_to_user(void *to, const void *from, size_t n)
838 *
839 * Purpose:
840 *
841 @@ -88,11 +88,11 @@
842 .text
843
844 ENTRY(__copy_to_user_std)
845 -WEAK(__copy_to_user)
846 +WEAK(___copy_to_user)
847
848 #include "copy_template.S"
849
850 -ENDPROC(__copy_to_user)
851 +ENDPROC(___copy_to_user)
852 ENDPROC(__copy_to_user_std)
853
854 .pushsection .fixup,"ax"
855 diff --git a/arch/arm/lib/uaccess.S b/arch/arm/lib/uaccess.S
856 index d0ece2a..5ae2f39 100644
857 --- a/arch/arm/lib/uaccess.S
858 +++ b/arch/arm/lib/uaccess.S
859 @@ -20,7 +20,7 @@
860
861 #define PAGE_SHIFT 12
862
863 -/* Prototype: int __copy_to_user(void *to, const char *from, size_t n)
864 +/* Prototype: int ___copy_to_user(void *to, const char *from, size_t n)
865 * Purpose : copy a block to user memory from kernel memory
866 * Params : to - user memory
867 * : from - kernel memory
868 @@ -40,7 +40,7 @@ USER( T(strgtb) r3, [r0], #1) @ May fault
869 sub r2, r2, ip
870 b .Lc2u_dest_aligned
871
872 -ENTRY(__copy_to_user)
873 +ENTRY(___copy_to_user)
874 stmfd sp!, {r2, r4 - r7, lr}
875 cmp r2, #4
876 blt .Lc2u_not_enough
877 @@ -278,14 +278,14 @@ USER( T(strgeb) r3, [r0], #1) @ May fault
878 ldrgtb r3, [r1], #0
879 USER( T(strgtb) r3, [r0], #1) @ May fault
880 b .Lc2u_finished
881 -ENDPROC(__copy_to_user)
882 +ENDPROC(___copy_to_user)
883
884 .pushsection .fixup,"ax"
885 .align 0
886 9001: ldmfd sp!, {r0, r4 - r7, pc}
887 .popsection
888
889 -/* Prototype: unsigned long __copy_from_user(void *to,const void *from,unsigned long n);
890 +/* Prototype: unsigned long ___copy_from_user(void *to,const void *from,unsigned long n);
891 * Purpose : copy a block from user memory to kernel memory
892 * Params : to - kernel memory
893 * : from - user memory
894 @@ -304,7 +304,7 @@ USER( T(ldrgtb) r3, [r1], #1) @ May fault
895 sub r2, r2, ip
896 b .Lcfu_dest_aligned
897
898 -ENTRY(__copy_from_user)
899 +ENTRY(___copy_from_user)
900 stmfd sp!, {r0, r2, r4 - r7, lr}
901 cmp r2, #4
902 blt .Lcfu_not_enough
903 @@ -544,7 +544,7 @@ USER( T(ldrgeb) r3, [r1], #1) @ May fault
904 USER( T(ldrgtb) r3, [r1], #1) @ May fault
905 strgtb r3, [r0], #1
906 b .Lcfu_finished
907 -ENDPROC(__copy_from_user)
908 +ENDPROC(___copy_from_user)
909
910 .pushsection .fixup,"ax"
911 .align 0
912 diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
913 index 025f742..8432b08 100644
914 --- a/arch/arm/lib/uaccess_with_memcpy.c
915 +++ b/arch/arm/lib/uaccess_with_memcpy.c
916 @@ -104,7 +104,7 @@ out:
917 }
918
919 unsigned long
920 -__copy_to_user(void __user *to, const void *from, unsigned long n)
921 +___copy_to_user(void __user *to, const void *from, unsigned long n)
922 {
923 /*
924 * This test is stubbed out of the main function above to keep
925 diff --git a/arch/arm/mach-ux500/mbox-db5500.c b/arch/arm/mach-ux500/mbox-db5500.c
926 index 2b2d51c..0127490 100644
927 --- a/arch/arm/mach-ux500/mbox-db5500.c
928 +++ b/arch/arm/mach-ux500/mbox-db5500.c
929 @@ -168,7 +168,7 @@ static ssize_t mbox_read_fifo(struct device *dev,
930 return sprintf(buf, "0x%X\n", mbox_value);
931 }
932
933 -static DEVICE_ATTR(fifo, S_IWUGO | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
934 +static DEVICE_ATTR(fifo, S_IWUSR | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
935
936 static int mbox_show(struct seq_file *s, void *data)
937 {
938 diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
939 index aa33949..b242a2f 100644
940 --- a/arch/arm/mm/fault.c
941 +++ b/arch/arm/mm/fault.c
942 @@ -183,6 +183,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
943 }
944 #endif
945
946 +#ifdef CONFIG_PAX_PAGEEXEC
947 + if (fsr & FSR_LNX_PF) {
948 + pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
949 + do_group_exit(SIGKILL);
950 + }
951 +#endif
952 +
953 tsk->thread.address = addr;
954 tsk->thread.error_code = fsr;
955 tsk->thread.trap_no = 14;
956 @@ -384,6 +391,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
957 }
958 #endif /* CONFIG_MMU */
959
960 +#ifdef CONFIG_PAX_PAGEEXEC
961 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
962 +{
963 + long i;
964 +
965 + printk(KERN_ERR "PAX: bytes at PC: ");
966 + for (i = 0; i < 20; i++) {
967 + unsigned char c;
968 + if (get_user(c, (__force unsigned char __user *)pc+i))
969 + printk(KERN_CONT "?? ");
970 + else
971 + printk(KERN_CONT "%02x ", c);
972 + }
973 + printk("\n");
974 +
975 + printk(KERN_ERR "PAX: bytes at SP-4: ");
976 + for (i = -1; i < 20; i++) {
977 + unsigned long c;
978 + if (get_user(c, (__force unsigned long __user *)sp+i))
979 + printk(KERN_CONT "???????? ");
980 + else
981 + printk(KERN_CONT "%08lx ", c);
982 + }
983 + printk("\n");
984 +}
985 +#endif
986 +
987 /*
988 * First Level Translation Fault Handler
989 *
990 diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
991 index 44b628e..623ee2a 100644
992 --- a/arch/arm/mm/mmap.c
993 +++ b/arch/arm/mm/mmap.c
994 @@ -54,6 +54,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
995 if (len > TASK_SIZE)
996 return -ENOMEM;
997
998 +#ifdef CONFIG_PAX_RANDMMAP
999 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
1000 +#endif
1001 +
1002 if (addr) {
1003 if (do_align)
1004 addr = COLOUR_ALIGN(addr, pgoff);
1005 @@ -61,15 +65,14 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
1006 addr = PAGE_ALIGN(addr);
1007
1008 vma = find_vma(mm, addr);
1009 - if (TASK_SIZE - len >= addr &&
1010 - (!vma || addr + len <= vma->vm_start))
1011 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
1012 return addr;
1013 }
1014 if (len > mm->cached_hole_size) {
1015 - start_addr = addr = mm->free_area_cache;
1016 + start_addr = addr = mm->free_area_cache;
1017 } else {
1018 - start_addr = addr = TASK_UNMAPPED_BASE;
1019 - mm->cached_hole_size = 0;
1020 + start_addr = addr = mm->mmap_base;
1021 + mm->cached_hole_size = 0;
1022 }
1023 /* 8 bits of randomness in 20 address space bits */
1024 if ((current->flags & PF_RANDOMIZE) &&
1025 @@ -89,14 +92,14 @@ full_search:
1026 * Start a new search - just in case we missed
1027 * some holes.
1028 */
1029 - if (start_addr != TASK_UNMAPPED_BASE) {
1030 - start_addr = addr = TASK_UNMAPPED_BASE;
1031 + if (start_addr != mm->mmap_base) {
1032 + start_addr = addr = mm->mmap_base;
1033 mm->cached_hole_size = 0;
1034 goto full_search;
1035 }
1036 return -ENOMEM;
1037 }
1038 - if (!vma || addr + len <= vma->vm_start) {
1039 + if (check_heap_stack_gap(vma, addr, len)) {
1040 /*
1041 * Remember the place where we stopped the search:
1042 */
1043 diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
1044 index 3b3159b..425ea94 100644
1045 --- a/arch/avr32/include/asm/elf.h
1046 +++ b/arch/avr32/include/asm/elf.h
1047 @@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpregset_t;
1048 the loader. We need to make sure that it is out of the way of the program
1049 that it will "exec", and that there is sufficient room for the brk. */
1050
1051 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
1052 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1053
1054 +#ifdef CONFIG_PAX_ASLR
1055 +#define PAX_ELF_ET_DYN_BASE 0x00001000UL
1056 +
1057 +#define PAX_DELTA_MMAP_LEN 15
1058 +#define PAX_DELTA_STACK_LEN 15
1059 +#endif
1060
1061 /* This yields a mask that user programs can use to figure out what
1062 instruction set this CPU supports. This could be done in user space,
1063 diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h
1064 index b7f5c68..556135c 100644
1065 --- a/arch/avr32/include/asm/kmap_types.h
1066 +++ b/arch/avr32/include/asm/kmap_types.h
1067 @@ -22,7 +22,8 @@ D(10) KM_IRQ0,
1068 D(11) KM_IRQ1,
1069 D(12) KM_SOFTIRQ0,
1070 D(13) KM_SOFTIRQ1,
1071 -D(14) KM_TYPE_NR
1072 +D(14) KM_CLEARPAGE,
1073 +D(15) KM_TYPE_NR
1074 };
1075
1076 #undef D
1077 diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
1078 index f7040a1..db9f300 100644
1079 --- a/arch/avr32/mm/fault.c
1080 +++ b/arch/avr32/mm/fault.c
1081 @@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
1082
1083 int exception_trace = 1;
1084
1085 +#ifdef CONFIG_PAX_PAGEEXEC
1086 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1087 +{
1088 + unsigned long i;
1089 +
1090 + printk(KERN_ERR "PAX: bytes at PC: ");
1091 + for (i = 0; i < 20; i++) {
1092 + unsigned char c;
1093 + if (get_user(c, (unsigned char *)pc+i))
1094 + printk(KERN_CONT "???????? ");
1095 + else
1096 + printk(KERN_CONT "%02x ", c);
1097 + }
1098 + printk("\n");
1099 +}
1100 +#endif
1101 +
1102 /*
1103 * This routine handles page faults. It determines the address and the
1104 * problem, and then passes it off to one of the appropriate routines.
1105 @@ -156,6 +173,16 @@ bad_area:
1106 up_read(&mm->mmap_sem);
1107
1108 if (user_mode(regs)) {
1109 +
1110 +#ifdef CONFIG_PAX_PAGEEXEC
1111 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
1112 + if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
1113 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
1114 + do_group_exit(SIGKILL);
1115 + }
1116 + }
1117 +#endif
1118 +
1119 if (exception_trace && printk_ratelimit())
1120 printk("%s%s[%d]: segfault at %08lx pc %08lx "
1121 "sp %08lx ecr %lu\n",
1122 diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h
1123 index f8e16b2..c73ff79 100644
1124 --- a/arch/frv/include/asm/kmap_types.h
1125 +++ b/arch/frv/include/asm/kmap_types.h
1126 @@ -23,6 +23,7 @@ enum km_type {
1127 KM_IRQ1,
1128 KM_SOFTIRQ0,
1129 KM_SOFTIRQ1,
1130 + KM_CLEARPAGE,
1131 KM_TYPE_NR
1132 };
1133
1134 diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
1135 index 385fd30..6c3d97e 100644
1136 --- a/arch/frv/mm/elf-fdpic.c
1137 +++ b/arch/frv/mm/elf-fdpic.c
1138 @@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
1139 if (addr) {
1140 addr = PAGE_ALIGN(addr);
1141 vma = find_vma(current->mm, addr);
1142 - if (TASK_SIZE - len >= addr &&
1143 - (!vma || addr + len <= vma->vm_start))
1144 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
1145 goto success;
1146 }
1147
1148 @@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
1149 for (; vma; vma = vma->vm_next) {
1150 if (addr > limit)
1151 break;
1152 - if (addr + len <= vma->vm_start)
1153 + if (check_heap_stack_gap(vma, addr, len))
1154 goto success;
1155 addr = vma->vm_end;
1156 }
1157 @@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
1158 for (; vma; vma = vma->vm_next) {
1159 if (addr > limit)
1160 break;
1161 - if (addr + len <= vma->vm_start)
1162 + if (check_heap_stack_gap(vma, addr, len))
1163 goto success;
1164 addr = vma->vm_end;
1165 }
1166 diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
1167 index b5298eb..67c6e62 100644
1168 --- a/arch/ia64/include/asm/elf.h
1169 +++ b/arch/ia64/include/asm/elf.h
1170 @@ -42,6 +42,13 @@
1171 */
1172 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
1173
1174 +#ifdef CONFIG_PAX_ASLR
1175 +#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
1176 +
1177 +#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
1178 +#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
1179 +#endif
1180 +
1181 #define PT_IA_64_UNWIND 0x70000001
1182
1183 /* IA-64 relocations: */
1184 diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
1185 index 1a97af3..7529d31 100644
1186 --- a/arch/ia64/include/asm/pgtable.h
1187 +++ b/arch/ia64/include/asm/pgtable.h
1188 @@ -12,7 +12,7 @@
1189 * David Mosberger-Tang <davidm@hpl.hp.com>
1190 */
1191
1192 -
1193 +#include <linux/const.h>
1194 #include <asm/mman.h>
1195 #include <asm/page.h>
1196 #include <asm/processor.h>
1197 @@ -143,6 +143,17 @@
1198 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1199 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1200 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
1201 +
1202 +#ifdef CONFIG_PAX_PAGEEXEC
1203 +# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
1204 +# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1205 +# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1206 +#else
1207 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
1208 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
1209 +# define PAGE_COPY_NOEXEC PAGE_COPY
1210 +#endif
1211 +
1212 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
1213 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
1214 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
1215 diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
1216 index b77768d..e0795eb 100644
1217 --- a/arch/ia64/include/asm/spinlock.h
1218 +++ b/arch/ia64/include/asm/spinlock.h
1219 @@ -72,7 +72,7 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
1220 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
1221
1222 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
1223 - ACCESS_ONCE(*p) = (tmp + 2) & ~1;
1224 + ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
1225 }
1226
1227 static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
1228 diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
1229 index 449c8c0..432a3d2 100644
1230 --- a/arch/ia64/include/asm/uaccess.h
1231 +++ b/arch/ia64/include/asm/uaccess.h
1232 @@ -257,7 +257,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
1233 const void *__cu_from = (from); \
1234 long __cu_len = (n); \
1235 \
1236 - if (__access_ok(__cu_to, __cu_len, get_fs())) \
1237 + if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) \
1238 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
1239 __cu_len; \
1240 })
1241 @@ -269,7 +269,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
1242 long __cu_len = (n); \
1243 \
1244 __chk_user_ptr(__cu_from); \
1245 - if (__access_ok(__cu_from, __cu_len, get_fs())) \
1246 + if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) \
1247 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
1248 __cu_len; \
1249 })
1250 diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
1251 index 24603be..948052d 100644
1252 --- a/arch/ia64/kernel/module.c
1253 +++ b/arch/ia64/kernel/module.c
1254 @@ -307,8 +307,7 @@ plt_target (struct plt_entry *plt)
1255 void
1256 module_free (struct module *mod, void *module_region)
1257 {
1258 - if (mod && mod->arch.init_unw_table &&
1259 - module_region == mod->module_init) {
1260 + if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
1261 unw_remove_unwind_table(mod->arch.init_unw_table);
1262 mod->arch.init_unw_table = NULL;
1263 }
1264 @@ -494,15 +493,39 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
1265 }
1266
1267 static inline int
1268 +in_init_rx (const struct module *mod, uint64_t addr)
1269 +{
1270 + return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
1271 +}
1272 +
1273 +static inline int
1274 +in_init_rw (const struct module *mod, uint64_t addr)
1275 +{
1276 + return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
1277 +}
1278 +
1279 +static inline int
1280 in_init (const struct module *mod, uint64_t addr)
1281 {
1282 - return addr - (uint64_t) mod->module_init < mod->init_size;
1283 + return in_init_rx(mod, addr) || in_init_rw(mod, addr);
1284 +}
1285 +
1286 +static inline int
1287 +in_core_rx (const struct module *mod, uint64_t addr)
1288 +{
1289 + return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
1290 +}
1291 +
1292 +static inline int
1293 +in_core_rw (const struct module *mod, uint64_t addr)
1294 +{
1295 + return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
1296 }
1297
1298 static inline int
1299 in_core (const struct module *mod, uint64_t addr)
1300 {
1301 - return addr - (uint64_t) mod->module_core < mod->core_size;
1302 + return in_core_rx(mod, addr) || in_core_rw(mod, addr);
1303 }
1304
1305 static inline int
1306 @@ -685,7 +708,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
1307 break;
1308
1309 case RV_BDREL:
1310 - val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
1311 + if (in_init_rx(mod, val))
1312 + val -= (uint64_t) mod->module_init_rx;
1313 + else if (in_init_rw(mod, val))
1314 + val -= (uint64_t) mod->module_init_rw;
1315 + else if (in_core_rx(mod, val))
1316 + val -= (uint64_t) mod->module_core_rx;
1317 + else if (in_core_rw(mod, val))
1318 + val -= (uint64_t) mod->module_core_rw;
1319 break;
1320
1321 case RV_LTV:
1322 @@ -820,15 +850,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
1323 * addresses have been selected...
1324 */
1325 uint64_t gp;
1326 - if (mod->core_size > MAX_LTOFF)
1327 + if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
1328 /*
1329 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
1330 * at the end of the module.
1331 */
1332 - gp = mod->core_size - MAX_LTOFF / 2;
1333 + gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
1334 else
1335 - gp = mod->core_size / 2;
1336 - gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
1337 + gp = (mod->core_size_rx + mod->core_size_rw) / 2;
1338 + gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
1339 mod->arch.gp = gp;
1340 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
1341 }
1342 diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
1343 index 609d500..7dde2a8 100644
1344 --- a/arch/ia64/kernel/sys_ia64.c
1345 +++ b/arch/ia64/kernel/sys_ia64.c
1346 @@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
1347 if (REGION_NUMBER(addr) == RGN_HPAGE)
1348 addr = 0;
1349 #endif
1350 +
1351 +#ifdef CONFIG_PAX_RANDMMAP
1352 + if (mm->pax_flags & MF_PAX_RANDMMAP)
1353 + addr = mm->free_area_cache;
1354 + else
1355 +#endif
1356 +
1357 if (!addr)
1358 addr = mm->free_area_cache;
1359
1360 @@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
1361 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
1362 /* At this point: (!vma || addr < vma->vm_end). */
1363 if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
1364 - if (start_addr != TASK_UNMAPPED_BASE) {
1365 + if (start_addr != mm->mmap_base) {
1366 /* Start a new search --- just in case we missed some holes. */
1367 - addr = TASK_UNMAPPED_BASE;
1368 + addr = mm->mmap_base;
1369 goto full_search;
1370 }
1371 return -ENOMEM;
1372 }
1373 - if (!vma || addr + len <= vma->vm_start) {
1374 + if (check_heap_stack_gap(vma, addr, len)) {
1375 /* Remember the address where we stopped this search: */
1376 mm->free_area_cache = addr + len;
1377 return addr;
1378 diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
1379 index 53c0ba0..2accdde 100644
1380 --- a/arch/ia64/kernel/vmlinux.lds.S
1381 +++ b/arch/ia64/kernel/vmlinux.lds.S
1382 @@ -199,7 +199,7 @@ SECTIONS {
1383 /* Per-cpu data: */
1384 . = ALIGN(PERCPU_PAGE_SIZE);
1385 PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
1386 - __phys_per_cpu_start = __per_cpu_load;
1387 + __phys_per_cpu_start = per_cpu_load;
1388 /*
1389 * ensure percpu data fits
1390 * into percpu page size
1391 diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
1392 index 20b3593..1ce77f0 100644
1393 --- a/arch/ia64/mm/fault.c
1394 +++ b/arch/ia64/mm/fault.c
1395 @@ -73,6 +73,23 @@ mapped_kernel_page_is_present (unsigned long address)
1396 return pte_present(pte);
1397 }
1398
1399 +#ifdef CONFIG_PAX_PAGEEXEC
1400 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1401 +{
1402 + unsigned long i;
1403 +
1404 + printk(KERN_ERR "PAX: bytes at PC: ");
1405 + for (i = 0; i < 8; i++) {
1406 + unsigned int c;
1407 + if (get_user(c, (unsigned int *)pc+i))
1408 + printk(KERN_CONT "???????? ");
1409 + else
1410 + printk(KERN_CONT "%08x ", c);
1411 + }
1412 + printk("\n");
1413 +}
1414 +#endif
1415 +
1416 void __kprobes
1417 ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
1418 {
1419 @@ -146,9 +163,23 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
1420 mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
1421 | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
1422
1423 - if ((vma->vm_flags & mask) != mask)
1424 + if ((vma->vm_flags & mask) != mask) {
1425 +
1426 +#ifdef CONFIG_PAX_PAGEEXEC
1427 + if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
1428 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
1429 + goto bad_area;
1430 +
1431 + up_read(&mm->mmap_sem);
1432 + pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
1433 + do_group_exit(SIGKILL);
1434 + }
1435 +#endif
1436 +
1437 goto bad_area;
1438
1439 + }
1440 +
1441 /*
1442 * If for any reason at all we couldn't handle the fault, make
1443 * sure we exit gracefully rather than endlessly redo the
1444 diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
1445 index 5ca674b..e0e1b70 100644
1446 --- a/arch/ia64/mm/hugetlbpage.c
1447 +++ b/arch/ia64/mm/hugetlbpage.c
1448 @@ -171,7 +171,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
1449 /* At this point: (!vmm || addr < vmm->vm_end). */
1450 if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
1451 return -ENOMEM;
1452 - if (!vmm || (addr + len) <= vmm->vm_start)
1453 + if (check_heap_stack_gap(vmm, addr, len))
1454 return addr;
1455 addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
1456 }
1457 diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
1458 index 00cb0e2..2ad8024 100644
1459 --- a/arch/ia64/mm/init.c
1460 +++ b/arch/ia64/mm/init.c
1461 @@ -120,6 +120,19 @@ ia64_init_addr_space (void)
1462 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
1463 vma->vm_end = vma->vm_start + PAGE_SIZE;
1464 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
1465 +
1466 +#ifdef CONFIG_PAX_PAGEEXEC
1467 + if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
1468 + vma->vm_flags &= ~VM_EXEC;
1469 +
1470 +#ifdef CONFIG_PAX_MPROTECT
1471 + if (current->mm->pax_flags & MF_PAX_MPROTECT)
1472 + vma->vm_flags &= ~VM_MAYEXEC;
1473 +#endif
1474 +
1475 + }
1476 +#endif
1477 +
1478 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
1479 down_write(&current->mm->mmap_sem);
1480 if (insert_vm_struct(current->mm, vma)) {
1481 diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c
1482 index 82abd15..d95ae5d 100644
1483 --- a/arch/m32r/lib/usercopy.c
1484 +++ b/arch/m32r/lib/usercopy.c
1485 @@ -14,6 +14,9 @@
1486 unsigned long
1487 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
1488 {
1489 + if ((long)n < 0)
1490 + return n;
1491 +
1492 prefetch(from);
1493 if (access_ok(VERIFY_WRITE, to, n))
1494 __copy_user(to,from,n);
1495 @@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
1496 unsigned long
1497 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
1498 {
1499 + if ((long)n < 0)
1500 + return n;
1501 +
1502 prefetchw(to);
1503 if (access_ok(VERIFY_READ, from, n))
1504 __copy_user_zeroing(to,from,n);
1505 diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
1506 index 455c0ac..ad65fbe 100644
1507 --- a/arch/mips/include/asm/elf.h
1508 +++ b/arch/mips/include/asm/elf.h
1509 @@ -372,13 +372,16 @@ extern const char *__elf_platform;
1510 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1511 #endif
1512
1513 +#ifdef CONFIG_PAX_ASLR
1514 +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1515 +
1516 +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1517 +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1518 +#endif
1519 +
1520 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
1521 struct linux_binprm;
1522 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
1523 int uses_interp);
1524
1525 -struct mm_struct;
1526 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1527 -#define arch_randomize_brk arch_randomize_brk
1528 -
1529 #endif /* _ASM_ELF_H */
1530 diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
1531 index e59cd1a..8e329d6 100644
1532 --- a/arch/mips/include/asm/page.h
1533 +++ b/arch/mips/include/asm/page.h
1534 @@ -93,7 +93,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
1535 #ifdef CONFIG_CPU_MIPS32
1536 typedef struct { unsigned long pte_low, pte_high; } pte_t;
1537 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
1538 - #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
1539 + #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
1540 #else
1541 typedef struct { unsigned long long pte; } pte_t;
1542 #define pte_val(x) ((x).pte)
1543 diff --git a/arch/mips/include/asm/system.h b/arch/mips/include/asm/system.h
1544 index 6018c80..7c37203 100644
1545 --- a/arch/mips/include/asm/system.h
1546 +++ b/arch/mips/include/asm/system.h
1547 @@ -230,6 +230,6 @@ extern void per_cpu_trap_init(void);
1548 */
1549 #define __ARCH_WANT_UNLOCKED_CTXSW
1550
1551 -extern unsigned long arch_align_stack(unsigned long sp);
1552 +#define arch_align_stack(x) ((x) & ~0xfUL)
1553
1554 #endif /* _ASM_SYSTEM_H */
1555 diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
1556 index 9fdd8bc..4bd7f1a 100644
1557 --- a/arch/mips/kernel/binfmt_elfn32.c
1558 +++ b/arch/mips/kernel/binfmt_elfn32.c
1559 @@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
1560 #undef ELF_ET_DYN_BASE
1561 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1562
1563 +#ifdef CONFIG_PAX_ASLR
1564 +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1565 +
1566 +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1567 +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1568 +#endif
1569 +
1570 #include <asm/processor.h>
1571 #include <linux/module.h>
1572 #include <linux/elfcore.h>
1573 diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
1574 index ff44823..97f8906 100644
1575 --- a/arch/mips/kernel/binfmt_elfo32.c
1576 +++ b/arch/mips/kernel/binfmt_elfo32.c
1577 @@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
1578 #undef ELF_ET_DYN_BASE
1579 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1580
1581 +#ifdef CONFIG_PAX_ASLR
1582 +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1583 +
1584 +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1585 +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1586 +#endif
1587 +
1588 #include <asm/processor.h>
1589
1590 /*
1591 diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
1592 index c47f96e..661d418 100644
1593 --- a/arch/mips/kernel/process.c
1594 +++ b/arch/mips/kernel/process.c
1595 @@ -481,15 +481,3 @@ unsigned long get_wchan(struct task_struct *task)
1596 out:
1597 return pc;
1598 }
1599 -
1600 -/*
1601 - * Don't forget that the stack pointer must be aligned on a 8 bytes
1602 - * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
1603 - */
1604 -unsigned long arch_align_stack(unsigned long sp)
1605 -{
1606 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
1607 - sp -= get_random_int() & ~PAGE_MASK;
1608 -
1609 - return sp & ALMASK;
1610 -}
1611 diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
1612 index 937cf33..adb39bb 100644
1613 --- a/arch/mips/mm/fault.c
1614 +++ b/arch/mips/mm/fault.c
1615 @@ -28,6 +28,23 @@
1616 #include <asm/highmem.h> /* For VMALLOC_END */
1617 #include <linux/kdebug.h>
1618
1619 +#ifdef CONFIG_PAX_PAGEEXEC
1620 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1621 +{
1622 + unsigned long i;
1623 +
1624 + printk(KERN_ERR "PAX: bytes at PC: ");
1625 + for (i = 0; i < 5; i++) {
1626 + unsigned int c;
1627 + if (get_user(c, (unsigned int *)pc+i))
1628 + printk(KERN_CONT "???????? ");
1629 + else
1630 + printk(KERN_CONT "%08x ", c);
1631 + }
1632 + printk("\n");
1633 +}
1634 +#endif
1635 +
1636 /*
1637 * This routine handles page faults. It determines the address,
1638 * and the problem, and then passes it off to one of the appropriate
1639 diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
1640 index 302d779..7d35bf8 100644
1641 --- a/arch/mips/mm/mmap.c
1642 +++ b/arch/mips/mm/mmap.c
1643 @@ -95,6 +95,11 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
1644 do_color_align = 1;
1645
1646 /* requesting a specific address */
1647 +
1648 +#ifdef CONFIG_PAX_RANDMMAP
1649 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
1650 +#endif
1651 +
1652 if (addr) {
1653 if (do_color_align)
1654 addr = COLOUR_ALIGN(addr, pgoff);
1655 @@ -102,8 +107,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
1656 addr = PAGE_ALIGN(addr);
1657
1658 vma = find_vma(mm, addr);
1659 - if (TASK_SIZE - len >= addr &&
1660 - (!vma || addr + len <= vma->vm_start))
1661 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vmm, addr, len))
1662 return addr;
1663 }
1664
1665 @@ -118,7 +122,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
1666 /* At this point: (!vma || addr < vma->vm_end). */
1667 if (TASK_SIZE - len < addr)
1668 return -ENOMEM;
1669 - if (!vma || addr + len <= vma->vm_start)
1670 + if (check_heap_stack_gap(vmm, addr, len))
1671 return addr;
1672 addr = vma->vm_end;
1673 if (do_color_align)
1674 @@ -145,7 +149,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
1675 /* make sure it can fit in the remaining address space */
1676 if (likely(addr > len)) {
1677 vma = find_vma(mm, addr - len);
1678 - if (!vma || addr <= vma->vm_start) {
1679 + if (check_heap_stack_gap(vmm, addr - len, len))
1680 /* cache the address as a hint for next time */
1681 return mm->free_area_cache = addr - len;
1682 }
1683 @@ -165,7 +169,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
1684 * return with success:
1685 */
1686 vma = find_vma(mm, addr);
1687 - if (likely(!vma || addr + len <= vma->vm_start)) {
1688 + if (check_heap_stack_gap(vmm, addr, len)) {
1689 /* cache the address as a hint for next time */
1690 return mm->free_area_cache = addr;
1691 }
1692 @@ -242,30 +246,3 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
1693 mm->unmap_area = arch_unmap_area_topdown;
1694 }
1695 }
1696 -
1697 -static inline unsigned long brk_rnd(void)
1698 -{
1699 - unsigned long rnd = get_random_int();
1700 -
1701 - rnd = rnd << PAGE_SHIFT;
1702 - /* 8MB for 32bit, 256MB for 64bit */
1703 - if (TASK_IS_32BIT_ADDR)
1704 - rnd = rnd & 0x7ffffful;
1705 - else
1706 - rnd = rnd & 0xffffffful;
1707 -
1708 - return rnd;
1709 -}
1710 -
1711 -unsigned long arch_randomize_brk(struct mm_struct *mm)
1712 -{
1713 - unsigned long base = mm->brk;
1714 - unsigned long ret;
1715 -
1716 - ret = PAGE_ALIGN(base + brk_rnd());
1717 -
1718 - if (ret < mm->brk)
1719 - return mm->brk;
1720 -
1721 - return ret;
1722 -}
1723 diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
1724 index 19f6cb1..6c78cf2 100644
1725 --- a/arch/parisc/include/asm/elf.h
1726 +++ b/arch/parisc/include/asm/elf.h
1727 @@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration... */
1728
1729 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
1730
1731 +#ifdef CONFIG_PAX_ASLR
1732 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
1733 +
1734 +#define PAX_DELTA_MMAP_LEN 16
1735 +#define PAX_DELTA_STACK_LEN 16
1736 +#endif
1737 +
1738 /* This yields a mask that user programs can use to figure out what
1739 instruction set this CPU supports. This could be done in user space,
1740 but it's not easy, and we've already done it here. */
1741 diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
1742 index 22dadeb..f6c2be4 100644
1743 --- a/arch/parisc/include/asm/pgtable.h
1744 +++ b/arch/parisc/include/asm/pgtable.h
1745 @@ -210,6 +210,17 @@ struct vm_area_struct;
1746 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
1747 #define PAGE_COPY PAGE_EXECREAD
1748 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
1749 +
1750 +#ifdef CONFIG_PAX_PAGEEXEC
1751 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
1752 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1753 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1754 +#else
1755 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
1756 +# define PAGE_COPY_NOEXEC PAGE_COPY
1757 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
1758 +#endif
1759 +
1760 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
1761 #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC)
1762 #define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX)
1763 diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
1764 index 5e34ccf..672bc9c 100644
1765 --- a/arch/parisc/kernel/module.c
1766 +++ b/arch/parisc/kernel/module.c
1767 @@ -98,16 +98,38 @@
1768
1769 /* three functions to determine where in the module core
1770 * or init pieces the location is */
1771 +static inline int in_init_rx(struct module *me, void *loc)
1772 +{
1773 + return (loc >= me->module_init_rx &&
1774 + loc < (me->module_init_rx + me->init_size_rx));
1775 +}
1776 +
1777 +static inline int in_init_rw(struct module *me, void *loc)
1778 +{
1779 + return (loc >= me->module_init_rw &&
1780 + loc < (me->module_init_rw + me->init_size_rw));
1781 +}
1782 +
1783 static inline int in_init(struct module *me, void *loc)
1784 {
1785 - return (loc >= me->module_init &&
1786 - loc <= (me->module_init + me->init_size));
1787 + return in_init_rx(me, loc) || in_init_rw(me, loc);
1788 +}
1789 +
1790 +static inline int in_core_rx(struct module *me, void *loc)
1791 +{
1792 + return (loc >= me->module_core_rx &&
1793 + loc < (me->module_core_rx + me->core_size_rx));
1794 +}
1795 +
1796 +static inline int in_core_rw(struct module *me, void *loc)
1797 +{
1798 + return (loc >= me->module_core_rw &&
1799 + loc < (me->module_core_rw + me->core_size_rw));
1800 }
1801
1802 static inline int in_core(struct module *me, void *loc)
1803 {
1804 - return (loc >= me->module_core &&
1805 - loc <= (me->module_core + me->core_size));
1806 + return in_core_rx(me, loc) || in_core_rw(me, loc);
1807 }
1808
1809 static inline int in_local(struct module *me, void *loc)
1810 @@ -373,13 +395,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
1811 }
1812
1813 /* align things a bit */
1814 - me->core_size = ALIGN(me->core_size, 16);
1815 - me->arch.got_offset = me->core_size;
1816 - me->core_size += gots * sizeof(struct got_entry);
1817 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
1818 + me->arch.got_offset = me->core_size_rw;
1819 + me->core_size_rw += gots * sizeof(struct got_entry);
1820
1821 - me->core_size = ALIGN(me->core_size, 16);
1822 - me->arch.fdesc_offset = me->core_size;
1823 - me->core_size += fdescs * sizeof(Elf_Fdesc);
1824 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
1825 + me->arch.fdesc_offset = me->core_size_rw;
1826 + me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
1827
1828 me->arch.got_max = gots;
1829 me->arch.fdesc_max = fdescs;
1830 @@ -397,7 +419,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
1831
1832 BUG_ON(value == 0);
1833
1834 - got = me->module_core + me->arch.got_offset;
1835 + got = me->module_core_rw + me->arch.got_offset;
1836 for (i = 0; got[i].addr; i++)
1837 if (got[i].addr == value)
1838 goto out;
1839 @@ -415,7 +437,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
1840 #ifdef CONFIG_64BIT
1841 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
1842 {
1843 - Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
1844 + Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
1845
1846 if (!value) {
1847 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
1848 @@ -433,7 +455,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
1849
1850 /* Create new one */
1851 fdesc->addr = value;
1852 - fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1853 + fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1854 return (Elf_Addr)fdesc;
1855 }
1856 #endif /* CONFIG_64BIT */
1857 @@ -845,7 +867,7 @@ register_unwind_table(struct module *me,
1858
1859 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
1860 end = table + sechdrs[me->arch.unwind_section].sh_size;
1861 - gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1862 + gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1863
1864 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
1865 me->arch.unwind_section, table, end, gp);
1866 diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
1867 index c9b9322..02d8940 100644
1868 --- a/arch/parisc/kernel/sys_parisc.c
1869 +++ b/arch/parisc/kernel/sys_parisc.c
1870 @@ -43,7 +43,7 @@ static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
1871 /* At this point: (!vma || addr < vma->vm_end). */
1872 if (TASK_SIZE - len < addr)
1873 return -ENOMEM;
1874 - if (!vma || addr + len <= vma->vm_start)
1875 + if (check_heap_stack_gap(vma, addr, len))
1876 return addr;
1877 addr = vma->vm_end;
1878 }
1879 @@ -79,7 +79,7 @@ static unsigned long get_shared_area(struct address_space *mapping,
1880 /* At this point: (!vma || addr < vma->vm_end). */
1881 if (TASK_SIZE - len < addr)
1882 return -ENOMEM;
1883 - if (!vma || addr + len <= vma->vm_start)
1884 + if (check_heap_stack_gap(vma, addr, len))
1885 return addr;
1886 addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
1887 if (addr < vma->vm_end) /* handle wraparound */
1888 @@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
1889 if (flags & MAP_FIXED)
1890 return addr;
1891 if (!addr)
1892 - addr = TASK_UNMAPPED_BASE;
1893 + addr = current->mm->mmap_base;
1894
1895 if (filp) {
1896 addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
1897 diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
1898 index f19e660..414fe24 100644
1899 --- a/arch/parisc/kernel/traps.c
1900 +++ b/arch/parisc/kernel/traps.c
1901 @@ -733,9 +733,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
1902
1903 down_read(&current->mm->mmap_sem);
1904 vma = find_vma(current->mm,regs->iaoq[0]);
1905 - if (vma && (regs->iaoq[0] >= vma->vm_start)
1906 - && (vma->vm_flags & VM_EXEC)) {
1907 -
1908 + if (vma && (regs->iaoq[0] >= vma->vm_start)) {
1909 fault_address = regs->iaoq[0];
1910 fault_space = regs->iasq[0];
1911
1912 diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
1913 index 18162ce..94de376 100644
1914 --- a/arch/parisc/mm/fault.c
1915 +++ b/arch/parisc/mm/fault.c
1916 @@ -15,6 +15,7 @@
1917 #include <linux/sched.h>
1918 #include <linux/interrupt.h>
1919 #include <linux/module.h>
1920 +#include <linux/unistd.h>
1921
1922 #include <asm/uaccess.h>
1923 #include <asm/traps.h>
1924 @@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, exception_data);
1925 static unsigned long
1926 parisc_acctyp(unsigned long code, unsigned int inst)
1927 {
1928 - if (code == 6 || code == 16)
1929 + if (code == 6 || code == 7 || code == 16)
1930 return VM_EXEC;
1931
1932 switch (inst & 0xf0000000) {
1933 @@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsigned int inst)
1934 }
1935 #endif
1936
1937 +#ifdef CONFIG_PAX_PAGEEXEC
1938 +/*
1939 + * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
1940 + *
1941 + * returns 1 when task should be killed
1942 + * 2 when rt_sigreturn trampoline was detected
1943 + * 3 when unpatched PLT trampoline was detected
1944 + */
1945 +static int pax_handle_fetch_fault(struct pt_regs *regs)
1946 +{
1947 +
1948 +#ifdef CONFIG_PAX_EMUPLT
1949 + int err;
1950 +
1951 + do { /* PaX: unpatched PLT emulation */
1952 + unsigned int bl, depwi;
1953 +
1954 + err = get_user(bl, (unsigned int *)instruction_pointer(regs));
1955 + err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
1956 +
1957 + if (err)
1958 + break;
1959 +
1960 + if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
1961 + unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
1962 +
1963 + err = get_user(ldw, (unsigned int *)addr);
1964 + err |= get_user(bv, (unsigned int *)(addr+4));
1965 + err |= get_user(ldw2, (unsigned int *)(addr+8));
1966 +
1967 + if (err)
1968 + break;
1969 +
1970 + if (ldw == 0x0E801096U &&
1971 + bv == 0xEAC0C000U &&
1972 + ldw2 == 0x0E881095U)
1973 + {
1974 + unsigned int resolver, map;
1975 +
1976 + err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
1977 + err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
1978 + if (err)
1979 + break;
1980 +
1981 + regs->gr[20] = instruction_pointer(regs)+8;
1982 + regs->gr[21] = map;
1983 + regs->gr[22] = resolver;
1984 + regs->iaoq[0] = resolver | 3UL;
1985 + regs->iaoq[1] = regs->iaoq[0] + 4;
1986 + return 3;
1987 + }
1988 + }
1989 + } while (0);
1990 +#endif
1991 +
1992 +#ifdef CONFIG_PAX_EMUTRAMP
1993 +
1994 +#ifndef CONFIG_PAX_EMUSIGRT
1995 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
1996 + return 1;
1997 +#endif
1998 +
1999 + do { /* PaX: rt_sigreturn emulation */
2000 + unsigned int ldi1, ldi2, bel, nop;
2001 +
2002 + err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
2003 + err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
2004 + err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
2005 + err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
2006 +
2007 + if (err)
2008 + break;
2009 +
2010 + if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
2011 + ldi2 == 0x3414015AU &&
2012 + bel == 0xE4008200U &&
2013 + nop == 0x08000240U)
2014 + {
2015 + regs->gr[25] = (ldi1 & 2) >> 1;
2016 + regs->gr[20] = __NR_rt_sigreturn;
2017 + regs->gr[31] = regs->iaoq[1] + 16;
2018 + regs->sr[0] = regs->iasq[1];
2019 + regs->iaoq[0] = 0x100UL;
2020 + regs->iaoq[1] = regs->iaoq[0] + 4;
2021 + regs->iasq[0] = regs->sr[2];
2022 + regs->iasq[1] = regs->sr[2];
2023 + return 2;
2024 + }
2025 + } while (0);
2026 +#endif
2027 +
2028 + return 1;
2029 +}
2030 +
2031 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2032 +{
2033 + unsigned long i;
2034 +
2035 + printk(KERN_ERR "PAX: bytes at PC: ");
2036 + for (i = 0; i < 5; i++) {
2037 + unsigned int c;
2038 + if (get_user(c, (unsigned int *)pc+i))
2039 + printk(KERN_CONT "???????? ");
2040 + else
2041 + printk(KERN_CONT "%08x ", c);
2042 + }
2043 + printk("\n");
2044 +}
2045 +#endif
2046 +
2047 int fixup_exception(struct pt_regs *regs)
2048 {
2049 const struct exception_table_entry *fix;
2050 @@ -192,8 +303,33 @@ good_area:
2051
2052 acc_type = parisc_acctyp(code,regs->iir);
2053
2054 - if ((vma->vm_flags & acc_type) != acc_type)
2055 + if ((vma->vm_flags & acc_type) != acc_type) {
2056 +
2057 +#ifdef CONFIG_PAX_PAGEEXEC
2058 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
2059 + (address & ~3UL) == instruction_pointer(regs))
2060 + {
2061 + up_read(&mm->mmap_sem);
2062 + switch (pax_handle_fetch_fault(regs)) {
2063 +
2064 +#ifdef CONFIG_PAX_EMUPLT
2065 + case 3:
2066 + return;
2067 +#endif
2068 +
2069 +#ifdef CONFIG_PAX_EMUTRAMP
2070 + case 2:
2071 + return;
2072 +#endif
2073 +
2074 + }
2075 + pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
2076 + do_group_exit(SIGKILL);
2077 + }
2078 +#endif
2079 +
2080 goto bad_area;
2081 + }
2082
2083 /*
2084 * If for any reason at all we couldn't handle the fault, make
2085 diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
2086 index 3bf9cca..e7457d0 100644
2087 --- a/arch/powerpc/include/asm/elf.h
2088 +++ b/arch/powerpc/include/asm/elf.h
2089 @@ -178,8 +178,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[ELF_NVSRHALFREG];
2090 the loader. We need to make sure that it is out of the way of the program
2091 that it will "exec", and that there is sufficient room for the brk. */
2092
2093 -extern unsigned long randomize_et_dyn(unsigned long base);
2094 -#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
2095 +#define ELF_ET_DYN_BASE (0x20000000)
2096 +
2097 +#ifdef CONFIG_PAX_ASLR
2098 +#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
2099 +
2100 +#ifdef __powerpc64__
2101 +#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28)
2102 +#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28)
2103 +#else
2104 +#define PAX_DELTA_MMAP_LEN 15
2105 +#define PAX_DELTA_STACK_LEN 15
2106 +#endif
2107 +#endif
2108
2109 /*
2110 * Our registers are always unsigned longs, whether we're a 32 bit
2111 @@ -274,9 +285,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
2112 (0x7ff >> (PAGE_SHIFT - 12)) : \
2113 (0x3ffff >> (PAGE_SHIFT - 12)))
2114
2115 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
2116 -#define arch_randomize_brk arch_randomize_brk
2117 -
2118 #endif /* __KERNEL__ */
2119
2120 /*
2121 diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
2122 index bca8fdc..61e9580 100644
2123 --- a/arch/powerpc/include/asm/kmap_types.h
2124 +++ b/arch/powerpc/include/asm/kmap_types.h
2125 @@ -27,6 +27,7 @@ enum km_type {
2126 KM_PPC_SYNC_PAGE,
2127 KM_PPC_SYNC_ICACHE,
2128 KM_KDB,
2129 + KM_CLEARPAGE,
2130 KM_TYPE_NR
2131 };
2132
2133 diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h
2134 index d4a7f64..451de1c 100644
2135 --- a/arch/powerpc/include/asm/mman.h
2136 +++ b/arch/powerpc/include/asm/mman.h
2137 @@ -44,7 +44,7 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot)
2138 }
2139 #define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot)
2140
2141 -static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
2142 +static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags)
2143 {
2144 return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
2145 }
2146 diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
2147 index dd9c4fd..a2ced87 100644
2148 --- a/arch/powerpc/include/asm/page.h
2149 +++ b/arch/powerpc/include/asm/page.h
2150 @@ -141,8 +141,9 @@ extern phys_addr_t kernstart_addr;
2151 * and needs to be executable. This means the whole heap ends
2152 * up being executable.
2153 */
2154 -#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
2155 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2156 +#define VM_DATA_DEFAULT_FLAGS32 \
2157 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
2158 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2159
2160 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
2161 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2162 @@ -170,6 +171,9 @@ extern phys_addr_t kernstart_addr;
2163 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
2164 #endif
2165
2166 +#define ktla_ktva(addr) (addr)
2167 +#define ktva_ktla(addr) (addr)
2168 +
2169 /*
2170 * Use the top bit of the higher-level page table entries to indicate whether
2171 * the entries we point to contain hugepages. This works because we know that
2172 diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
2173 index fb40ede..d3ce956 100644
2174 --- a/arch/powerpc/include/asm/page_64.h
2175 +++ b/arch/powerpc/include/asm/page_64.h
2176 @@ -144,15 +144,18 @@ do { \
2177 * stack by default, so in the absence of a PT_GNU_STACK program header
2178 * we turn execute permission off.
2179 */
2180 -#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
2181 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2182 +#define VM_STACK_DEFAULT_FLAGS32 \
2183 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
2184 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2185
2186 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
2187 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2188
2189 +#ifndef CONFIG_PAX_PAGEEXEC
2190 #define VM_STACK_DEFAULT_FLAGS \
2191 (is_32bit_task() ? \
2192 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
2193 +#endif
2194
2195 #include <asm-generic/getorder.h>
2196
2197 diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
2198 index 88b0bd9..e32bc67 100644
2199 --- a/arch/powerpc/include/asm/pgtable.h
2200 +++ b/arch/powerpc/include/asm/pgtable.h
2201 @@ -2,6 +2,7 @@
2202 #define _ASM_POWERPC_PGTABLE_H
2203 #ifdef __KERNEL__
2204
2205 +#include <linux/const.h>
2206 #ifndef __ASSEMBLY__
2207 #include <asm/processor.h> /* For TASK_SIZE */
2208 #include <asm/mmu.h>
2209 diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h
2210 index 4aad413..85d86bf 100644
2211 --- a/arch/powerpc/include/asm/pte-hash32.h
2212 +++ b/arch/powerpc/include/asm/pte-hash32.h
2213 @@ -21,6 +21,7 @@
2214 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
2215 #define _PAGE_USER 0x004 /* usermode access allowed */
2216 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
2217 +#define _PAGE_EXEC _PAGE_GUARDED
2218 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
2219 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
2220 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
2221 diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
2222 index 559da19..7e5835c 100644
2223 --- a/arch/powerpc/include/asm/reg.h
2224 +++ b/arch/powerpc/include/asm/reg.h
2225 @@ -212,6 +212,7 @@
2226 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
2227 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
2228 #define DSISR_NOHPTE 0x40000000 /* no translation found */
2229 +#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
2230 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
2231 #define DSISR_ISSTORE 0x02000000 /* access was a store */
2232 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
2233 diff --git a/arch/powerpc/include/asm/system.h b/arch/powerpc/include/asm/system.h
2234 index e30a13d..2b7d994 100644
2235 --- a/arch/powerpc/include/asm/system.h
2236 +++ b/arch/powerpc/include/asm/system.h
2237 @@ -530,7 +530,7 @@ __cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new,
2238 #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
2239 #endif
2240
2241 -extern unsigned long arch_align_stack(unsigned long sp);
2242 +#define arch_align_stack(x) ((x) & ~0xfUL)
2243
2244 /* Used in very early kernel initialization. */
2245 extern unsigned long reloc_offset(void);
2246 diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
2247 index bd0fb84..a42a14b 100644
2248 --- a/arch/powerpc/include/asm/uaccess.h
2249 +++ b/arch/powerpc/include/asm/uaccess.h
2250 @@ -13,6 +13,8 @@
2251 #define VERIFY_READ 0
2252 #define VERIFY_WRITE 1
2253
2254 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
2255 +
2256 /*
2257 * The fs value determines whether argument validity checking should be
2258 * performed or not. If get_fs() == USER_DS, checking is performed, with
2259 @@ -327,52 +329,6 @@ do { \
2260 extern unsigned long __copy_tofrom_user(void __user *to,
2261 const void __user *from, unsigned long size);
2262
2263 -#ifndef __powerpc64__
2264 -
2265 -static inline unsigned long copy_from_user(void *to,
2266 - const void __user *from, unsigned long n)
2267 -{
2268 - unsigned long over;
2269 -
2270 - if (access_ok(VERIFY_READ, from, n))
2271 - return __copy_tofrom_user((__force void __user *)to, from, n);
2272 - if ((unsigned long)from < TASK_SIZE) {
2273 - over = (unsigned long)from + n - TASK_SIZE;
2274 - return __copy_tofrom_user((__force void __user *)to, from,
2275 - n - over) + over;
2276 - }
2277 - return n;
2278 -}
2279 -
2280 -static inline unsigned long copy_to_user(void __user *to,
2281 - const void *from, unsigned long n)
2282 -{
2283 - unsigned long over;
2284 -
2285 - if (access_ok(VERIFY_WRITE, to, n))
2286 - return __copy_tofrom_user(to, (__force void __user *)from, n);
2287 - if ((unsigned long)to < TASK_SIZE) {
2288 - over = (unsigned long)to + n - TASK_SIZE;
2289 - return __copy_tofrom_user(to, (__force void __user *)from,
2290 - n - over) + over;
2291 - }
2292 - return n;
2293 -}
2294 -
2295 -#else /* __powerpc64__ */
2296 -
2297 -#define __copy_in_user(to, from, size) \
2298 - __copy_tofrom_user((to), (from), (size))
2299 -
2300 -extern unsigned long copy_from_user(void *to, const void __user *from,
2301 - unsigned long n);
2302 -extern unsigned long copy_to_user(void __user *to, const void *from,
2303 - unsigned long n);
2304 -extern unsigned long copy_in_user(void __user *to, const void __user *from,
2305 - unsigned long n);
2306 -
2307 -#endif /* __powerpc64__ */
2308 -
2309 static inline unsigned long __copy_from_user_inatomic(void *to,
2310 const void __user *from, unsigned long n)
2311 {
2312 @@ -396,6 +352,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
2313 if (ret == 0)
2314 return 0;
2315 }
2316 +
2317 + if (!__builtin_constant_p(n))
2318 + check_object_size(to, n, false);
2319 +
2320 return __copy_tofrom_user((__force void __user *)to, from, n);
2321 }
2322
2323 @@ -422,6 +382,10 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
2324 if (ret == 0)
2325 return 0;
2326 }
2327 +
2328 + if (!__builtin_constant_p(n))
2329 + check_object_size(from, n, true);
2330 +
2331 return __copy_tofrom_user(to, (__force const void __user *)from, n);
2332 }
2333
2334 @@ -439,6 +403,92 @@ static inline unsigned long __copy_to_user(void __user *to,
2335 return __copy_to_user_inatomic(to, from, size);
2336 }
2337
2338 +#ifndef __powerpc64__
2339 +
2340 +static inline unsigned long __must_check copy_from_user(void *to,
2341 + const void __user *from, unsigned long n)
2342 +{
2343 + unsigned long over;
2344 +
2345 + if ((long)n < 0)
2346 + return n;
2347 +
2348 + if (access_ok(VERIFY_READ, from, n)) {
2349 + if (!__builtin_constant_p(n))
2350 + check_object_size(to, n, false);
2351 + return __copy_tofrom_user((__force void __user *)to, from, n);
2352 + }
2353 + if ((unsigned long)from < TASK_SIZE) {
2354 + over = (unsigned long)from + n - TASK_SIZE;
2355 + if (!__builtin_constant_p(n - over))
2356 + check_object_size(to, n - over, false);
2357 + return __copy_tofrom_user((__force void __user *)to, from,
2358 + n - over) + over;
2359 + }
2360 + return n;
2361 +}
2362 +
2363 +static inline unsigned long __must_check copy_to_user(void __user *to,
2364 + const void *from, unsigned long n)
2365 +{
2366 + unsigned long over;
2367 +
2368 + if ((long)n < 0)
2369 + return n;
2370 +
2371 + if (access_ok(VERIFY_WRITE, to, n)) {
2372 + if (!__builtin_constant_p(n))
2373 + check_object_size(from, n, true);
2374 + return __copy_tofrom_user(to, (__force void __user *)from, n);
2375 + }
2376 + if ((unsigned long)to < TASK_SIZE) {
2377 + over = (unsigned long)to + n - TASK_SIZE;
2378 + if (!__builtin_constant_p(n))
2379 + check_object_size(from, n - over, true);
2380 + return __copy_tofrom_user(to, (__force void __user *)from,
2381 + n - over) + over;
2382 + }
2383 + return n;
2384 +}
2385 +
2386 +#else /* __powerpc64__ */
2387 +
2388 +#define __copy_in_user(to, from, size) \
2389 + __copy_tofrom_user((to), (from), (size))
2390 +
2391 +static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
2392 +{
2393 + if ((long)n < 0 || n > INT_MAX)
2394 + return n;
2395 +
2396 + if (!__builtin_constant_p(n))
2397 + check_object_size(to, n, false);
2398 +
2399 + if (likely(access_ok(VERIFY_READ, from, n)))
2400 + n = __copy_from_user(to, from, n);
2401 + else
2402 + memset(to, 0, n);
2403 + return n;
2404 +}
2405 +
2406 +static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
2407 +{
2408 + if ((long)n < 0 || n > INT_MAX)
2409 + return n;
2410 +
2411 + if (likely(access_ok(VERIFY_WRITE, to, n))) {
2412 + if (!__builtin_constant_p(n))
2413 + check_object_size(from, n, true);
2414 + n = __copy_to_user(to, from, n);
2415 + }
2416 + return n;
2417 +}
2418 +
2419 +extern unsigned long copy_in_user(void __user *to, const void __user *from,
2420 + unsigned long n);
2421 +
2422 +#endif /* __powerpc64__ */
2423 +
2424 extern unsigned long __clear_user(void __user *addr, unsigned long size);
2425
2426 static inline unsigned long clear_user(void __user *addr, unsigned long size)
2427 diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
2428 index 429983c..7af363b 100644
2429 --- a/arch/powerpc/kernel/exceptions-64e.S
2430 +++ b/arch/powerpc/kernel/exceptions-64e.S
2431 @@ -587,6 +587,7 @@ storage_fault_common:
2432 std r14,_DAR(r1)
2433 std r15,_DSISR(r1)
2434 addi r3,r1,STACK_FRAME_OVERHEAD
2435 + bl .save_nvgprs
2436 mr r4,r14
2437 mr r5,r15
2438 ld r14,PACA_EXGEN+EX_R14(r13)
2439 @@ -596,8 +597,7 @@ storage_fault_common:
2440 cmpdi r3,0
2441 bne- 1f
2442 b .ret_from_except_lite
2443 -1: bl .save_nvgprs
2444 - mr r5,r3
2445 +1: mr r5,r3
2446 addi r3,r1,STACK_FRAME_OVERHEAD
2447 ld r4,_DAR(r1)
2448 bl .bad_page_fault
2449 diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
2450 index cf9c69b..ebc9640 100644
2451 --- a/arch/powerpc/kernel/exceptions-64s.S
2452 +++ b/arch/powerpc/kernel/exceptions-64s.S
2453 @@ -1004,10 +1004,10 @@ handle_page_fault:
2454 11: ld r4,_DAR(r1)
2455 ld r5,_DSISR(r1)
2456 addi r3,r1,STACK_FRAME_OVERHEAD
2457 + bl .save_nvgprs
2458 bl .do_page_fault
2459 cmpdi r3,0
2460 beq+ 13f
2461 - bl .save_nvgprs
2462 mr r5,r3
2463 addi r3,r1,STACK_FRAME_OVERHEAD
2464 lwz r4,_DAR(r1)
2465 diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
2466 index 0b6d796..d760ddb 100644
2467 --- a/arch/powerpc/kernel/module_32.c
2468 +++ b/arch/powerpc/kernel/module_32.c
2469 @@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
2470 me->arch.core_plt_section = i;
2471 }
2472 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
2473 - printk("Module doesn't contain .plt or .init.plt sections.\n");
2474 + printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
2475 return -ENOEXEC;
2476 }
2477
2478 @@ -192,11 +192,16 @@ static uint32_t do_plt_call(void *location,
2479
2480 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
2481 /* Init, or core PLT? */
2482 - if (location >= mod->module_core
2483 - && location < mod->module_core + mod->core_size)
2484 + if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
2485 + (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
2486 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
2487 - else
2488 + else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
2489 + (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
2490 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
2491 + else {
2492 + printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
2493 + return ~0UL;
2494 + }
2495
2496 /* Find this entry, or if that fails, the next avail. entry */
2497 while (entry->jump[0]) {
2498 diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
2499 index 6457574..08b28d3 100644
2500 --- a/arch/powerpc/kernel/process.c
2501 +++ b/arch/powerpc/kernel/process.c
2502 @@ -660,8 +660,8 @@ void show_regs(struct pt_regs * regs)
2503 * Lookup NIP late so we have the best change of getting the
2504 * above info out without failing
2505 */
2506 - printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
2507 - printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
2508 + printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
2509 + printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
2510 #endif
2511 show_stack(current, (unsigned long *) regs->gpr[1]);
2512 if (!user_mode(regs))
2513 @@ -1165,10 +1165,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
2514 newsp = stack[0];
2515 ip = stack[STACK_FRAME_LR_SAVE];
2516 if (!firstframe || ip != lr) {
2517 - printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
2518 + printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
2519 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2520 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
2521 - printk(" (%pS)",
2522 + printk(" (%pA)",
2523 (void *)current->ret_stack[curr_frame].ret);
2524 curr_frame--;
2525 }
2526 @@ -1188,7 +1188,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
2527 struct pt_regs *regs = (struct pt_regs *)
2528 (sp + STACK_FRAME_OVERHEAD);
2529 lr = regs->link;
2530 - printk("--- Exception: %lx at %pS\n LR = %pS\n",
2531 + printk("--- Exception: %lx at %pA\n LR = %pA\n",
2532 regs->trap, (void *)regs->nip, (void *)lr);
2533 firstframe = 1;
2534 }
2535 @@ -1263,58 +1263,3 @@ void thread_info_cache_init(void)
2536 }
2537
2538 #endif /* THREAD_SHIFT < PAGE_SHIFT */
2539 -
2540 -unsigned long arch_align_stack(unsigned long sp)
2541 -{
2542 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2543 - sp -= get_random_int() & ~PAGE_MASK;
2544 - return sp & ~0xf;
2545 -}
2546 -
2547 -static inline unsigned long brk_rnd(void)
2548 -{
2549 - unsigned long rnd = 0;
2550 -
2551 - /* 8MB for 32bit, 1GB for 64bit */
2552 - if (is_32bit_task())
2553 - rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
2554 - else
2555 - rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
2556 -
2557 - return rnd << PAGE_SHIFT;
2558 -}
2559 -
2560 -unsigned long arch_randomize_brk(struct mm_struct *mm)
2561 -{
2562 - unsigned long base = mm->brk;
2563 - unsigned long ret;
2564 -
2565 -#ifdef CONFIG_PPC_STD_MMU_64
2566 - /*
2567 - * If we are using 1TB segments and we are allowed to randomise
2568 - * the heap, we can put it above 1TB so it is backed by a 1TB
2569 - * segment. Otherwise the heap will be in the bottom 1TB
2570 - * which always uses 256MB segments and this may result in a
2571 - * performance penalty.
2572 - */
2573 - if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
2574 - base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
2575 -#endif
2576 -
2577 - ret = PAGE_ALIGN(base + brk_rnd());
2578 -
2579 - if (ret < mm->brk)
2580 - return mm->brk;
2581 -
2582 - return ret;
2583 -}
2584 -
2585 -unsigned long randomize_et_dyn(unsigned long base)
2586 -{
2587 - unsigned long ret = PAGE_ALIGN(base + brk_rnd());
2588 -
2589 - if (ret < base)
2590 - return base;
2591 -
2592 - return ret;
2593 -}
2594 diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
2595 index 836a5a1..27289a3 100644
2596 --- a/arch/powerpc/kernel/signal_32.c
2597 +++ b/arch/powerpc/kernel/signal_32.c
2598 @@ -859,7 +859,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
2599 /* Save user registers on the stack */
2600 frame = &rt_sf->uc.uc_mcontext;
2601 addr = frame;
2602 - if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
2603 + if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2604 if (save_user_regs(regs, frame, 0, 1))
2605 goto badframe;
2606 regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
2607 diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
2608 index a50b5ec..547078a 100644
2609 --- a/arch/powerpc/kernel/signal_64.c
2610 +++ b/arch/powerpc/kernel/signal_64.c
2611 @@ -429,7 +429,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
2612 current->thread.fpscr.val = 0;
2613
2614 /* Set up to return from userspace. */
2615 - if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
2616 + if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2617 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
2618 } else {
2619 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
2620 diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
2621 index 5459d14..10f8070 100644
2622 --- a/arch/powerpc/kernel/traps.c
2623 +++ b/arch/powerpc/kernel/traps.c
2624 @@ -98,6 +98,8 @@ static void pmac_backlight_unblank(void)
2625 static inline void pmac_backlight_unblank(void) { }
2626 #endif
2627
2628 +extern void gr_handle_kernel_exploit(void);
2629 +
2630 int die(const char *str, struct pt_regs *regs, long err)
2631 {
2632 static struct {
2633 @@ -171,6 +173,8 @@ int die(const char *str, struct pt_regs *regs, long err)
2634 if (panic_on_oops)
2635 panic("Fatal exception");
2636
2637 + gr_handle_kernel_exploit();
2638 +
2639 oops_exit();
2640 do_exit(err);
2641
2642 diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
2643 index 7d14bb6..1305601 100644
2644 --- a/arch/powerpc/kernel/vdso.c
2645 +++ b/arch/powerpc/kernel/vdso.c
2646 @@ -35,6 +35,7 @@
2647 #include <asm/firmware.h>
2648 #include <asm/vdso.h>
2649 #include <asm/vdso_datapage.h>
2650 +#include <asm/mman.h>
2651
2652 #include "setup.h"
2653
2654 @@ -219,7 +220,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
2655 vdso_base = VDSO32_MBASE;
2656 #endif
2657
2658 - current->mm->context.vdso_base = 0;
2659 + current->mm->context.vdso_base = ~0UL;
2660
2661 /* vDSO has a problem and was disabled, just don't "enable" it for the
2662 * process
2663 @@ -239,7 +240,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
2664 vdso_base = get_unmapped_area(NULL, vdso_base,
2665 (vdso_pages << PAGE_SHIFT) +
2666 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
2667 - 0, 0);
2668 + 0, MAP_PRIVATE | MAP_EXECUTABLE);
2669 if (IS_ERR_VALUE(vdso_base)) {
2670 rc = vdso_base;
2671 goto fail_mmapsem;
2672 diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c
2673 index 5eea6f3..5d10396 100644
2674 --- a/arch/powerpc/lib/usercopy_64.c
2675 +++ b/arch/powerpc/lib/usercopy_64.c
2676 @@ -9,22 +9,6 @@
2677 #include <linux/module.h>
2678 #include <asm/uaccess.h>
2679
2680 -unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
2681 -{
2682 - if (likely(access_ok(VERIFY_READ, from, n)))
2683 - n = __copy_from_user(to, from, n);
2684 - else
2685 - memset(to, 0, n);
2686 - return n;
2687 -}
2688 -
2689 -unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
2690 -{
2691 - if (likely(access_ok(VERIFY_WRITE, to, n)))
2692 - n = __copy_to_user(to, from, n);
2693 - return n;
2694 -}
2695 -
2696 unsigned long copy_in_user(void __user *to, const void __user *from,
2697 unsigned long n)
2698 {
2699 @@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from,
2700 return n;
2701 }
2702
2703 -EXPORT_SYMBOL(copy_from_user);
2704 -EXPORT_SYMBOL(copy_to_user);
2705 EXPORT_SYMBOL(copy_in_user);
2706
2707 diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
2708 index 5efe8c9..db9ceef 100644
2709 --- a/arch/powerpc/mm/fault.c
2710 +++ b/arch/powerpc/mm/fault.c
2711 @@ -32,6 +32,10 @@
2712 #include <linux/perf_event.h>
2713 #include <linux/magic.h>
2714 #include <linux/ratelimit.h>
2715 +#include <linux/slab.h>
2716 +#include <linux/pagemap.h>
2717 +#include <linux/compiler.h>
2718 +#include <linux/unistd.h>
2719
2720 #include <asm/firmware.h>
2721 #include <asm/page.h>
2722 @@ -43,6 +47,7 @@
2723 #include <asm/tlbflush.h>
2724 #include <asm/siginfo.h>
2725 #include <mm/mmu_decl.h>
2726 +#include <asm/ptrace.h>
2727
2728 #ifdef CONFIG_KPROBES
2729 static inline int notify_page_fault(struct pt_regs *regs)
2730 @@ -66,6 +71,33 @@ static inline int notify_page_fault(struct pt_regs *regs)
2731 }
2732 #endif
2733
2734 +#ifdef CONFIG_PAX_PAGEEXEC
2735 +/*
2736 + * PaX: decide what to do with offenders (regs->nip = fault address)
2737 + *
2738 + * returns 1 when task should be killed
2739 + */
2740 +static int pax_handle_fetch_fault(struct pt_regs *regs)
2741 +{
2742 + return 1;
2743 +}
2744 +
2745 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2746 +{
2747 + unsigned long i;
2748 +
2749 + printk(KERN_ERR "PAX: bytes at PC: ");
2750 + for (i = 0; i < 5; i++) {
2751 + unsigned int c;
2752 + if (get_user(c, (unsigned int __user *)pc+i))
2753 + printk(KERN_CONT "???????? ");
2754 + else
2755 + printk(KERN_CONT "%08x ", c);
2756 + }
2757 + printk("\n");
2758 +}
2759 +#endif
2760 +
2761 /*
2762 * Check whether the instruction at regs->nip is a store using
2763 * an update addressing form which will update r1.
2764 @@ -136,7 +168,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
2765 * indicate errors in DSISR but can validly be set in SRR1.
2766 */
2767 if (trap == 0x400)
2768 - error_code &= 0x48200000;
2769 + error_code &= 0x58200000;
2770 else
2771 is_write = error_code & DSISR_ISSTORE;
2772 #else
2773 @@ -259,7 +291,7 @@ good_area:
2774 * "undefined". Of those that can be set, this is the only
2775 * one which seems bad.
2776 */
2777 - if (error_code & 0x10000000)
2778 + if (error_code & DSISR_GUARDED)
2779 /* Guarded storage error. */
2780 goto bad_area;
2781 #endif /* CONFIG_8xx */
2782 @@ -274,7 +306,7 @@ good_area:
2783 * processors use the same I/D cache coherency mechanism
2784 * as embedded.
2785 */
2786 - if (error_code & DSISR_PROTFAULT)
2787 + if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
2788 goto bad_area;
2789 #endif /* CONFIG_PPC_STD_MMU */
2790
2791 @@ -343,6 +375,23 @@ bad_area:
2792 bad_area_nosemaphore:
2793 /* User mode accesses cause a SIGSEGV */
2794 if (user_mode(regs)) {
2795 +
2796 +#ifdef CONFIG_PAX_PAGEEXEC
2797 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
2798 +#ifdef CONFIG_PPC_STD_MMU
2799 + if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
2800 +#else
2801 + if (is_exec && regs->nip == address) {
2802 +#endif
2803 + switch (pax_handle_fetch_fault(regs)) {
2804 + }
2805 +
2806 + pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
2807 + do_group_exit(SIGKILL);
2808 + }
2809 + }
2810 +#endif
2811 +
2812 _exception(SIGSEGV, regs, code, address);
2813 return 0;
2814 }
2815 diff --git a/arch/powerpc/mm/mmap_64.c b/arch/powerpc/mm/mmap_64.c
2816 index 5a783d8..c23e14b 100644
2817 --- a/arch/powerpc/mm/mmap_64.c
2818 +++ b/arch/powerpc/mm/mmap_64.c
2819 @@ -99,10 +99,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
2820 */
2821 if (mmap_is_legacy()) {
2822 mm->mmap_base = TASK_UNMAPPED_BASE;
2823 +
2824 +#ifdef CONFIG_PAX_RANDMMAP
2825 + if (mm->pax_flags & MF_PAX_RANDMMAP)
2826 + mm->mmap_base += mm->delta_mmap;
2827 +#endif
2828 +
2829 mm->get_unmapped_area = arch_get_unmapped_area;
2830 mm->unmap_area = arch_unmap_area;
2831 } else {
2832 mm->mmap_base = mmap_base();
2833 +
2834 +#ifdef CONFIG_PAX_RANDMMAP
2835 + if (mm->pax_flags & MF_PAX_RANDMMAP)
2836 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
2837 +#endif
2838 +
2839 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
2840 mm->unmap_area = arch_unmap_area_topdown;
2841 }
2842 diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
2843 index 73709f7..6b90313 100644
2844 --- a/arch/powerpc/mm/slice.c
2845 +++ b/arch/powerpc/mm/slice.c
2846 @@ -98,7 +98,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
2847 if ((mm->task_size - len) < addr)
2848 return 0;
2849 vma = find_vma(mm, addr);
2850 - return (!vma || (addr + len) <= vma->vm_start);
2851 + return check_heap_stack_gap(vma, addr, len);
2852 }
2853
2854 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
2855 @@ -256,7 +256,7 @@ full_search:
2856 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
2857 continue;
2858 }
2859 - if (!vma || addr + len <= vma->vm_start) {
2860 + if (check_heap_stack_gap(vma, addr, len)) {
2861 /*
2862 * Remember the place where we stopped the search:
2863 */
2864 @@ -313,10 +313,14 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
2865 }
2866 }
2867
2868 - addr = mm->mmap_base;
2869 - while (addr > len) {
2870 + if (mm->mmap_base < len)
2871 + addr = -ENOMEM;
2872 + else
2873 + addr = mm->mmap_base - len;
2874 +
2875 + while (!IS_ERR_VALUE(addr)) {
2876 /* Go down by chunk size */
2877 - addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
2878 + addr = _ALIGN_DOWN(addr, 1ul << pshift);
2879
2880 /* Check for hit with different page size */
2881 mask = slice_range_to_mask(addr, len);
2882 @@ -336,7 +340,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
2883 * return with success:
2884 */
2885 vma = find_vma(mm, addr);
2886 - if (!vma || (addr + len) <= vma->vm_start) {
2887 + if (check_heap_stack_gap(vma, addr, len)) {
2888 /* remember the address as a hint for next time */
2889 if (use_cache)
2890 mm->free_area_cache = addr;
2891 @@ -348,7 +352,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
2892 mm->cached_hole_size = vma->vm_start - addr;
2893
2894 /* try just below the current vma->vm_start */
2895 - addr = vma->vm_start;
2896 + addr = skip_heap_stack_gap(vma, len);
2897 }
2898
2899 /*
2900 @@ -426,6 +430,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
2901 if (fixed && addr > (mm->task_size - len))
2902 return -EINVAL;
2903
2904 +#ifdef CONFIG_PAX_RANDMMAP
2905 + if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
2906 + addr = 0;
2907 +#endif
2908 +
2909 /* If hint, make sure it matches our alignment restrictions */
2910 if (!fixed && addr) {
2911 addr = _ALIGN_UP(addr, 1ul << pshift);
2912 diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
2913 index 547f1a6..0b22b53 100644
2914 --- a/arch/s390/include/asm/elf.h
2915 +++ b/arch/s390/include/asm/elf.h
2916 @@ -162,8 +162,14 @@ extern unsigned int vdso_enabled;
2917 the loader. We need to make sure that it is out of the way of the program
2918 that it will "exec", and that there is sufficient room for the brk. */
2919
2920 -extern unsigned long randomize_et_dyn(unsigned long base);
2921 -#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2))
2922 +#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
2923 +
2924 +#ifdef CONFIG_PAX_ASLR
2925 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
2926 +
2927 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
2928 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
2929 +#endif
2930
2931 /* This yields a mask that user programs can use to figure out what
2932 instruction set this CPU supports. */
2933 @@ -211,7 +217,4 @@ struct linux_binprm;
2934 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
2935 int arch_setup_additional_pages(struct linux_binprm *, int);
2936
2937 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
2938 -#define arch_randomize_brk arch_randomize_brk
2939 -
2940 #endif
2941 diff --git a/arch/s390/include/asm/system.h b/arch/s390/include/asm/system.h
2942 index ef573c1..75a1ce6 100644
2943 --- a/arch/s390/include/asm/system.h
2944 +++ b/arch/s390/include/asm/system.h
2945 @@ -262,7 +262,7 @@ extern void (*_machine_restart)(char *command);
2946 extern void (*_machine_halt)(void);
2947 extern void (*_machine_power_off)(void);
2948
2949 -extern unsigned long arch_align_stack(unsigned long sp);
2950 +#define arch_align_stack(x) ((x) & ~0xfUL)
2951
2952 static inline int tprot(unsigned long addr)
2953 {
2954 diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
2955 index 2b23885..e136e31 100644
2956 --- a/arch/s390/include/asm/uaccess.h
2957 +++ b/arch/s390/include/asm/uaccess.h
2958 @@ -235,6 +235,10 @@ static inline unsigned long __must_check
2959 copy_to_user(void __user *to, const void *from, unsigned long n)
2960 {
2961 might_fault();
2962 +
2963 + if ((long)n < 0)
2964 + return n;
2965 +
2966 if (access_ok(VERIFY_WRITE, to, n))
2967 n = __copy_to_user(to, from, n);
2968 return n;
2969 @@ -260,6 +264,9 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
2970 static inline unsigned long __must_check
2971 __copy_from_user(void *to, const void __user *from, unsigned long n)
2972 {
2973 + if ((long)n < 0)
2974 + return n;
2975 +
2976 if (__builtin_constant_p(n) && (n <= 256))
2977 return uaccess.copy_from_user_small(n, from, to);
2978 else
2979 @@ -294,6 +301,10 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
2980 unsigned int sz = __compiletime_object_size(to);
2981
2982 might_fault();
2983 +
2984 + if ((long)n < 0)
2985 + return n;
2986 +
2987 if (unlikely(sz != -1 && sz < n)) {
2988 copy_from_user_overflow();
2989 return n;
2990 diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
2991 index dfcb343..eda788a 100644
2992 --- a/arch/s390/kernel/module.c
2993 +++ b/arch/s390/kernel/module.c
2994 @@ -161,11 +161,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
2995
2996 /* Increase core size by size of got & plt and set start
2997 offsets for got and plt. */
2998 - me->core_size = ALIGN(me->core_size, 4);
2999 - me->arch.got_offset = me->core_size;
3000 - me->core_size += me->arch.got_size;
3001 - me->arch.plt_offset = me->core_size;
3002 - me->core_size += me->arch.plt_size;
3003 + me->core_size_rw = ALIGN(me->core_size_rw, 4);
3004 + me->arch.got_offset = me->core_size_rw;
3005 + me->core_size_rw += me->arch.got_size;
3006 + me->arch.plt_offset = me->core_size_rx;
3007 + me->core_size_rx += me->arch.plt_size;
3008 return 0;
3009 }
3010
3011 @@ -242,7 +242,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3012 if (info->got_initialized == 0) {
3013 Elf_Addr *gotent;
3014
3015 - gotent = me->module_core + me->arch.got_offset +
3016 + gotent = me->module_core_rw + me->arch.got_offset +
3017 info->got_offset;
3018 *gotent = val;
3019 info->got_initialized = 1;
3020 @@ -266,7 +266,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3021 else if (r_type == R_390_GOTENT ||
3022 r_type == R_390_GOTPLTENT)
3023 *(unsigned int *) loc =
3024 - (val + (Elf_Addr) me->module_core - loc) >> 1;
3025 + (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
3026 else if (r_type == R_390_GOT64 ||
3027 r_type == R_390_GOTPLT64)
3028 *(unsigned long *) loc = val;
3029 @@ -280,7 +280,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3030 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
3031 if (info->plt_initialized == 0) {
3032 unsigned int *ip;
3033 - ip = me->module_core + me->arch.plt_offset +
3034 + ip = me->module_core_rx + me->arch.plt_offset +
3035 info->plt_offset;
3036 #ifndef CONFIG_64BIT
3037 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
3038 @@ -305,7 +305,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3039 val - loc + 0xffffUL < 0x1ffffeUL) ||
3040 (r_type == R_390_PLT32DBL &&
3041 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
3042 - val = (Elf_Addr) me->module_core +
3043 + val = (Elf_Addr) me->module_core_rx +
3044 me->arch.plt_offset +
3045 info->plt_offset;
3046 val += rela->r_addend - loc;
3047 @@ -327,7 +327,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3048 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
3049 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
3050 val = val + rela->r_addend -
3051 - ((Elf_Addr) me->module_core + me->arch.got_offset);
3052 + ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
3053 if (r_type == R_390_GOTOFF16)
3054 *(unsigned short *) loc = val;
3055 else if (r_type == R_390_GOTOFF32)
3056 @@ -337,7 +337,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3057 break;
3058 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
3059 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
3060 - val = (Elf_Addr) me->module_core + me->arch.got_offset +
3061 + val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
3062 rela->r_addend - loc;
3063 if (r_type == R_390_GOTPC)
3064 *(unsigned int *) loc = val;
3065 diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
3066 index 9451b21..ed8956f 100644
3067 --- a/arch/s390/kernel/process.c
3068 +++ b/arch/s390/kernel/process.c
3069 @@ -321,39 +321,3 @@ unsigned long get_wchan(struct task_struct *p)
3070 }
3071 return 0;
3072 }
3073 -
3074 -unsigned long arch_align_stack(unsigned long sp)
3075 -{
3076 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
3077 - sp -= get_random_int() & ~PAGE_MASK;
3078 - return sp & ~0xf;
3079 -}
3080 -
3081 -static inline unsigned long brk_rnd(void)
3082 -{
3083 - /* 8MB for 32bit, 1GB for 64bit */
3084 - if (is_32bit_task())
3085 - return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
3086 - else
3087 - return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
3088 -}
3089 -
3090 -unsigned long arch_randomize_brk(struct mm_struct *mm)
3091 -{
3092 - unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());
3093 -
3094 - if (ret < mm->brk)
3095 - return mm->brk;
3096 - return ret;
3097 -}
3098 -
3099 -unsigned long randomize_et_dyn(unsigned long base)
3100 -{
3101 - unsigned long ret = PAGE_ALIGN(base + brk_rnd());
3102 -
3103 - if (!(current->flags & PF_RANDOMIZE))
3104 - return base;
3105 - if (ret < base)
3106 - return base;
3107 - return ret;
3108 -}
3109 diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
3110 index f09c748..cf9ec1d 100644
3111 --- a/arch/s390/mm/mmap.c
3112 +++ b/arch/s390/mm/mmap.c
3113 @@ -92,10 +92,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
3114 */
3115 if (mmap_is_legacy()) {
3116 mm->mmap_base = TASK_UNMAPPED_BASE;
3117 +
3118 +#ifdef CONFIG_PAX_RANDMMAP
3119 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3120 + mm->mmap_base += mm->delta_mmap;
3121 +#endif
3122 +
3123 mm->get_unmapped_area = arch_get_unmapped_area;
3124 mm->unmap_area = arch_unmap_area;
3125 } else {
3126 mm->mmap_base = mmap_base();
3127 +
3128 +#ifdef CONFIG_PAX_RANDMMAP
3129 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3130 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3131 +#endif
3132 +
3133 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
3134 mm->unmap_area = arch_unmap_area_topdown;
3135 }
3136 @@ -167,10 +179,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
3137 */
3138 if (mmap_is_legacy()) {
3139 mm->mmap_base = TASK_UNMAPPED_BASE;
3140 +
3141 +#ifdef CONFIG_PAX_RANDMMAP
3142 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3143 + mm->mmap_base += mm->delta_mmap;
3144 +#endif
3145 +
3146 mm->get_unmapped_area = s390_get_unmapped_area;
3147 mm->unmap_area = arch_unmap_area;
3148 } else {
3149 mm->mmap_base = mmap_base();
3150 +
3151 +#ifdef CONFIG_PAX_RANDMMAP
3152 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3153 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3154 +#endif
3155 +
3156 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
3157 mm->unmap_area = arch_unmap_area_topdown;
3158 }
3159 diff --git a/arch/score/include/asm/system.h b/arch/score/include/asm/system.h
3160 index 589d5c7..669e274 100644
3161 --- a/arch/score/include/asm/system.h
3162 +++ b/arch/score/include/asm/system.h
3163 @@ -17,7 +17,7 @@ do { \
3164 #define finish_arch_switch(prev) do {} while (0)
3165
3166 typedef void (*vi_handler_t)(void);
3167 -extern unsigned long arch_align_stack(unsigned long sp);
3168 +#define arch_align_stack(x) (x)
3169
3170 #define mb() barrier()
3171 #define rmb() barrier()
3172 diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
3173 index 25d0803..d6c8e36 100644
3174 --- a/arch/score/kernel/process.c
3175 +++ b/arch/score/kernel/process.c
3176 @@ -161,8 +161,3 @@ unsigned long get_wchan(struct task_struct *task)
3177
3178 return task_pt_regs(task)->cp0_epc;
3179 }
3180 -
3181 -unsigned long arch_align_stack(unsigned long sp)
3182 -{
3183 - return sp;
3184 -}
3185 diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
3186 index afeb710..d1d1289 100644
3187 --- a/arch/sh/mm/mmap.c
3188 +++ b/arch/sh/mm/mmap.c
3189 @@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
3190 addr = PAGE_ALIGN(addr);
3191
3192 vma = find_vma(mm, addr);
3193 - if (TASK_SIZE - len >= addr &&
3194 - (!vma || addr + len <= vma->vm_start))
3195 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
3196 return addr;
3197 }
3198
3199 @@ -106,7 +105,7 @@ full_search:
3200 }
3201 return -ENOMEM;
3202 }
3203 - if (likely(!vma || addr + len <= vma->vm_start)) {
3204 + if (likely(check_heap_stack_gap(vma, addr, len))) {
3205 /*
3206 * Remember the place where we stopped the search:
3207 */
3208 @@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
3209 addr = PAGE_ALIGN(addr);
3210
3211 vma = find_vma(mm, addr);
3212 - if (TASK_SIZE - len >= addr &&
3213 - (!vma || addr + len <= vma->vm_start))
3214 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
3215 return addr;
3216 }
3217
3218 @@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
3219 /* make sure it can fit in the remaining address space */
3220 if (likely(addr > len)) {
3221 vma = find_vma(mm, addr-len);
3222 - if (!vma || addr <= vma->vm_start) {
3223 + if (check_heap_stack_gap(vma, addr - len, len)) {
3224 /* remember the address as a hint for next time */
3225 return (mm->free_area_cache = addr-len);
3226 }
3227 @@ -188,18 +186,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
3228 if (unlikely(mm->mmap_base < len))
3229 goto bottomup;
3230
3231 - addr = mm->mmap_base-len;
3232 - if (do_colour_align)
3233 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3234 + addr = mm->mmap_base - len;
3235
3236 do {
3237 + if (do_colour_align)
3238 + addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3239 /*
3240 * Lookup failure means no vma is above this address,
3241 * else if new region fits below vma->vm_start,
3242 * return with success:
3243 */
3244 vma = find_vma(mm, addr);
3245 - if (likely(!vma || addr+len <= vma->vm_start)) {
3246 + if (likely(check_heap_stack_gap(vma, addr, len))) {
3247 /* remember the address as a hint for next time */
3248 return (mm->free_area_cache = addr);
3249 }
3250 @@ -209,10 +207,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
3251 mm->cached_hole_size = vma->vm_start - addr;
3252
3253 /* try just below the current vma->vm_start */
3254 - addr = vma->vm_start-len;
3255 - if (do_colour_align)
3256 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3257 - } while (likely(len < vma->vm_start));
3258 + addr = skip_heap_stack_gap(vma, len);
3259 + } while (!IS_ERR_VALUE(addr));
3260
3261 bottomup:
3262 /*
3263 diff --git a/arch/sparc/Makefile b/arch/sparc/Makefile
3264 index ad1fb5d..fc5315b 100644
3265 --- a/arch/sparc/Makefile
3266 +++ b/arch/sparc/Makefile
3267 @@ -75,7 +75,7 @@ drivers-$(CONFIG_OPROFILE) += arch/sparc/oprofile/
3268 # Export what is needed by arch/sparc/boot/Makefile
3269 export VMLINUX_INIT VMLINUX_MAIN
3270 VMLINUX_INIT := $(head-y) $(init-y)
3271 -VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/
3272 +VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
3273 VMLINUX_MAIN += $(patsubst %/, %/lib.a, $(libs-y)) $(libs-y)
3274 VMLINUX_MAIN += $(drivers-y) $(net-y)
3275
3276 diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
3277 index 9f421df..b81fc12 100644
3278 --- a/arch/sparc/include/asm/atomic_64.h
3279 +++ b/arch/sparc/include/asm/atomic_64.h
3280 @@ -14,18 +14,40 @@
3281 #define ATOMIC64_INIT(i) { (i) }
3282
3283 #define atomic_read(v) (*(volatile int *)&(v)->counter)
3284 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
3285 +{
3286 + return v->counter;
3287 +}
3288 #define atomic64_read(v) (*(volatile long *)&(v)->counter)
3289 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
3290 +{
3291 + return v->counter;
3292 +}
3293
3294 #define atomic_set(v, i) (((v)->counter) = i)
3295 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
3296 +{
3297 + v->counter = i;
3298 +}
3299 #define atomic64_set(v, i) (((v)->counter) = i)
3300 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
3301 +{
3302 + v->counter = i;
3303 +}
3304
3305 extern void atomic_add(int, atomic_t *);
3306 +extern void atomic_add_unchecked(int, atomic_unchecked_t *);
3307 extern void atomic64_add(long, atomic64_t *);
3308 +extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
3309 extern void atomic_sub(int, atomic_t *);
3310 +extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
3311 extern void atomic64_sub(long, atomic64_t *);
3312 +extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
3313
3314 extern int atomic_add_ret(int, atomic_t *);
3315 +extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
3316 extern long atomic64_add_ret(long, atomic64_t *);
3317 +extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
3318 extern int atomic_sub_ret(int, atomic_t *);
3319 extern long atomic64_sub_ret(long, atomic64_t *);
3320
3321 @@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomic64_t *);
3322 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
3323
3324 #define atomic_inc_return(v) atomic_add_ret(1, v)
3325 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
3326 +{
3327 + return atomic_add_ret_unchecked(1, v);
3328 +}
3329 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
3330 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
3331 +{
3332 + return atomic64_add_ret_unchecked(1, v);
3333 +}
3334
3335 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
3336 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
3337
3338 #define atomic_add_return(i, v) atomic_add_ret(i, v)
3339 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
3340 +{
3341 + return atomic_add_ret_unchecked(i, v);
3342 +}
3343 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
3344 +static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
3345 +{
3346 + return atomic64_add_ret_unchecked(i, v);
3347 +}
3348
3349 /*
3350 * atomic_inc_and_test - increment and test
3351 @@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomic64_t *);
3352 * other cases.
3353 */
3354 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
3355 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
3356 +{
3357 + return atomic_inc_return_unchecked(v) == 0;
3358 +}
3359 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
3360
3361 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
3362 @@ -59,25 +101,60 @@ extern long atomic64_sub_ret(long, atomic64_t *);
3363 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
3364
3365 #define atomic_inc(v) atomic_add(1, v)
3366 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
3367 +{
3368 + atomic_add_unchecked(1, v);
3369 +}
3370 #define atomic64_inc(v) atomic64_add(1, v)
3371 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
3372 +{
3373 + atomic64_add_unchecked(1, v);
3374 +}
3375
3376 #define atomic_dec(v) atomic_sub(1, v)
3377 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
3378 +{
3379 + atomic_sub_unchecked(1, v);
3380 +}
3381 #define atomic64_dec(v) atomic64_sub(1, v)
3382 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
3383 +{
3384 + atomic64_sub_unchecked(1, v);
3385 +}
3386
3387 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
3388 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
3389
3390 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
3391 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
3392 +{
3393 + return cmpxchg(&v->counter, old, new);
3394 +}
3395 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
3396 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
3397 +{
3398 + return xchg(&v->counter, new);
3399 +}
3400
3401 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
3402 {
3403 - int c, old;
3404 + int c, old, new;
3405 c = atomic_read(v);
3406 for (;;) {
3407 - if (unlikely(c == (u)))
3408 + if (unlikely(c == u))
3409 break;
3410 - old = atomic_cmpxchg((v), c, c + (a));
3411 +
3412 + asm volatile("addcc %2, %0, %0\n"
3413 +
3414 +#ifdef CONFIG_PAX_REFCOUNT
3415 + "tvs %%icc, 6\n"
3416 +#endif
3417 +
3418 + : "=r" (new)
3419 + : "0" (c), "ir" (a)
3420 + : "cc");
3421 +
3422 + old = atomic_cmpxchg(v, c, new);
3423 if (likely(old == c))
3424 break;
3425 c = old;
3426 @@ -89,20 +166,35 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
3427 #define atomic64_cmpxchg(v, o, n) \
3428 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
3429 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
3430 +static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
3431 +{
3432 + return xchg(&v->counter, new);
3433 +}
3434
3435 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
3436 {
3437 - long c, old;
3438 + long c, old, new;
3439 c = atomic64_read(v);
3440 for (;;) {
3441 - if (unlikely(c == (u)))
3442 + if (unlikely(c == u))
3443 break;
3444 - old = atomic64_cmpxchg((v), c, c + (a));
3445 +
3446 + asm volatile("addcc %2, %0, %0\n"
3447 +
3448 +#ifdef CONFIG_PAX_REFCOUNT
3449 + "tvs %%xcc, 6\n"
3450 +#endif
3451 +
3452 + : "=r" (new)
3453 + : "0" (c), "ir" (a)
3454 + : "cc");
3455 +
3456 + old = atomic64_cmpxchg(v, c, new);
3457 if (likely(old == c))
3458 break;
3459 c = old;
3460 }
3461 - return c != (u);
3462 + return c != u;
3463 }
3464
3465 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
3466 diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h
3467 index 69358b5..17b4745 100644
3468 --- a/arch/sparc/include/asm/cache.h
3469 +++ b/arch/sparc/include/asm/cache.h
3470 @@ -10,7 +10,7 @@
3471 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
3472
3473 #define L1_CACHE_SHIFT 5
3474 -#define L1_CACHE_BYTES 32
3475 +#define L1_CACHE_BYTES 32UL
3476
3477 #ifdef CONFIG_SPARC32
3478 #define SMP_CACHE_BYTES_SHIFT 5
3479 diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h
3480 index 4269ca6..e3da77f 100644
3481 --- a/arch/sparc/include/asm/elf_32.h
3482 +++ b/arch/sparc/include/asm/elf_32.h
3483 @@ -114,6 +114,13 @@ typedef struct {
3484
3485 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
3486
3487 +#ifdef CONFIG_PAX_ASLR
3488 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
3489 +
3490 +#define PAX_DELTA_MMAP_LEN 16
3491 +#define PAX_DELTA_STACK_LEN 16
3492 +#endif
3493 +
3494 /* This yields a mask that user programs can use to figure out what
3495 instruction set this cpu supports. This can NOT be done in userspace
3496 on Sparc. */
3497 diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
3498 index 7df8b7f..4946269 100644
3499 --- a/arch/sparc/include/asm/elf_64.h
3500 +++ b/arch/sparc/include/asm/elf_64.h
3501 @@ -180,6 +180,13 @@ typedef struct {
3502 #define ELF_ET_DYN_BASE 0x0000010000000000UL
3503 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
3504
3505 +#ifdef CONFIG_PAX_ASLR
3506 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
3507 +
3508 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
3509 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
3510 +#endif
3511 +
3512 extern unsigned long sparc64_elf_hwcap;
3513 #define ELF_HWCAP sparc64_elf_hwcap
3514
3515 diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
3516 index a790cc6..091ed94 100644
3517 --- a/arch/sparc/include/asm/pgtable_32.h
3518 +++ b/arch/sparc/include/asm/pgtable_32.h
3519 @@ -45,6 +45,13 @@ BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
3520 BTFIXUPDEF_INT(page_none)
3521 BTFIXUPDEF_INT(page_copy)
3522 BTFIXUPDEF_INT(page_readonly)
3523 +
3524 +#ifdef CONFIG_PAX_PAGEEXEC
3525 +BTFIXUPDEF_INT(page_shared_noexec)
3526 +BTFIXUPDEF_INT(page_copy_noexec)
3527 +BTFIXUPDEF_INT(page_readonly_noexec)
3528 +#endif
3529 +
3530 BTFIXUPDEF_INT(page_kernel)
3531
3532 #define PMD_SHIFT SUN4C_PMD_SHIFT
3533 @@ -66,6 +73,16 @@ extern pgprot_t PAGE_SHARED;
3534 #define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
3535 #define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
3536
3537 +#ifdef CONFIG_PAX_PAGEEXEC
3538 +extern pgprot_t PAGE_SHARED_NOEXEC;
3539 +# define PAGE_COPY_NOEXEC __pgprot(BTFIXUP_INT(page_copy_noexec))
3540 +# define PAGE_READONLY_NOEXEC __pgprot(BTFIXUP_INT(page_readonly_noexec))
3541 +#else
3542 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
3543 +# define PAGE_COPY_NOEXEC PAGE_COPY
3544 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
3545 +#endif
3546 +
3547 extern unsigned long page_kernel;
3548
3549 #ifdef MODULE
3550 diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
3551 index f6ae2b2..b03ffc7 100644
3552 --- a/arch/sparc/include/asm/pgtsrmmu.h
3553 +++ b/arch/sparc/include/asm/pgtsrmmu.h
3554 @@ -115,6 +115,13 @@
3555 SRMMU_EXEC | SRMMU_REF)
3556 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
3557 SRMMU_EXEC | SRMMU_REF)
3558 +
3559 +#ifdef CONFIG_PAX_PAGEEXEC
3560 +#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
3561 +#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3562 +#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3563 +#endif
3564 +
3565 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
3566 SRMMU_DIRTY | SRMMU_REF)
3567
3568 diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
3569 index 9689176..63c18ea 100644
3570 --- a/arch/sparc/include/asm/spinlock_64.h
3571 +++ b/arch/sparc/include/asm/spinlock_64.h
3572 @@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long fla
3573
3574 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
3575
3576 -static void inline arch_read_lock(arch_rwlock_t *lock)
3577 +static inline void arch_read_lock(arch_rwlock_t *lock)
3578 {
3579 unsigned long tmp1, tmp2;
3580
3581 __asm__ __volatile__ (
3582 "1: ldsw [%2], %0\n"
3583 " brlz,pn %0, 2f\n"
3584 -"4: add %0, 1, %1\n"
3585 +"4: addcc %0, 1, %1\n"
3586 +
3587 +#ifdef CONFIG_PAX_REFCOUNT
3588 +" tvs %%icc, 6\n"
3589 +#endif
3590 +
3591 " cas [%2], %0, %1\n"
3592 " cmp %0, %1\n"
3593 " bne,pn %%icc, 1b\n"
3594 @@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_rwlock_t *lock)
3595 " .previous"
3596 : "=&r" (tmp1), "=&r" (tmp2)
3597 : "r" (lock)
3598 - : "memory");
3599 + : "memory", "cc");
3600 }
3601
3602 -static int inline arch_read_trylock(arch_rwlock_t *lock)
3603 +static inline int arch_read_trylock(arch_rwlock_t *lock)
3604 {
3605 int tmp1, tmp2;
3606
3607 @@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
3608 "1: ldsw [%2], %0\n"
3609 " brlz,a,pn %0, 2f\n"
3610 " mov 0, %0\n"
3611 -" add %0, 1, %1\n"
3612 +" addcc %0, 1, %1\n"
3613 +
3614 +#ifdef CONFIG_PAX_REFCOUNT
3615 +" tvs %%icc, 6\n"
3616 +#endif
3617 +
3618 " cas [%2], %0, %1\n"
3619 " cmp %0, %1\n"
3620 " bne,pn %%icc, 1b\n"
3621 @@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
3622 return tmp1;
3623 }
3624
3625 -static void inline arch_read_unlock(arch_rwlock_t *lock)
3626 +static inline void arch_read_unlock(arch_rwlock_t *lock)
3627 {
3628 unsigned long tmp1, tmp2;
3629
3630 __asm__ __volatile__(
3631 "1: lduw [%2], %0\n"
3632 -" sub %0, 1, %1\n"
3633 +" subcc %0, 1, %1\n"
3634 +
3635 +#ifdef CONFIG_PAX_REFCOUNT
3636 +" tvs %%icc, 6\n"
3637 +#endif
3638 +
3639 " cas [%2], %0, %1\n"
3640 " cmp %0, %1\n"
3641 " bne,pn %%xcc, 1b\n"
3642 @@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch_rwlock_t *lock)
3643 : "memory");
3644 }
3645
3646 -static void inline arch_write_lock(arch_rwlock_t *lock)
3647 +static inline void arch_write_lock(arch_rwlock_t *lock)
3648 {
3649 unsigned long mask, tmp1, tmp2;
3650
3651 @@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_rwlock_t *lock)
3652 : "memory");
3653 }
3654
3655 -static void inline arch_write_unlock(arch_rwlock_t *lock)
3656 +static inline void arch_write_unlock(arch_rwlock_t *lock)
3657 {
3658 __asm__ __volatile__(
3659 " stw %%g0, [%0]"
3660 @@ -186,7 +201,7 @@ static void inline arch_write_unlock(arch_rwlock_t *lock)
3661 : "memory");
3662 }
3663
3664 -static int inline arch_write_trylock(arch_rwlock_t *lock)
3665 +static inline int arch_write_trylock(arch_rwlock_t *lock)
3666 {
3667 unsigned long mask, tmp1, tmp2, result;
3668
3669 diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
3670 index fa57532..e1a4c53 100644
3671 --- a/arch/sparc/include/asm/thread_info_32.h
3672 +++ b/arch/sparc/include/asm/thread_info_32.h
3673 @@ -50,6 +50,8 @@ struct thread_info {
3674 unsigned long w_saved;
3675
3676 struct restart_block restart_block;
3677 +
3678 + unsigned long lowest_stack;
3679 };
3680
3681 /*
3682 diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
3683 index 60d86be..952dea1 100644
3684 --- a/arch/sparc/include/asm/thread_info_64.h
3685 +++ b/arch/sparc/include/asm/thread_info_64.h
3686 @@ -63,6 +63,8 @@ struct thread_info {
3687 struct pt_regs *kern_una_regs;
3688 unsigned int kern_una_insn;
3689
3690 + unsigned long lowest_stack;
3691 +
3692 unsigned long fpregs[0] __attribute__ ((aligned(64)));
3693 };
3694
3695 diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h
3696 index e88fbe5..96b0ce5 100644
3697 --- a/arch/sparc/include/asm/uaccess.h
3698 +++ b/arch/sparc/include/asm/uaccess.h
3699 @@ -1,5 +1,13 @@
3700 #ifndef ___ASM_SPARC_UACCESS_H
3701 #define ___ASM_SPARC_UACCESS_H
3702 +
3703 +#ifdef __KERNEL__
3704 +#ifndef __ASSEMBLY__
3705 +#include <linux/types.h>
3706 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
3707 +#endif
3708 +#endif
3709 +
3710 #if defined(__sparc__) && defined(__arch64__)
3711 #include <asm/uaccess_64.h>
3712 #else
3713 diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
3714 index 8303ac4..07f333d 100644
3715 --- a/arch/sparc/include/asm/uaccess_32.h
3716 +++ b/arch/sparc/include/asm/uaccess_32.h
3717 @@ -249,27 +249,46 @@ extern unsigned long __copy_user(void __user *to, const void __user *from, unsig
3718
3719 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
3720 {
3721 - if (n && __access_ok((unsigned long) to, n))
3722 + if ((long)n < 0)
3723 + return n;
3724 +
3725 + if (n && __access_ok((unsigned long) to, n)) {
3726 + if (!__builtin_constant_p(n))
3727 + check_object_size(from, n, true);
3728 return __copy_user(to, (__force void __user *) from, n);
3729 - else
3730 + } else
3731 return n;
3732 }
3733
3734 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
3735 {
3736 + if ((long)n < 0)
3737 + return n;
3738 +
3739 + if (!__builtin_constant_p(n))
3740 + check_object_size(from, n, true);
3741 +
3742 return __copy_user(to, (__force void __user *) from, n);
3743 }
3744
3745 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
3746 {
3747 - if (n && __access_ok((unsigned long) from, n))
3748 + if ((long)n < 0)
3749 + return n;
3750 +
3751 + if (n && __access_ok((unsigned long) from, n)) {
3752 + if (!__builtin_constant_p(n))
3753 + check_object_size(to, n, false);
3754 return __copy_user((__force void __user *) to, from, n);
3755 - else
3756 + } else
3757 return n;
3758 }
3759
3760 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
3761 {
3762 + if ((long)n < 0)
3763 + return n;
3764 +
3765 return __copy_user((__force void __user *) to, from, n);
3766 }
3767
3768 diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
3769 index 3e1449f..5293a0e 100644
3770 --- a/arch/sparc/include/asm/uaccess_64.h
3771 +++ b/arch/sparc/include/asm/uaccess_64.h
3772 @@ -10,6 +10,7 @@
3773 #include <linux/compiler.h>
3774 #include <linux/string.h>
3775 #include <linux/thread_info.h>
3776 +#include <linux/kernel.h>
3777 #include <asm/asi.h>
3778 #include <asm/system.h>
3779 #include <asm/spitfire.h>
3780 @@ -213,8 +214,15 @@ extern unsigned long copy_from_user_fixup(void *to, const void __user *from,
3781 static inline unsigned long __must_check
3782 copy_from_user(void *to, const void __user *from, unsigned long size)
3783 {
3784 - unsigned long ret = ___copy_from_user(to, from, size);
3785 + unsigned long ret;
3786
3787 + if ((long)size < 0 || size > INT_MAX)
3788 + return size;
3789 +
3790 + if (!__builtin_constant_p(size))
3791 + check_object_size(to, size, false);
3792 +
3793 + ret = ___copy_from_user(to, from, size);
3794 if (unlikely(ret))
3795 ret = copy_from_user_fixup(to, from, size);
3796
3797 @@ -230,8 +238,15 @@ extern unsigned long copy_to_user_fixup(void __user *to, const void *from,
3798 static inline unsigned long __must_check
3799 copy_to_user(void __user *to, const void *from, unsigned long size)
3800 {
3801 - unsigned long ret = ___copy_to_user(to, from, size);
3802 + unsigned long ret;
3803
3804 + if ((long)size < 0 || size > INT_MAX)
3805 + return size;
3806 +
3807 + if (!__builtin_constant_p(size))
3808 + check_object_size(from, size, true);
3809 +
3810 + ret = ___copy_to_user(to, from, size);
3811 if (unlikely(ret))
3812 ret = copy_to_user_fixup(to, from, size);
3813 return ret;
3814 diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
3815 index cb85458..e063f17 100644
3816 --- a/arch/sparc/kernel/Makefile
3817 +++ b/arch/sparc/kernel/Makefile
3818 @@ -3,7 +3,7 @@
3819 #
3820
3821 asflags-y := -ansi
3822 -ccflags-y := -Werror
3823 +#ccflags-y := -Werror
3824
3825 extra-y := head_$(BITS).o
3826 extra-y += init_task.o
3827 diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
3828 index f793742..4d880af 100644
3829 --- a/arch/sparc/kernel/process_32.c
3830 +++ b/arch/sparc/kernel/process_32.c
3831 @@ -204,7 +204,7 @@ void __show_backtrace(unsigned long fp)
3832 rw->ins[4], rw->ins[5],
3833 rw->ins[6],
3834 rw->ins[7]);
3835 - printk("%pS\n", (void *) rw->ins[7]);
3836 + printk("%pA\n", (void *) rw->ins[7]);
3837 rw = (struct reg_window32 *) rw->ins[6];
3838 }
3839 spin_unlock_irqrestore(&sparc_backtrace_lock, flags);
3840 @@ -271,14 +271,14 @@ void show_regs(struct pt_regs *r)
3841
3842 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
3843 r->psr, r->pc, r->npc, r->y, print_tainted());
3844 - printk("PC: <%pS>\n", (void *) r->pc);
3845 + printk("PC: <%pA>\n", (void *) r->pc);
3846 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
3847 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
3848 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
3849 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
3850 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
3851 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
3852 - printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
3853 + printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
3854
3855 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
3856 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
3857 @@ -313,7 +313,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
3858 rw = (struct reg_window32 *) fp;
3859 pc = rw->ins[7];
3860 printk("[%08lx : ", pc);
3861 - printk("%pS ] ", (void *) pc);
3862 + printk("%pA ] ", (void *) pc);
3863 fp = rw->ins[6];
3864 } while (++count < 16);
3865 printk("\n");
3866 diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
3867 index 3739a06..48b2ff0 100644
3868 --- a/arch/sparc/kernel/process_64.c
3869 +++ b/arch/sparc/kernel/process_64.c
3870 @@ -180,14 +180,14 @@ static void show_regwindow(struct pt_regs *regs)
3871 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
3872 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
3873 if (regs->tstate & TSTATE_PRIV)
3874 - printk("I7: <%pS>\n", (void *) rwk->ins[7]);
3875 + printk("I7: <%pA>\n", (void *) rwk->ins[7]);
3876 }
3877
3878 void show_regs(struct pt_regs *regs)
3879 {
3880 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
3881 regs->tpc, regs->tnpc, regs->y, print_tainted());
3882 - printk("TPC: <%pS>\n", (void *) regs->tpc);
3883 + printk("TPC: <%pA>\n", (void *) regs->tpc);
3884 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
3885 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
3886 regs->u_regs[3]);
3887 @@ -200,7 +200,7 @@ void show_regs(struct pt_regs *regs)
3888 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
3889 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
3890 regs->u_regs[15]);
3891 - printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
3892 + printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
3893 show_regwindow(regs);
3894 show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
3895 }
3896 @@ -285,7 +285,7 @@ void arch_trigger_all_cpu_backtrace(void)
3897 ((tp && tp->task) ? tp->task->pid : -1));
3898
3899 if (gp->tstate & TSTATE_PRIV) {
3900 - printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
3901 + printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
3902 (void *) gp->tpc,
3903 (void *) gp->o7,
3904 (void *) gp->i7,
3905 diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
3906 index 42b282f..28ce9f2 100644
3907 --- a/arch/sparc/kernel/sys_sparc_32.c
3908 +++ b/arch/sparc/kernel/sys_sparc_32.c
3909 @@ -56,7 +56,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
3910 if (ARCH_SUN4C && len > 0x20000000)
3911 return -ENOMEM;
3912 if (!addr)
3913 - addr = TASK_UNMAPPED_BASE;
3914 + addr = current->mm->mmap_base;
3915
3916 if (flags & MAP_SHARED)
3917 addr = COLOUR_ALIGN(addr);
3918 @@ -71,7 +71,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
3919 }
3920 if (TASK_SIZE - PAGE_SIZE - len < addr)
3921 return -ENOMEM;
3922 - if (!vmm || addr + len <= vmm->vm_start)
3923 + if (check_heap_stack_gap(vmm, addr, len))
3924 return addr;
3925 addr = vmm->vm_end;
3926 if (flags & MAP_SHARED)
3927 diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
3928 index 441521a..b767073 100644
3929 --- a/arch/sparc/kernel/sys_sparc_64.c
3930 +++ b/arch/sparc/kernel/sys_sparc_64.c
3931 @@ -124,7 +124,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
3932 /* We do not accept a shared mapping if it would violate
3933 * cache aliasing constraints.
3934 */
3935 - if ((flags & MAP_SHARED) &&
3936 + if ((filp || (flags & MAP_SHARED)) &&
3937 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
3938 return -EINVAL;
3939 return addr;
3940 @@ -139,6 +139,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
3941 if (filp || (flags & MAP_SHARED))
3942 do_color_align = 1;
3943
3944 +#ifdef CONFIG_PAX_RANDMMAP
3945 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
3946 +#endif
3947 +
3948 if (addr) {
3949 if (do_color_align)
3950 addr = COLOUR_ALIGN(addr, pgoff);
3951 @@ -146,15 +150,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
3952 addr = PAGE_ALIGN(addr);
3953
3954 vma = find_vma(mm, addr);
3955 - if (task_size - len >= addr &&
3956 - (!vma || addr + len <= vma->vm_start))
3957 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
3958 return addr;
3959 }
3960
3961 if (len > mm->cached_hole_size) {
3962 - start_addr = addr = mm->free_area_cache;
3963 + start_addr = addr = mm->free_area_cache;
3964 } else {
3965 - start_addr = addr = TASK_UNMAPPED_BASE;
3966 + start_addr = addr = mm->mmap_base;
3967 mm->cached_hole_size = 0;
3968 }
3969
3970 @@ -174,14 +177,14 @@ full_search:
3971 vma = find_vma(mm, VA_EXCLUDE_END);
3972 }
3973 if (unlikely(task_size < addr)) {
3974 - if (start_addr != TASK_UNMAPPED_BASE) {
3975 - start_addr = addr = TASK_UNMAPPED_BASE;
3976 + if (start_addr != mm->mmap_base) {
3977 + start_addr = addr = mm->mmap_base;
3978 mm->cached_hole_size = 0;
3979 goto full_search;
3980 }
3981 return -ENOMEM;
3982 }
3983 - if (likely(!vma || addr + len <= vma->vm_start)) {
3984 + if (likely(check_heap_stack_gap(vma, addr, len))) {
3985 /*
3986 * Remember the place where we stopped the search:
3987 */
3988 @@ -215,7 +218,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
3989 /* We do not accept a shared mapping if it would violate
3990 * cache aliasing constraints.
3991 */
3992 - if ((flags & MAP_SHARED) &&
3993 + if ((filp || (flags & MAP_SHARED)) &&
3994 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
3995 return -EINVAL;
3996 return addr;
3997 @@ -236,8 +239,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
3998 addr = PAGE_ALIGN(addr);
3999
4000 vma = find_vma(mm, addr);
4001 - if (task_size - len >= addr &&
4002 - (!vma || addr + len <= vma->vm_start))
4003 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
4004 return addr;
4005 }
4006
4007 @@ -258,7 +260,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4008 /* make sure it can fit in the remaining address space */
4009 if (likely(addr > len)) {
4010 vma = find_vma(mm, addr-len);
4011 - if (!vma || addr <= vma->vm_start) {
4012 + if (check_heap_stack_gap(vma, addr - len, len)) {
4013 /* remember the address as a hint for next time */
4014 return (mm->free_area_cache = addr-len);
4015 }
4016 @@ -267,18 +269,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4017 if (unlikely(mm->mmap_base < len))
4018 goto bottomup;
4019
4020 - addr = mm->mmap_base-len;
4021 - if (do_color_align)
4022 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4023 + addr = mm->mmap_base - len;
4024
4025 do {
4026 + if (do_color_align)
4027 + addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4028 /*
4029 * Lookup failure means no vma is above this address,
4030 * else if new region fits below vma->vm_start,
4031 * return with success:
4032 */
4033 vma = find_vma(mm, addr);
4034 - if (likely(!vma || addr+len <= vma->vm_start)) {
4035 + if (likely(check_heap_stack_gap(vma, addr, len))) {
4036 /* remember the address as a hint for next time */
4037 return (mm->free_area_cache = addr);
4038 }
4039 @@ -288,10 +290,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4040 mm->cached_hole_size = vma->vm_start - addr;
4041
4042 /* try just below the current vma->vm_start */
4043 - addr = vma->vm_start-len;
4044 - if (do_color_align)
4045 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4046 - } while (likely(len < vma->vm_start));
4047 + addr = skip_heap_stack_gap(vma, len);
4048 + } while (!IS_ERR_VALUE(addr));
4049
4050 bottomup:
4051 /*
4052 @@ -390,6 +390,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4053 gap == RLIM_INFINITY ||
4054 sysctl_legacy_va_layout) {
4055 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
4056 +
4057 +#ifdef CONFIG_PAX_RANDMMAP
4058 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4059 + mm->mmap_base += mm->delta_mmap;
4060 +#endif
4061 +
4062 mm->get_unmapped_area = arch_get_unmapped_area;
4063 mm->unmap_area = arch_unmap_area;
4064 } else {
4065 @@ -402,6 +408,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4066 gap = (task_size / 6 * 5);
4067
4068 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
4069 +
4070 +#ifdef CONFIG_PAX_RANDMMAP
4071 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4072 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4073 +#endif
4074 +
4075 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4076 mm->unmap_area = arch_unmap_area_topdown;
4077 }
4078 diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
4079 index 591f20c..0f1b925 100644
4080 --- a/arch/sparc/kernel/traps_32.c
4081 +++ b/arch/sparc/kernel/traps_32.c
4082 @@ -45,6 +45,8 @@ static void instruction_dump(unsigned long *pc)
4083 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
4084 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
4085
4086 +extern void gr_handle_kernel_exploit(void);
4087 +
4088 void die_if_kernel(char *str, struct pt_regs *regs)
4089 {
4090 static int die_counter;
4091 @@ -77,15 +79,17 @@ void die_if_kernel(char *str, struct pt_regs *regs)
4092 count++ < 30 &&
4093 (((unsigned long) rw) >= PAGE_OFFSET) &&
4094 !(((unsigned long) rw) & 0x7)) {
4095 - printk("Caller[%08lx]: %pS\n", rw->ins[7],
4096 + printk("Caller[%08lx]: %pA\n", rw->ins[7],
4097 (void *) rw->ins[7]);
4098 rw = (struct reg_window32 *)rw->ins[6];
4099 }
4100 }
4101 printk("Instruction DUMP:");
4102 instruction_dump ((unsigned long *) regs->pc);
4103 - if(regs->psr & PSR_PS)
4104 + if(regs->psr & PSR_PS) {
4105 + gr_handle_kernel_exploit();
4106 do_exit(SIGKILL);
4107 + }
4108 do_exit(SIGSEGV);
4109 }
4110
4111 diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
4112 index 0cbdaa4..438e4c9 100644
4113 --- a/arch/sparc/kernel/traps_64.c
4114 +++ b/arch/sparc/kernel/traps_64.c
4115 @@ -75,7 +75,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
4116 i + 1,
4117 p->trapstack[i].tstate, p->trapstack[i].tpc,
4118 p->trapstack[i].tnpc, p->trapstack[i].tt);
4119 - printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
4120 + printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
4121 }
4122 }
4123
4124 @@ -95,6 +95,12 @@ void bad_trap(struct pt_regs *regs, long lvl)
4125
4126 lvl -= 0x100;
4127 if (regs->tstate & TSTATE_PRIV) {
4128 +
4129 +#ifdef CONFIG_PAX_REFCOUNT
4130 + if (lvl == 6)
4131 + pax_report_refcount_overflow(regs);
4132 +#endif
4133 +
4134 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
4135 die_if_kernel(buffer, regs);
4136 }
4137 @@ -113,11 +119,16 @@ void bad_trap(struct pt_regs *regs, long lvl)
4138 void bad_trap_tl1(struct pt_regs *regs, long lvl)
4139 {
4140 char buffer[32];
4141 -
4142 +
4143 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
4144 0, lvl, SIGTRAP) == NOTIFY_STOP)
4145 return;
4146
4147 +#ifdef CONFIG_PAX_REFCOUNT
4148 + if (lvl == 6)
4149 + pax_report_refcount_overflow(regs);
4150 +#endif
4151 +
4152 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
4153
4154 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
4155 @@ -1141,7 +1152,7 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in
4156 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
4157 printk("%s" "ERROR(%d): ",
4158 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
4159 - printk("TPC<%pS>\n", (void *) regs->tpc);
4160 + printk("TPC<%pA>\n", (void *) regs->tpc);
4161 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
4162 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
4163 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
4164 @@ -1748,7 +1759,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
4165 smp_processor_id(),
4166 (type & 0x1) ? 'I' : 'D',
4167 regs->tpc);
4168 - printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
4169 + printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
4170 panic("Irrecoverable Cheetah+ parity error.");
4171 }
4172
4173 @@ -1756,7 +1767,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
4174 smp_processor_id(),
4175 (type & 0x1) ? 'I' : 'D',
4176 regs->tpc);
4177 - printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
4178 + printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
4179 }
4180
4181 struct sun4v_error_entry {
4182 @@ -1963,9 +1974,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
4183
4184 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
4185 regs->tpc, tl);
4186 - printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
4187 + printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
4188 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
4189 - printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
4190 + printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
4191 (void *) regs->u_regs[UREG_I7]);
4192 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
4193 "pte[%lx] error[%lx]\n",
4194 @@ -1987,9 +1998,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
4195
4196 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
4197 regs->tpc, tl);
4198 - printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
4199 + printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
4200 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
4201 - printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
4202 + printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
4203 (void *) regs->u_regs[UREG_I7]);
4204 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
4205 "pte[%lx] error[%lx]\n",
4206 @@ -2195,13 +2206,13 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
4207 fp = (unsigned long)sf->fp + STACK_BIAS;
4208 }
4209
4210 - printk(" [%016lx] %pS\n", pc, (void *) pc);
4211 + printk(" [%016lx] %pA\n", pc, (void *) pc);
4212 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
4213 if ((pc + 8UL) == (unsigned long) &return_to_handler) {
4214 int index = tsk->curr_ret_stack;
4215 if (tsk->ret_stack && index >= graph) {
4216 pc = tsk->ret_stack[index - graph].ret;
4217 - printk(" [%016lx] %pS\n", pc, (void *) pc);
4218 + printk(" [%016lx] %pA\n", pc, (void *) pc);
4219 graph++;
4220 }
4221 }
4222 @@ -2226,6 +2237,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
4223 return (struct reg_window *) (fp + STACK_BIAS);
4224 }
4225
4226 +extern void gr_handle_kernel_exploit(void);
4227 +
4228 void die_if_kernel(char *str, struct pt_regs *regs)
4229 {
4230 static int die_counter;
4231 @@ -2254,7 +2267,7 @@ void die_if_kernel(char *str, struct pt_regs *regs)
4232 while (rw &&
4233 count++ < 30 &&
4234 kstack_valid(tp, (unsigned long) rw)) {
4235 - printk("Caller[%016lx]: %pS\n", rw->ins[7],
4236 + printk("Caller[%016lx]: %pA\n", rw->ins[7],
4237 (void *) rw->ins[7]);
4238
4239 rw = kernel_stack_up(rw);
4240 @@ -2267,8 +2280,10 @@ void die_if_kernel(char *str, struct pt_regs *regs)
4241 }
4242 user_instruction_dump ((unsigned int __user *) regs->tpc);
4243 }
4244 - if (regs->tstate & TSTATE_PRIV)
4245 + if (regs->tstate & TSTATE_PRIV) {
4246 + gr_handle_kernel_exploit();
4247 do_exit(SIGKILL);
4248 + }
4249 do_exit(SIGSEGV);
4250 }
4251 EXPORT_SYMBOL(die_if_kernel);
4252 diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
4253 index 76e4ac1..78f8bb1 100644
4254 --- a/arch/sparc/kernel/unaligned_64.c
4255 +++ b/arch/sparc/kernel/unaligned_64.c
4256 @@ -279,7 +279,7 @@ static void log_unaligned(struct pt_regs *regs)
4257 static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
4258
4259 if (__ratelimit(&ratelimit)) {
4260 - printk("Kernel unaligned access at TPC[%lx] %pS\n",
4261 + printk("Kernel unaligned access at TPC[%lx] %pA\n",
4262 regs->tpc, (void *) regs->tpc);
4263 }
4264 }
4265 diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
4266 index a3fc437..fea9957 100644
4267 --- a/arch/sparc/lib/Makefile
4268 +++ b/arch/sparc/lib/Makefile
4269 @@ -2,7 +2,7 @@
4270 #
4271
4272 asflags-y := -ansi -DST_DIV0=0x02
4273 -ccflags-y := -Werror
4274 +#ccflags-y := -Werror
4275
4276 lib-$(CONFIG_SPARC32) += mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o
4277 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
4278 diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
4279 index 59186e0..f747d7a 100644
4280 --- a/arch/sparc/lib/atomic_64.S
4281 +++ b/arch/sparc/lib/atomic_64.S
4282 @@ -18,7 +18,12 @@
4283 atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
4284 BACKOFF_SETUP(%o2)
4285 1: lduw [%o1], %g1
4286 - add %g1, %o0, %g7
4287 + addcc %g1, %o0, %g7
4288 +
4289 +#ifdef CONFIG_PAX_REFCOUNT
4290 + tvs %icc, 6
4291 +#endif
4292 +
4293 cas [%o1], %g1, %g7
4294 cmp %g1, %g7
4295 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
4296 @@ -28,12 +33,32 @@ atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
4297 2: BACKOFF_SPIN(%o2, %o3, 1b)
4298 .size atomic_add, .-atomic_add
4299
4300 + .globl atomic_add_unchecked
4301 + .type atomic_add_unchecked,#function
4302 +atomic_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4303 + BACKOFF_SETUP(%o2)
4304 +1: lduw [%o1], %g1
4305 + add %g1, %o0, %g7
4306 + cas [%o1], %g1, %g7
4307 + cmp %g1, %g7
4308 + bne,pn %icc, 2f
4309 + nop
4310 + retl
4311 + nop
4312 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4313 + .size atomic_add_unchecked, .-atomic_add_unchecked
4314 +
4315 .globl atomic_sub
4316 .type atomic_sub,#function
4317 atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
4318 BACKOFF_SETUP(%o2)
4319 1: lduw [%o1], %g1
4320 - sub %g1, %o0, %g7
4321 + subcc %g1, %o0, %g7
4322 +
4323 +#ifdef CONFIG_PAX_REFCOUNT
4324 + tvs %icc, 6
4325 +#endif
4326 +
4327 cas [%o1], %g1, %g7
4328 cmp %g1, %g7
4329 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
4330 @@ -43,12 +68,32 @@ atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
4331 2: BACKOFF_SPIN(%o2, %o3, 1b)
4332 .size atomic_sub, .-atomic_sub
4333
4334 + .globl atomic_sub_unchecked
4335 + .type atomic_sub_unchecked,#function
4336 +atomic_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
4337 + BACKOFF_SETUP(%o2)
4338 +1: lduw [%o1], %g1
4339 + sub %g1, %o0, %g7
4340 + cas [%o1], %g1, %g7
4341 + cmp %g1, %g7
4342 + bne,pn %icc, 2f
4343 + nop
4344 + retl
4345 + nop
4346 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4347 + .size atomic_sub_unchecked, .-atomic_sub_unchecked
4348 +
4349 .globl atomic_add_ret
4350 .type atomic_add_ret,#function
4351 atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
4352 BACKOFF_SETUP(%o2)
4353 1: lduw [%o1], %g1
4354 - add %g1, %o0, %g7
4355 + addcc %g1, %o0, %g7
4356 +
4357 +#ifdef CONFIG_PAX_REFCOUNT
4358 + tvs %icc, 6
4359 +#endif
4360 +
4361 cas [%o1], %g1, %g7
4362 cmp %g1, %g7
4363 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
4364 @@ -58,12 +103,33 @@ atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
4365 2: BACKOFF_SPIN(%o2, %o3, 1b)
4366 .size atomic_add_ret, .-atomic_add_ret
4367
4368 + .globl atomic_add_ret_unchecked
4369 + .type atomic_add_ret_unchecked,#function
4370 +atomic_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4371 + BACKOFF_SETUP(%o2)
4372 +1: lduw [%o1], %g1
4373 + addcc %g1, %o0, %g7
4374 + cas [%o1], %g1, %g7
4375 + cmp %g1, %g7
4376 + bne,pn %icc, 2f
4377 + add %g7, %o0, %g7
4378 + sra %g7, 0, %o0
4379 + retl
4380 + nop
4381 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4382 + .size atomic_add_ret_unchecked, .-atomic_add_ret_unchecked
4383 +
4384 .globl atomic_sub_ret
4385 .type atomic_sub_ret,#function
4386 atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
4387 BACKOFF_SETUP(%o2)
4388 1: lduw [%o1], %g1
4389 - sub %g1, %o0, %g7
4390 + subcc %g1, %o0, %g7
4391 +
4392 +#ifdef CONFIG_PAX_REFCOUNT
4393 + tvs %icc, 6
4394 +#endif
4395 +
4396 cas [%o1], %g1, %g7
4397 cmp %g1, %g7
4398 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
4399 @@ -78,7 +144,12 @@ atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
4400 atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
4401 BACKOFF_SETUP(%o2)
4402 1: ldx [%o1], %g1
4403 - add %g1, %o0, %g7
4404 + addcc %g1, %o0, %g7
4405 +
4406 +#ifdef CONFIG_PAX_REFCOUNT
4407 + tvs %xcc, 6
4408 +#endif
4409 +
4410 casx [%o1], %g1, %g7
4411 cmp %g1, %g7
4412 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
4413 @@ -88,12 +159,32 @@ atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
4414 2: BACKOFF_SPIN(%o2, %o3, 1b)
4415 .size atomic64_add, .-atomic64_add
4416
4417 + .globl atomic64_add_unchecked
4418 + .type atomic64_add_unchecked,#function
4419 +atomic64_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4420 + BACKOFF_SETUP(%o2)
4421 +1: ldx [%o1], %g1
4422 + addcc %g1, %o0, %g7
4423 + casx [%o1], %g1, %g7
4424 + cmp %g1, %g7
4425 + bne,pn %xcc, 2f
4426 + nop
4427 + retl
4428 + nop
4429 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4430 + .size atomic64_add_unchecked, .-atomic64_add_unchecked
4431 +
4432 .globl atomic64_sub
4433 .type atomic64_sub,#function
4434 atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
4435 BACKOFF_SETUP(%o2)
4436 1: ldx [%o1], %g1
4437 - sub %g1, %o0, %g7
4438 + subcc %g1, %o0, %g7
4439 +
4440 +#ifdef CONFIG_PAX_REFCOUNT
4441 + tvs %xcc, 6
4442 +#endif
4443 +
4444 casx [%o1], %g1, %g7
4445 cmp %g1, %g7
4446 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
4447 @@ -103,12 +194,32 @@ atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
4448 2: BACKOFF_SPIN(%o2, %o3, 1b)
4449 .size atomic64_sub, .-atomic64_sub
4450
4451 + .globl atomic64_sub_unchecked
4452 + .type atomic64_sub_unchecked,#function
4453 +atomic64_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
4454 + BACKOFF_SETUP(%o2)
4455 +1: ldx [%o1], %g1
4456 + subcc %g1, %o0, %g7
4457 + casx [%o1], %g1, %g7
4458 + cmp %g1, %g7
4459 + bne,pn %xcc, 2f
4460 + nop
4461 + retl
4462 + nop
4463 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4464 + .size atomic64_sub_unchecked, .-atomic64_sub_unchecked
4465 +
4466 .globl atomic64_add_ret
4467 .type atomic64_add_ret,#function
4468 atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
4469 BACKOFF_SETUP(%o2)
4470 1: ldx [%o1], %g1
4471 - add %g1, %o0, %g7
4472 + addcc %g1, %o0, %g7
4473 +
4474 +#ifdef CONFIG_PAX_REFCOUNT
4475 + tvs %xcc, 6
4476 +#endif
4477 +
4478 casx [%o1], %g1, %g7
4479 cmp %g1, %g7
4480 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
4481 @@ -118,12 +229,33 @@ atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
4482 2: BACKOFF_SPIN(%o2, %o3, 1b)
4483 .size atomic64_add_ret, .-atomic64_add_ret
4484
4485 + .globl atomic64_add_ret_unchecked
4486 + .type atomic64_add_ret_unchecked,#function
4487 +atomic64_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4488 + BACKOFF_SETUP(%o2)
4489 +1: ldx [%o1], %g1
4490 + addcc %g1, %o0, %g7
4491 + casx [%o1], %g1, %g7
4492 + cmp %g1, %g7
4493 + bne,pn %xcc, 2f
4494 + add %g7, %o0, %g7
4495 + mov %g7, %o0
4496 + retl
4497 + nop
4498 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4499 + .size atomic64_add_ret_unchecked, .-atomic64_add_ret_unchecked
4500 +
4501 .globl atomic64_sub_ret
4502 .type atomic64_sub_ret,#function
4503 atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
4504 BACKOFF_SETUP(%o2)
4505 1: ldx [%o1], %g1
4506 - sub %g1, %o0, %g7
4507 + subcc %g1, %o0, %g7
4508 +
4509 +#ifdef CONFIG_PAX_REFCOUNT
4510 + tvs %xcc, 6
4511 +#endif
4512 +
4513 casx [%o1], %g1, %g7
4514 cmp %g1, %g7
4515 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
4516 diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
4517 index 1b30bb3..b4a16c7 100644
4518 --- a/arch/sparc/lib/ksyms.c
4519 +++ b/arch/sparc/lib/ksyms.c
4520 @@ -142,12 +142,18 @@ EXPORT_SYMBOL(__downgrade_write);
4521
4522 /* Atomic counter implementation. */
4523 EXPORT_SYMBOL(atomic_add);
4524 +EXPORT_SYMBOL(atomic_add_unchecked);
4525 EXPORT_SYMBOL(atomic_add_ret);
4526 +EXPORT_SYMBOL(atomic_add_ret_unchecked);
4527 EXPORT_SYMBOL(atomic_sub);
4528 +EXPORT_SYMBOL(atomic_sub_unchecked);
4529 EXPORT_SYMBOL(atomic_sub_ret);
4530 EXPORT_SYMBOL(atomic64_add);
4531 +EXPORT_SYMBOL(atomic64_add_unchecked);
4532 EXPORT_SYMBOL(atomic64_add_ret);
4533 +EXPORT_SYMBOL(atomic64_add_ret_unchecked);
4534 EXPORT_SYMBOL(atomic64_sub);
4535 +EXPORT_SYMBOL(atomic64_sub_unchecked);
4536 EXPORT_SYMBOL(atomic64_sub_ret);
4537
4538 /* Atomic bit operations. */
4539 diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
4540 index 301421c..e2535d1 100644
4541 --- a/arch/sparc/mm/Makefile
4542 +++ b/arch/sparc/mm/Makefile
4543 @@ -2,7 +2,7 @@
4544 #
4545
4546 asflags-y := -ansi
4547 -ccflags-y := -Werror
4548 +#ccflags-y := -Werror
4549
4550 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o gup.o
4551 obj-y += fault_$(BITS).o
4552 diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
4553 index 8023fd7..c8e89e9 100644
4554 --- a/arch/sparc/mm/fault_32.c
4555 +++ b/arch/sparc/mm/fault_32.c
4556 @@ -21,6 +21,9 @@
4557 #include <linux/perf_event.h>
4558 #include <linux/interrupt.h>
4559 #include <linux/kdebug.h>
4560 +#include <linux/slab.h>
4561 +#include <linux/pagemap.h>
4562 +#include <linux/compiler.h>
4563
4564 #include <asm/system.h>
4565 #include <asm/page.h>
4566 @@ -208,6 +211,268 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
4567 return safe_compute_effective_address(regs, insn);
4568 }
4569
4570 +#ifdef CONFIG_PAX_PAGEEXEC
4571 +#ifdef CONFIG_PAX_DLRESOLVE
4572 +static void pax_emuplt_close(struct vm_area_struct *vma)
4573 +{
4574 + vma->vm_mm->call_dl_resolve = 0UL;
4575 +}
4576 +
4577 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
4578 +{
4579 + unsigned int *kaddr;
4580 +
4581 + vmf->page = alloc_page(GFP_HIGHUSER);
4582 + if (!vmf->page)
4583 + return VM_FAULT_OOM;
4584 +
4585 + kaddr = kmap(vmf->page);
4586 + memset(kaddr, 0, PAGE_SIZE);
4587 + kaddr[0] = 0x9DE3BFA8U; /* save */
4588 + flush_dcache_page(vmf->page);
4589 + kunmap(vmf->page);
4590 + return VM_FAULT_MAJOR;
4591 +}
4592 +
4593 +static const struct vm_operations_struct pax_vm_ops = {
4594 + .close = pax_emuplt_close,
4595 + .fault = pax_emuplt_fault
4596 +};
4597 +
4598 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
4599 +{
4600 + int ret;
4601 +
4602 + INIT_LIST_HEAD(&vma->anon_vma_chain);
4603 + vma->vm_mm = current->mm;
4604 + vma->vm_start = addr;
4605 + vma->vm_end = addr + PAGE_SIZE;
4606 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
4607 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
4608 + vma->vm_ops = &pax_vm_ops;
4609 +
4610 + ret = insert_vm_struct(current->mm, vma);
4611 + if (ret)
4612 + return ret;
4613 +
4614 + ++current->mm->total_vm;
4615 + return 0;
4616 +}
4617 +#endif
4618 +
4619 +/*
4620 + * PaX: decide what to do with offenders (regs->pc = fault address)
4621 + *
4622 + * returns 1 when task should be killed
4623 + * 2 when patched PLT trampoline was detected
4624 + * 3 when unpatched PLT trampoline was detected
4625 + */
4626 +static int pax_handle_fetch_fault(struct pt_regs *regs)
4627 +{
4628 +
4629 +#ifdef CONFIG_PAX_EMUPLT
4630 + int err;
4631 +
4632 + do { /* PaX: patched PLT emulation #1 */
4633 + unsigned int sethi1, sethi2, jmpl;
4634 +
4635 + err = get_user(sethi1, (unsigned int *)regs->pc);
4636 + err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
4637 + err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
4638 +
4639 + if (err)
4640 + break;
4641 +
4642 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
4643 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
4644 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
4645 + {
4646 + unsigned int addr;
4647 +
4648 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
4649 + addr = regs->u_regs[UREG_G1];
4650 + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4651 + regs->pc = addr;
4652 + regs->npc = addr+4;
4653 + return 2;
4654 + }
4655 + } while (0);
4656 +
4657 + { /* PaX: patched PLT emulation #2 */
4658 + unsigned int ba;
4659 +
4660 + err = get_user(ba, (unsigned int *)regs->pc);
4661 +
4662 + if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
4663 + unsigned int addr;
4664 +
4665 + addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
4666 + regs->pc = addr;
4667 + regs->npc = addr+4;
4668 + return 2;
4669 + }
4670 + }
4671 +
4672 + do { /* PaX: patched PLT emulation #3 */
4673 + unsigned int sethi, jmpl, nop;
4674 +
4675 + err = get_user(sethi, (unsigned int *)regs->pc);
4676 + err |= get_user(jmpl, (unsigned int *)(regs->pc+4));
4677 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
4678 +
4679 + if (err)
4680 + break;
4681 +
4682 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4683 + (jmpl & 0xFFFFE000U) == 0x81C06000U &&
4684 + nop == 0x01000000U)
4685 + {
4686 + unsigned int addr;
4687 +
4688 + addr = (sethi & 0x003FFFFFU) << 10;
4689 + regs->u_regs[UREG_G1] = addr;
4690 + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4691 + regs->pc = addr;
4692 + regs->npc = addr+4;
4693 + return 2;
4694 + }
4695 + } while (0);
4696 +
4697 + do { /* PaX: unpatched PLT emulation step 1 */
4698 + unsigned int sethi, ba, nop;
4699 +
4700 + err = get_user(sethi, (unsigned int *)regs->pc);
4701 + err |= get_user(ba, (unsigned int *)(regs->pc+4));
4702 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
4703 +
4704 + if (err)
4705 + break;
4706 +
4707 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4708 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
4709 + nop == 0x01000000U)
4710 + {
4711 + unsigned int addr, save, call;
4712 +
4713 + if ((ba & 0xFFC00000U) == 0x30800000U)
4714 + addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
4715 + else
4716 + addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
4717 +
4718 + err = get_user(save, (unsigned int *)addr);
4719 + err |= get_user(call, (unsigned int *)(addr+4));
4720 + err |= get_user(nop, (unsigned int *)(addr+8));
4721 + if (err)
4722 + break;
4723 +
4724 +#ifdef CONFIG_PAX_DLRESOLVE
4725 + if (save == 0x9DE3BFA8U &&
4726 + (call & 0xC0000000U) == 0x40000000U &&
4727 + nop == 0x01000000U)
4728 + {
4729 + struct vm_area_struct *vma;
4730 + unsigned long call_dl_resolve;
4731 +
4732 + down_read(&current->mm->mmap_sem);
4733 + call_dl_resolve = current->mm->call_dl_resolve;
4734 + up_read(&current->mm->mmap_sem);
4735 + if (likely(call_dl_resolve))
4736 + goto emulate;
4737 +
4738 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
4739 +
4740 + down_write(&current->mm->mmap_sem);
4741 + if (current->mm->call_dl_resolve) {
4742 + call_dl_resolve = current->mm->call_dl_resolve;
4743 + up_write(&current->mm->mmap_sem);
4744 + if (vma)
4745 + kmem_cache_free(vm_area_cachep, vma);
4746 + goto emulate;
4747 + }
4748 +
4749 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
4750 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
4751 + up_write(&current->mm->mmap_sem);
4752 + if (vma)
4753 + kmem_cache_free(vm_area_cachep, vma);
4754 + return 1;
4755 + }
4756 +
4757 + if (pax_insert_vma(vma, call_dl_resolve)) {
4758 + up_write(&current->mm->mmap_sem);
4759 + kmem_cache_free(vm_area_cachep, vma);
4760 + return 1;
4761 + }
4762 +
4763 + current->mm->call_dl_resolve = call_dl_resolve;
4764 + up_write(&current->mm->mmap_sem);
4765 +
4766 +emulate:
4767 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4768 + regs->pc = call_dl_resolve;
4769 + regs->npc = addr+4;
4770 + return 3;
4771 + }
4772 +#endif
4773 +
4774 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
4775 + if ((save & 0xFFC00000U) == 0x05000000U &&
4776 + (call & 0xFFFFE000U) == 0x85C0A000U &&
4777 + nop == 0x01000000U)
4778 + {
4779 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4780 + regs->u_regs[UREG_G2] = addr + 4;
4781 + addr = (save & 0x003FFFFFU) << 10;
4782 + addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4783 + regs->pc = addr;
4784 + regs->npc = addr+4;
4785 + return 3;
4786 + }
4787 + }
4788 + } while (0);
4789 +
4790 + do { /* PaX: unpatched PLT emulation step 2 */
4791 + unsigned int save, call, nop;
4792 +
4793 + err = get_user(save, (unsigned int *)(regs->pc-4));
4794 + err |= get_user(call, (unsigned int *)regs->pc);
4795 + err |= get_user(nop, (unsigned int *)(regs->pc+4));
4796 + if (err)
4797 + break;
4798 +
4799 + if (save == 0x9DE3BFA8U &&
4800 + (call & 0xC0000000U) == 0x40000000U &&
4801 + nop == 0x01000000U)
4802 + {
4803 + unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
4804 +
4805 + regs->u_regs[UREG_RETPC] = regs->pc;
4806 + regs->pc = dl_resolve;
4807 + regs->npc = dl_resolve+4;
4808 + return 3;
4809 + }
4810 + } while (0);
4811 +#endif
4812 +
4813 + return 1;
4814 +}
4815 +
4816 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
4817 +{
4818 + unsigned long i;
4819 +
4820 + printk(KERN_ERR "PAX: bytes at PC: ");
4821 + for (i = 0; i < 8; i++) {
4822 + unsigned int c;
4823 + if (get_user(c, (unsigned int *)pc+i))
4824 + printk(KERN_CONT "???????? ");
4825 + else
4826 + printk(KERN_CONT "%08x ", c);
4827 + }
4828 + printk("\n");
4829 +}
4830 +#endif
4831 +
4832 static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
4833 int text_fault)
4834 {
4835 @@ -280,6 +545,24 @@ good_area:
4836 if(!(vma->vm_flags & VM_WRITE))
4837 goto bad_area;
4838 } else {
4839 +
4840 +#ifdef CONFIG_PAX_PAGEEXEC
4841 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
4842 + up_read(&mm->mmap_sem);
4843 + switch (pax_handle_fetch_fault(regs)) {
4844 +
4845 +#ifdef CONFIG_PAX_EMUPLT
4846 + case 2:
4847 + case 3:
4848 + return;
4849 +#endif
4850 +
4851 + }
4852 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
4853 + do_group_exit(SIGKILL);
4854 + }
4855 +#endif
4856 +
4857 /* Allow reads even for write-only mappings */
4858 if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
4859 goto bad_area;
4860 diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
4861 index 504c062..6fcb9c6 100644
4862 --- a/arch/sparc/mm/fault_64.c
4863 +++ b/arch/sparc/mm/fault_64.c
4864 @@ -21,6 +21,9 @@
4865 #include <linux/kprobes.h>
4866 #include <linux/kdebug.h>
4867 #include <linux/percpu.h>
4868 +#include <linux/slab.h>
4869 +#include <linux/pagemap.h>
4870 +#include <linux/compiler.h>
4871
4872 #include <asm/page.h>
4873 #include <asm/pgtable.h>
4874 @@ -74,7 +77,7 @@ static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
4875 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
4876 regs->tpc);
4877 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
4878 - printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
4879 + printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
4880 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
4881 dump_stack();
4882 unhandled_fault(regs->tpc, current, regs);
4883 @@ -272,6 +275,457 @@ static void noinline __kprobes bogus_32bit_fault_address(struct pt_regs *regs,
4884 show_regs(regs);
4885 }
4886
4887 +#ifdef CONFIG_PAX_PAGEEXEC
4888 +#ifdef CONFIG_PAX_DLRESOLVE
4889 +static void pax_emuplt_close(struct vm_area_struct *vma)
4890 +{
4891 + vma->vm_mm->call_dl_resolve = 0UL;
4892 +}
4893 +
4894 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
4895 +{
4896 + unsigned int *kaddr;
4897 +
4898 + vmf->page = alloc_page(GFP_HIGHUSER);
4899 + if (!vmf->page)
4900 + return VM_FAULT_OOM;
4901 +
4902 + kaddr = kmap(vmf->page);
4903 + memset(kaddr, 0, PAGE_SIZE);
4904 + kaddr[0] = 0x9DE3BFA8U; /* save */
4905 + flush_dcache_page(vmf->page);
4906 + kunmap(vmf->page);
4907 + return VM_FAULT_MAJOR;
4908 +}
4909 +
4910 +static const struct vm_operations_struct pax_vm_ops = {
4911 + .close = pax_emuplt_close,
4912 + .fault = pax_emuplt_fault
4913 +};
4914 +
4915 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
4916 +{
4917 + int ret;
4918 +
4919 + INIT_LIST_HEAD(&vma->anon_vma_chain);
4920 + vma->vm_mm = current->mm;
4921 + vma->vm_start = addr;
4922 + vma->vm_end = addr + PAGE_SIZE;
4923 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
4924 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
4925 + vma->vm_ops = &pax_vm_ops;
4926 +
4927 + ret = insert_vm_struct(current->mm, vma);
4928 + if (ret)
4929 + return ret;
4930 +
4931 + ++current->mm->total_vm;
4932 + return 0;
4933 +}
4934 +#endif
4935 +
4936 +/*
4937 + * PaX: decide what to do with offenders (regs->tpc = fault address)
4938 + *
4939 + * returns 1 when task should be killed
4940 + * 2 when patched PLT trampoline was detected
4941 + * 3 when unpatched PLT trampoline was detected
4942 + */
4943 +static int pax_handle_fetch_fault(struct pt_regs *regs)
4944 +{
4945 +
4946 +#ifdef CONFIG_PAX_EMUPLT
4947 + int err;
4948 +
4949 + do { /* PaX: patched PLT emulation #1 */
4950 + unsigned int sethi1, sethi2, jmpl;
4951 +
4952 + err = get_user(sethi1, (unsigned int *)regs->tpc);
4953 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
4954 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
4955 +
4956 + if (err)
4957 + break;
4958 +
4959 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
4960 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
4961 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
4962 + {
4963 + unsigned long addr;
4964 +
4965 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
4966 + addr = regs->u_regs[UREG_G1];
4967 + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
4968 +
4969 + if (test_thread_flag(TIF_32BIT))
4970 + addr &= 0xFFFFFFFFUL;
4971 +
4972 + regs->tpc = addr;
4973 + regs->tnpc = addr+4;
4974 + return 2;
4975 + }
4976 + } while (0);
4977 +
4978 + { /* PaX: patched PLT emulation #2 */
4979 + unsigned int ba;
4980 +
4981 + err = get_user(ba, (unsigned int *)regs->tpc);
4982 +
4983 + if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
4984 + unsigned long addr;
4985 +
4986 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
4987 +
4988 + if (test_thread_flag(TIF_32BIT))
4989 + addr &= 0xFFFFFFFFUL;
4990 +
4991 + regs->tpc = addr;
4992 + regs->tnpc = addr+4;
4993 + return 2;
4994 + }
4995 + }
4996 +
4997 + do { /* PaX: patched PLT emulation #3 */
4998 + unsigned int sethi, jmpl, nop;
4999 +
5000 + err = get_user(sethi, (unsigned int *)regs->tpc);
5001 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+4));
5002 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
5003 +
5004 + if (err)
5005 + break;
5006 +
5007 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5008 + (jmpl & 0xFFFFE000U) == 0x81C06000U &&
5009 + nop == 0x01000000U)
5010 + {
5011 + unsigned long addr;
5012 +
5013 + addr = (sethi & 0x003FFFFFU) << 10;
5014 + regs->u_regs[UREG_G1] = addr;
5015 + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
5016 +
5017 + if (test_thread_flag(TIF_32BIT))
5018 + addr &= 0xFFFFFFFFUL;
5019 +
5020 + regs->tpc = addr;
5021 + regs->tnpc = addr+4;
5022 + return 2;
5023 + }
5024 + } while (0);
5025 +
5026 + do { /* PaX: patched PLT emulation #4 */
5027 + unsigned int sethi, mov1, call, mov2;
5028 +
5029 + err = get_user(sethi, (unsigned int *)regs->tpc);
5030 + err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
5031 + err |= get_user(call, (unsigned int *)(regs->tpc+8));
5032 + err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
5033 +
5034 + if (err)
5035 + break;
5036 +
5037 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5038 + mov1 == 0x8210000FU &&
5039 + (call & 0xC0000000U) == 0x40000000U &&
5040 + mov2 == 0x9E100001U)
5041 + {
5042 + unsigned long addr;
5043 +
5044 + regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
5045 + addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
5046 +
5047 + if (test_thread_flag(TIF_32BIT))
5048 + addr &= 0xFFFFFFFFUL;
5049 +
5050 + regs->tpc = addr;
5051 + regs->tnpc = addr+4;
5052 + return 2;
5053 + }
5054 + } while (0);
5055 +
5056 + do { /* PaX: patched PLT emulation #5 */
5057 + unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
5058 +
5059 + err = get_user(sethi, (unsigned int *)regs->tpc);
5060 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
5061 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
5062 + err |= get_user(or1, (unsigned int *)(regs->tpc+12));
5063 + err |= get_user(or2, (unsigned int *)(regs->tpc+16));
5064 + err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
5065 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
5066 + err |= get_user(nop, (unsigned int *)(regs->tpc+28));
5067 +
5068 + if (err)
5069 + break;
5070 +
5071 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5072 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
5073 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
5074 + (or1 & 0xFFFFE000U) == 0x82106000U &&
5075 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
5076 + sllx == 0x83287020U &&
5077 + jmpl == 0x81C04005U &&
5078 + nop == 0x01000000U)
5079 + {
5080 + unsigned long addr;
5081 +
5082 + regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
5083 + regs->u_regs[UREG_G1] <<= 32;
5084 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
5085 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
5086 + regs->tpc = addr;
5087 + regs->tnpc = addr+4;
5088 + return 2;
5089 + }
5090 + } while (0);
5091 +
5092 + do { /* PaX: patched PLT emulation #6 */
5093 + unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
5094 +
5095 + err = get_user(sethi, (unsigned int *)regs->tpc);
5096 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
5097 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
5098 + err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
5099 + err |= get_user(or, (unsigned int *)(regs->tpc+16));
5100 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
5101 + err |= get_user(nop, (unsigned int *)(regs->tpc+24));
5102 +
5103 + if (err)
5104 + break;
5105 +
5106 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5107 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
5108 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
5109 + sllx == 0x83287020U &&
5110 + (or & 0xFFFFE000U) == 0x8A116000U &&
5111 + jmpl == 0x81C04005U &&
5112 + nop == 0x01000000U)
5113 + {
5114 + unsigned long addr;
5115 +
5116 + regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
5117 + regs->u_regs[UREG_G1] <<= 32;
5118 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
5119 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
5120 + regs->tpc = addr;
5121 + regs->tnpc = addr+4;
5122 + return 2;
5123 + }
5124 + } while (0);
5125 +
5126 + do { /* PaX: unpatched PLT emulation step 1 */
5127 + unsigned int sethi, ba, nop;
5128 +
5129 + err = get_user(sethi, (unsigned int *)regs->tpc);
5130 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
5131 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
5132 +
5133 + if (err)
5134 + break;
5135 +
5136 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5137 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
5138 + nop == 0x01000000U)
5139 + {
5140 + unsigned long addr;
5141 + unsigned int save, call;
5142 + unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
5143 +
5144 + if ((ba & 0xFFC00000U) == 0x30800000U)
5145 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
5146 + else
5147 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
5148 +
5149 + if (test_thread_flag(TIF_32BIT))
5150 + addr &= 0xFFFFFFFFUL;
5151 +
5152 + err = get_user(save, (unsigned int *)addr);
5153 + err |= get_user(call, (unsigned int *)(addr+4));
5154 + err |= get_user(nop, (unsigned int *)(addr+8));
5155 + if (err)
5156 + break;
5157 +
5158 +#ifdef CONFIG_PAX_DLRESOLVE
5159 + if (save == 0x9DE3BFA8U &&
5160 + (call & 0xC0000000U) == 0x40000000U &&
5161 + nop == 0x01000000U)
5162 + {
5163 + struct vm_area_struct *vma;
5164 + unsigned long call_dl_resolve;
5165 +
5166 + down_read(&current->mm->mmap_sem);
5167 + call_dl_resolve = current->mm->call_dl_resolve;
5168 + up_read(&current->mm->mmap_sem);
5169 + if (likely(call_dl_resolve))
5170 + goto emulate;
5171 +
5172 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
5173 +
5174 + down_write(&current->mm->mmap_sem);
5175 + if (current->mm->call_dl_resolve) {
5176 + call_dl_resolve = current->mm->call_dl_resolve;
5177 + up_write(&current->mm->mmap_sem);
5178 + if (vma)
5179 + kmem_cache_free(vm_area_cachep, vma);
5180 + goto emulate;
5181 + }
5182 +
5183 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
5184 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
5185 + up_write(&current->mm->mmap_sem);
5186 + if (vma)
5187 + kmem_cache_free(vm_area_cachep, vma);
5188 + return 1;
5189 + }
5190 +
5191 + if (pax_insert_vma(vma, call_dl_resolve)) {
5192 + up_write(&current->mm->mmap_sem);
5193 + kmem_cache_free(vm_area_cachep, vma);
5194 + return 1;
5195 + }
5196 +
5197 + current->mm->call_dl_resolve = call_dl_resolve;
5198 + up_write(&current->mm->mmap_sem);
5199 +
5200 +emulate:
5201 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5202 + regs->tpc = call_dl_resolve;
5203 + regs->tnpc = addr+4;
5204 + return 3;
5205 + }
5206 +#endif
5207 +
5208 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
5209 + if ((save & 0xFFC00000U) == 0x05000000U &&
5210 + (call & 0xFFFFE000U) == 0x85C0A000U &&
5211 + nop == 0x01000000U)
5212 + {
5213 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5214 + regs->u_regs[UREG_G2] = addr + 4;
5215 + addr = (save & 0x003FFFFFU) << 10;
5216 + addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
5217 +
5218 + if (test_thread_flag(TIF_32BIT))
5219 + addr &= 0xFFFFFFFFUL;
5220 +
5221 + regs->tpc = addr;
5222 + regs->tnpc = addr+4;
5223 + return 3;
5224 + }
5225 +
5226 + /* PaX: 64-bit PLT stub */
5227 + err = get_user(sethi1, (unsigned int *)addr);
5228 + err |= get_user(sethi2, (unsigned int *)(addr+4));
5229 + err |= get_user(or1, (unsigned int *)(addr+8));
5230 + err |= get_user(or2, (unsigned int *)(addr+12));
5231 + err |= get_user(sllx, (unsigned int *)(addr+16));
5232 + err |= get_user(add, (unsigned int *)(addr+20));
5233 + err |= get_user(jmpl, (unsigned int *)(addr+24));
5234 + err |= get_user(nop, (unsigned int *)(addr+28));
5235 + if (err)
5236 + break;
5237 +
5238 + if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
5239 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
5240 + (or1 & 0xFFFFE000U) == 0x88112000U &&
5241 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
5242 + sllx == 0x89293020U &&
5243 + add == 0x8A010005U &&
5244 + jmpl == 0x89C14000U &&
5245 + nop == 0x01000000U)
5246 + {
5247 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5248 + regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
5249 + regs->u_regs[UREG_G4] <<= 32;
5250 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
5251 + regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
5252 + regs->u_regs[UREG_G4] = addr + 24;
5253 + addr = regs->u_regs[UREG_G5];
5254 + regs->tpc = addr;
5255 + regs->tnpc = addr+4;
5256 + return 3;
5257 + }
5258 + }
5259 + } while (0);
5260 +
5261 +#ifdef CONFIG_PAX_DLRESOLVE
5262 + do { /* PaX: unpatched PLT emulation step 2 */
5263 + unsigned int save, call, nop;
5264 +
5265 + err = get_user(save, (unsigned int *)(regs->tpc-4));
5266 + err |= get_user(call, (unsigned int *)regs->tpc);
5267 + err |= get_user(nop, (unsigned int *)(regs->tpc+4));
5268 + if (err)
5269 + break;
5270 +
5271 + if (save == 0x9DE3BFA8U &&
5272 + (call & 0xC0000000U) == 0x40000000U &&
5273 + nop == 0x01000000U)
5274 + {
5275 + unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
5276 +
5277 + if (test_thread_flag(TIF_32BIT))
5278 + dl_resolve &= 0xFFFFFFFFUL;
5279 +
5280 + regs->u_regs[UREG_RETPC] = regs->tpc;
5281 + regs->tpc = dl_resolve;
5282 + regs->tnpc = dl_resolve+4;
5283 + return 3;
5284 + }
5285 + } while (0);
5286 +#endif
5287 +
5288 + do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
5289 + unsigned int sethi, ba, nop;
5290 +
5291 + err = get_user(sethi, (unsigned int *)regs->tpc);
5292 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
5293 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
5294 +
5295 + if (err)
5296 + break;
5297 +
5298 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5299 + (ba & 0xFFF00000U) == 0x30600000U &&
5300 + nop == 0x01000000U)
5301 + {
5302 + unsigned long addr;
5303 +
5304 + addr = (sethi & 0x003FFFFFU) << 10;
5305 + regs->u_regs[UREG_G1] = addr;
5306 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
5307 +
5308 + if (test_thread_flag(TIF_32BIT))
5309 + addr &= 0xFFFFFFFFUL;
5310 +
5311 + regs->tpc = addr;
5312 + regs->tnpc = addr+4;
5313 + return 2;
5314 + }
5315 + } while (0);
5316 +
5317 +#endif
5318 +
5319 + return 1;
5320 +}
5321 +
5322 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
5323 +{
5324 + unsigned long i;
5325 +
5326 + printk(KERN_ERR "PAX: bytes at PC: ");
5327 + for (i = 0; i < 8; i++) {
5328 + unsigned int c;
5329 + if (get_user(c, (unsigned int *)pc+i))
5330 + printk(KERN_CONT "???????? ");
5331 + else
5332 + printk(KERN_CONT "%08x ", c);
5333 + }
5334 + printk("\n");
5335 +}
5336 +#endif
5337 +
5338 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
5339 {
5340 struct mm_struct *mm = current->mm;
5341 @@ -340,6 +794,29 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
5342 if (!vma)
5343 goto bad_area;
5344
5345 +#ifdef CONFIG_PAX_PAGEEXEC
5346 + /* PaX: detect ITLB misses on non-exec pages */
5347 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
5348 + !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
5349 + {
5350 + if (address != regs->tpc)
5351 + goto good_area;
5352 +
5353 + up_read(&mm->mmap_sem);
5354 + switch (pax_handle_fetch_fault(regs)) {
5355 +
5356 +#ifdef CONFIG_PAX_EMUPLT
5357 + case 2:
5358 + case 3:
5359 + return;
5360 +#endif
5361 +
5362 + }
5363 + pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
5364 + do_group_exit(SIGKILL);
5365 + }
5366 +#endif
5367 +
5368 /* Pure DTLB misses do not tell us whether the fault causing
5369 * load/store/atomic was a write or not, it only says that there
5370 * was no match. So in such a case we (carefully) read the
5371 diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
5372 index 07e1453..0a7d9e9 100644
5373 --- a/arch/sparc/mm/hugetlbpage.c
5374 +++ b/arch/sparc/mm/hugetlbpage.c
5375 @@ -67,7 +67,7 @@ full_search:
5376 }
5377 return -ENOMEM;
5378 }
5379 - if (likely(!vma || addr + len <= vma->vm_start)) {
5380 + if (likely(check_heap_stack_gap(vma, addr, len))) {
5381 /*
5382 * Remember the place where we stopped the search:
5383 */
5384 @@ -106,7 +106,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5385 /* make sure it can fit in the remaining address space */
5386 if (likely(addr > len)) {
5387 vma = find_vma(mm, addr-len);
5388 - if (!vma || addr <= vma->vm_start) {
5389 + if (check_heap_stack_gap(vma, addr - len, len)) {
5390 /* remember the address as a hint for next time */
5391 return (mm->free_area_cache = addr-len);
5392 }
5393 @@ -115,16 +115,17 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5394 if (unlikely(mm->mmap_base < len))
5395 goto bottomup;
5396
5397 - addr = (mm->mmap_base-len) & HPAGE_MASK;
5398 + addr = mm->mmap_base - len;
5399
5400 do {
5401 + addr &= HPAGE_MASK;
5402 /*
5403 * Lookup failure means no vma is above this address,
5404 * else if new region fits below vma->vm_start,
5405 * return with success:
5406 */
5407 vma = find_vma(mm, addr);
5408 - if (likely(!vma || addr+len <= vma->vm_start)) {
5409 + if (likely(check_heap_stack_gap(vma, addr, len))) {
5410 /* remember the address as a hint for next time */
5411 return (mm->free_area_cache = addr);
5412 }
5413 @@ -134,8 +135,8 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5414 mm->cached_hole_size = vma->vm_start - addr;
5415
5416 /* try just below the current vma->vm_start */
5417 - addr = (vma->vm_start-len) & HPAGE_MASK;
5418 - } while (likely(len < vma->vm_start));
5419 + addr = skip_heap_stack_gap(vma, len);
5420 + } while (!IS_ERR_VALUE(addr));
5421
5422 bottomup:
5423 /*
5424 @@ -181,8 +182,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
5425 if (addr) {
5426 addr = ALIGN(addr, HPAGE_SIZE);
5427 vma = find_vma(mm, addr);
5428 - if (task_size - len >= addr &&
5429 - (!vma || addr + len <= vma->vm_start))
5430 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
5431 return addr;
5432 }
5433 if (mm->get_unmapped_area == arch_get_unmapped_area)
5434 diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
5435 index 7b00de6..78239f4 100644
5436 --- a/arch/sparc/mm/init_32.c
5437 +++ b/arch/sparc/mm/init_32.c
5438 @@ -316,6 +316,9 @@ extern void device_scan(void);
5439 pgprot_t PAGE_SHARED __read_mostly;
5440 EXPORT_SYMBOL(PAGE_SHARED);
5441
5442 +pgprot_t PAGE_SHARED_NOEXEC __read_mostly;
5443 +EXPORT_SYMBOL(PAGE_SHARED_NOEXEC);
5444 +
5445 void __init paging_init(void)
5446 {
5447 switch(sparc_cpu_model) {
5448 @@ -344,17 +347,17 @@ void __init paging_init(void)
5449
5450 /* Initialize the protection map with non-constant, MMU dependent values. */
5451 protection_map[0] = PAGE_NONE;
5452 - protection_map[1] = PAGE_READONLY;
5453 - protection_map[2] = PAGE_COPY;
5454 - protection_map[3] = PAGE_COPY;
5455 + protection_map[1] = PAGE_READONLY_NOEXEC;
5456 + protection_map[2] = PAGE_COPY_NOEXEC;
5457 + protection_map[3] = PAGE_COPY_NOEXEC;
5458 protection_map[4] = PAGE_READONLY;
5459 protection_map[5] = PAGE_READONLY;
5460 protection_map[6] = PAGE_COPY;
5461 protection_map[7] = PAGE_COPY;
5462 protection_map[8] = PAGE_NONE;
5463 - protection_map[9] = PAGE_READONLY;
5464 - protection_map[10] = PAGE_SHARED;
5465 - protection_map[11] = PAGE_SHARED;
5466 + protection_map[9] = PAGE_READONLY_NOEXEC;
5467 + protection_map[10] = PAGE_SHARED_NOEXEC;
5468 + protection_map[11] = PAGE_SHARED_NOEXEC;
5469 protection_map[12] = PAGE_READONLY;
5470 protection_map[13] = PAGE_READONLY;
5471 protection_map[14] = PAGE_SHARED;
5472 diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
5473 index cbef74e..c38fead 100644
5474 --- a/arch/sparc/mm/srmmu.c
5475 +++ b/arch/sparc/mm/srmmu.c
5476 @@ -2200,6 +2200,13 @@ void __init ld_mmu_srmmu(void)
5477 PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
5478 BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
5479 BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
5480 +
5481 +#ifdef CONFIG_PAX_PAGEEXEC
5482 + PAGE_SHARED_NOEXEC = pgprot_val(SRMMU_PAGE_SHARED_NOEXEC);
5483 + BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC));
5484 + BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC));
5485 +#endif
5486 +
5487 BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
5488 page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
5489
5490 diff --git a/arch/um/Makefile b/arch/um/Makefile
5491 index 7730af6..cce5b19 100644
5492 --- a/arch/um/Makefile
5493 +++ b/arch/um/Makefile
5494 @@ -61,6 +61,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -D__KERNEL__,,\
5495 $(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \
5496 $(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64 -idirafter include
5497
5498 +ifdef CONSTIFY_PLUGIN
5499 +USER_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
5500 +endif
5501 +
5502 #This will adjust *FLAGS accordingly to the platform.
5503 include $(srctree)/$(ARCH_DIR)/Makefile-os-$(OS)
5504
5505 diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h
5506 index 6c03acd..a5e0215 100644
5507 --- a/arch/um/include/asm/kmap_types.h
5508 +++ b/arch/um/include/asm/kmap_types.h
5509 @@ -23,6 +23,7 @@ enum km_type {
5510 KM_IRQ1,
5511 KM_SOFTIRQ0,
5512 KM_SOFTIRQ1,
5513 + KM_CLEARPAGE,
5514 KM_TYPE_NR
5515 };
5516
5517 diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h
5518 index 7cfc3ce..cbd1a58 100644
5519 --- a/arch/um/include/asm/page.h
5520 +++ b/arch/um/include/asm/page.h
5521 @@ -14,6 +14,9 @@
5522 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
5523 #define PAGE_MASK (~(PAGE_SIZE-1))
5524
5525 +#define ktla_ktva(addr) (addr)
5526 +#define ktva_ktla(addr) (addr)
5527 +
5528 #ifndef __ASSEMBLY__
5529
5530 struct page;
5531 diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
5532 index c533835..84db18e 100644
5533 --- a/arch/um/kernel/process.c
5534 +++ b/arch/um/kernel/process.c
5535 @@ -406,22 +406,6 @@ int singlestepping(void * t)
5536 return 2;
5537 }
5538
5539 -/*
5540 - * Only x86 and x86_64 have an arch_align_stack().
5541 - * All other arches have "#define arch_align_stack(x) (x)"
5542 - * in their asm/system.h
5543 - * As this is included in UML from asm-um/system-generic.h,
5544 - * we can use it to behave as the subarch does.
5545 - */
5546 -#ifndef arch_align_stack
5547 -unsigned long arch_align_stack(unsigned long sp)
5548 -{
5549 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
5550 - sp -= get_random_int() % 8192;
5551 - return sp & ~0xf;
5552 -}
5553 -#endif
5554 -
5555 unsigned long get_wchan(struct task_struct *p)
5556 {
5557 unsigned long stack_page, sp, ip;
5558 diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
5559 index efb4294..61bc18c 100644
5560 --- a/arch/x86/Kconfig
5561 +++ b/arch/x86/Kconfig
5562 @@ -235,7 +235,7 @@ config X86_HT
5563
5564 config X86_32_LAZY_GS
5565 def_bool y
5566 - depends on X86_32 && !CC_STACKPROTECTOR
5567 + depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
5568
5569 config ARCH_HWEIGHT_CFLAGS
5570 string
5571 @@ -1022,7 +1022,7 @@ choice
5572
5573 config NOHIGHMEM
5574 bool "off"
5575 - depends on !X86_NUMAQ
5576 + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
5577 ---help---
5578 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
5579 However, the address space of 32-bit x86 processors is only 4
5580 @@ -1059,7 +1059,7 @@ config NOHIGHMEM
5581
5582 config HIGHMEM4G
5583 bool "4GB"
5584 - depends on !X86_NUMAQ
5585 + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
5586 ---help---
5587 Select this if you have a 32-bit processor and between 1 and 4
5588 gigabytes of physical RAM.
5589 @@ -1113,7 +1113,7 @@ config PAGE_OFFSET
5590 hex
5591 default 0xB0000000 if VMSPLIT_3G_OPT
5592 default 0x80000000 if VMSPLIT_2G
5593 - default 0x78000000 if VMSPLIT_2G_OPT
5594 + default 0x70000000 if VMSPLIT_2G_OPT
5595 default 0x40000000 if VMSPLIT_1G
5596 default 0xC0000000
5597 depends on X86_32
5598 @@ -1496,6 +1496,7 @@ config SECCOMP
5599
5600 config CC_STACKPROTECTOR
5601 bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
5602 + depends on X86_64 || !PAX_MEMORY_UDEREF
5603 ---help---
5604 This option turns on the -fstack-protector GCC feature. This
5605 feature puts, at the beginning of functions, a canary value on
5606 @@ -1553,6 +1554,7 @@ config KEXEC_JUMP
5607 config PHYSICAL_START
5608 hex "Physical address where the kernel is loaded" if (EXPERT || CRASH_DUMP)
5609 default "0x1000000"
5610 + range 0x400000 0x40000000
5611 ---help---
5612 This gives the physical address where the kernel is loaded.
5613
5614 @@ -1616,6 +1618,7 @@ config X86_NEED_RELOCS
5615 config PHYSICAL_ALIGN
5616 hex "Alignment value to which kernel should be aligned" if X86_32
5617 default "0x1000000"
5618 + range 0x400000 0x1000000 if PAX_KERNEXEC
5619 range 0x2000 0x1000000
5620 ---help---
5621 This value puts the alignment restrictions on physical address
5622 @@ -1647,9 +1650,10 @@ config HOTPLUG_CPU
5623 Say N if you want to disable CPU hotplug.
5624
5625 config COMPAT_VDSO
5626 - def_bool y
5627 + def_bool n
5628 prompt "Compat VDSO support"
5629 depends on X86_32 || IA32_EMULATION
5630 + depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF
5631 ---help---
5632 Map the 32-bit VDSO to the predictable old-style address too.
5633
5634 diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
5635 index e3ca7e0..b30b28a 100644
5636 --- a/arch/x86/Kconfig.cpu
5637 +++ b/arch/x86/Kconfig.cpu
5638 @@ -341,7 +341,7 @@ config X86_PPRO_FENCE
5639
5640 config X86_F00F_BUG
5641 def_bool y
5642 - depends on M586MMX || M586TSC || M586 || M486 || M386
5643 + depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
5644
5645 config X86_INVD_BUG
5646 def_bool y
5647 @@ -365,7 +365,7 @@ config X86_POPAD_OK
5648
5649 config X86_ALIGNMENT_16
5650 def_bool y
5651 - depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
5652 + depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
5653
5654 config X86_INTEL_USERCOPY
5655 def_bool y
5656 @@ -411,7 +411,7 @@ config X86_CMPXCHG64
5657 # generates cmov.
5658 config X86_CMOV
5659 def_bool y
5660 - depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
5661 + depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
5662
5663 config X86_MINIMUM_CPU_FAMILY
5664 int
5665 diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
5666 index bf56e17..05f9891 100644
5667 --- a/arch/x86/Kconfig.debug
5668 +++ b/arch/x86/Kconfig.debug
5669 @@ -81,7 +81,7 @@ config X86_PTDUMP
5670 config DEBUG_RODATA
5671 bool "Write protect kernel read-only data structures"
5672 default y
5673 - depends on DEBUG_KERNEL
5674 + depends on DEBUG_KERNEL && BROKEN
5675 ---help---
5676 Mark the kernel read-only data as write-protected in the pagetables,
5677 in order to catch accidental (and incorrect) writes to such const
5678 @@ -99,7 +99,7 @@ config DEBUG_RODATA_TEST
5679
5680 config DEBUG_SET_MODULE_RONX
5681 bool "Set loadable kernel module data as NX and text as RO"
5682 - depends on MODULES
5683 + depends on MODULES && BROKEN
5684 ---help---
5685 This option helps catch unintended modifications to loadable
5686 kernel module's text and read-only data. It also prevents execution
5687 diff --git a/arch/x86/Makefile b/arch/x86/Makefile
5688 index b02e509..2631e48 100644
5689 --- a/arch/x86/Makefile
5690 +++ b/arch/x86/Makefile
5691 @@ -46,6 +46,7 @@ else
5692 UTS_MACHINE := x86_64
5693 CHECKFLAGS += -D__x86_64__ -m64
5694
5695 + biarch := $(call cc-option,-m64)
5696 KBUILD_AFLAGS += -m64
5697 KBUILD_CFLAGS += -m64
5698
5699 @@ -195,3 +196,12 @@ define archhelp
5700 echo ' FDARGS="..." arguments for the booted kernel'
5701 echo ' FDINITRD=file initrd for the booted kernel'
5702 endef
5703 +
5704 +define OLD_LD
5705 +
5706 +*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
5707 +*** Please upgrade your binutils to 2.18 or newer
5708 +endef
5709 +
5710 +archprepare:
5711 + $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
5712 diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
5713 index 95365a8..52f857b 100644
5714 --- a/arch/x86/boot/Makefile
5715 +++ b/arch/x86/boot/Makefile
5716 @@ -63,6 +63,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D__KERNEL__ \
5717 $(call cc-option, -fno-stack-protector) \
5718 $(call cc-option, -mpreferred-stack-boundary=2)
5719 KBUILD_CFLAGS += $(call cc-option, -m32)
5720 +ifdef CONSTIFY_PLUGIN
5721 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
5722 +endif
5723 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
5724 GCOV_PROFILE := n
5725
5726 diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h
5727 index 878e4b9..20537ab 100644
5728 --- a/arch/x86/boot/bitops.h
5729 +++ b/arch/x86/boot/bitops.h
5730 @@ -26,7 +26,7 @@ static inline int variable_test_bit(int nr, const void *addr)
5731 u8 v;
5732 const u32 *p = (const u32 *)addr;
5733
5734 - asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
5735 + asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
5736 return v;
5737 }
5738
5739 @@ -37,7 +37,7 @@ static inline int variable_test_bit(int nr, const void *addr)
5740
5741 static inline void set_bit(int nr, void *addr)
5742 {
5743 - asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
5744 + asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
5745 }
5746
5747 #endif /* BOOT_BITOPS_H */
5748 diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
5749 index c7093bd..d4247ffe0 100644
5750 --- a/arch/x86/boot/boot.h
5751 +++ b/arch/x86/boot/boot.h
5752 @@ -85,7 +85,7 @@ static inline void io_delay(void)
5753 static inline u16 ds(void)
5754 {
5755 u16 seg;
5756 - asm("movw %%ds,%0" : "=rm" (seg));
5757 + asm volatile("movw %%ds,%0" : "=rm" (seg));
5758 return seg;
5759 }
5760
5761 @@ -181,7 +181,7 @@ static inline void wrgs32(u32 v, addr_t addr)
5762 static inline int memcmp(const void *s1, const void *s2, size_t len)
5763 {
5764 u8 diff;
5765 - asm("repe; cmpsb; setnz %0"
5766 + asm volatile("repe; cmpsb; setnz %0"
5767 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
5768 return diff;
5769 }
5770 diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
5771 index 09664ef..edc5d03 100644
5772 --- a/arch/x86/boot/compressed/Makefile
5773 +++ b/arch/x86/boot/compressed/Makefile
5774 @@ -14,6 +14,9 @@ cflags-$(CONFIG_X86_64) := -mcmodel=small
5775 KBUILD_CFLAGS += $(cflags-y)
5776 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
5777 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
5778 +ifdef CONSTIFY_PLUGIN
5779 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
5780 +endif
5781
5782 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
5783 GCOV_PROFILE := n
5784 diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
5785 index 67a655a..b924059 100644
5786 --- a/arch/x86/boot/compressed/head_32.S
5787 +++ b/arch/x86/boot/compressed/head_32.S
5788 @@ -76,7 +76,7 @@ ENTRY(startup_32)
5789 notl %eax
5790 andl %eax, %ebx
5791 #else
5792 - movl $LOAD_PHYSICAL_ADDR, %ebx
5793 + movl $____LOAD_PHYSICAL_ADDR, %ebx
5794 #endif
5795
5796 /* Target address to relocate to for decompression */
5797 @@ -162,7 +162,7 @@ relocated:
5798 * and where it was actually loaded.
5799 */
5800 movl %ebp, %ebx
5801 - subl $LOAD_PHYSICAL_ADDR, %ebx
5802 + subl $____LOAD_PHYSICAL_ADDR, %ebx
5803 jz 2f /* Nothing to be done if loaded at compiled addr. */
5804 /*
5805 * Process relocations.
5806 @@ -170,8 +170,7 @@ relocated:
5807
5808 1: subl $4, %edi
5809 movl (%edi), %ecx
5810 - testl %ecx, %ecx
5811 - jz 2f
5812 + jecxz 2f
5813 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
5814 jmp 1b
5815 2:
5816 diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
5817 index 35af09d..99c9676 100644
5818 --- a/arch/x86/boot/compressed/head_64.S
5819 +++ b/arch/x86/boot/compressed/head_64.S
5820 @@ -91,7 +91,7 @@ ENTRY(startup_32)
5821 notl %eax
5822 andl %eax, %ebx
5823 #else
5824 - movl $LOAD_PHYSICAL_ADDR, %ebx
5825 + movl $____LOAD_PHYSICAL_ADDR, %ebx
5826 #endif
5827
5828 /* Target address to relocate to for decompression */
5829 @@ -233,7 +233,7 @@ ENTRY(startup_64)
5830 notq %rax
5831 andq %rax, %rbp
5832 #else
5833 - movq $LOAD_PHYSICAL_ADDR, %rbp
5834 + movq $____LOAD_PHYSICAL_ADDR, %rbp
5835 #endif
5836
5837 /* Target address to relocate to for decompression */
5838 diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
5839 index 3a19d04..7c1d55a 100644
5840 --- a/arch/x86/boot/compressed/misc.c
5841 +++ b/arch/x86/boot/compressed/misc.c
5842 @@ -310,7 +310,7 @@ static void parse_elf(void *output)
5843 case PT_LOAD:
5844 #ifdef CONFIG_RELOCATABLE
5845 dest = output;
5846 - dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
5847 + dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
5848 #else
5849 dest = (void *)(phdr->p_paddr);
5850 #endif
5851 @@ -363,7 +363,7 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap,
5852 error("Destination address too large");
5853 #endif
5854 #ifndef CONFIG_RELOCATABLE
5855 - if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
5856 + if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
5857 error("Wrong destination address");
5858 #endif
5859
5860 diff --git a/arch/x86/boot/compressed/relocs.c b/arch/x86/boot/compressed/relocs.c
5861 index 89bbf4e..869908e 100644
5862 --- a/arch/x86/boot/compressed/relocs.c
5863 +++ b/arch/x86/boot/compressed/relocs.c
5864 @@ -13,8 +13,11 @@
5865
5866 static void die(char *fmt, ...);
5867
5868 +#include "../../../../include/generated/autoconf.h"
5869 +
5870 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
5871 static Elf32_Ehdr ehdr;
5872 +static Elf32_Phdr *phdr;
5873 static unsigned long reloc_count, reloc_idx;
5874 static unsigned long *relocs;
5875
5876 @@ -270,9 +273,39 @@ static void read_ehdr(FILE *fp)
5877 }
5878 }
5879
5880 +static void read_phdrs(FILE *fp)
5881 +{
5882 + unsigned int i;
5883 +
5884 + phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
5885 + if (!phdr) {
5886 + die("Unable to allocate %d program headers\n",
5887 + ehdr.e_phnum);
5888 + }
5889 + if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
5890 + die("Seek to %d failed: %s\n",
5891 + ehdr.e_phoff, strerror(errno));
5892 + }
5893 + if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
5894 + die("Cannot read ELF program headers: %s\n",
5895 + strerror(errno));
5896 + }
5897 + for(i = 0; i < ehdr.e_phnum; i++) {
5898 + phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
5899 + phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
5900 + phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
5901 + phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
5902 + phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
5903 + phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
5904 + phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
5905 + phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
5906 + }
5907 +
5908 +}
5909 +
5910 static void read_shdrs(FILE *fp)
5911 {
5912 - int i;
5913 + unsigned int i;
5914 Elf32_Shdr shdr;
5915
5916 secs = calloc(ehdr.e_shnum, sizeof(struct section));
5917 @@ -307,7 +340,7 @@ static void read_shdrs(FILE *fp)
5918
5919 static void read_strtabs(FILE *fp)
5920 {
5921 - int i;
5922 + unsigned int i;
5923 for (i = 0; i < ehdr.e_shnum; i++) {
5924 struct section *sec = &secs[i];
5925 if (sec->shdr.sh_type != SHT_STRTAB) {
5926 @@ -332,7 +365,7 @@ static void read_strtabs(FILE *fp)
5927
5928 static void read_symtabs(FILE *fp)
5929 {
5930 - int i,j;
5931 + unsigned int i,j;
5932 for (i = 0; i < ehdr.e_shnum; i++) {
5933 struct section *sec = &secs[i];
5934 if (sec->shdr.sh_type != SHT_SYMTAB) {
5935 @@ -365,7 +398,9 @@ static void read_symtabs(FILE *fp)
5936
5937 static void read_relocs(FILE *fp)
5938 {
5939 - int i,j;
5940 + unsigned int i,j;
5941 + uint32_t base;
5942 +
5943 for (i = 0; i < ehdr.e_shnum; i++) {
5944 struct section *sec = &secs[i];
5945 if (sec->shdr.sh_type != SHT_REL) {
5946 @@ -385,9 +420,18 @@ static void read_relocs(FILE *fp)
5947 die("Cannot read symbol table: %s\n",
5948 strerror(errno));
5949 }
5950 + base = 0;
5951 + for (j = 0; j < ehdr.e_phnum; j++) {
5952 + if (phdr[j].p_type != PT_LOAD )
5953 + continue;
5954 + if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
5955 + continue;
5956 + base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
5957 + break;
5958 + }
5959 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
5960 Elf32_Rel *rel = &sec->reltab[j];
5961 - rel->r_offset = elf32_to_cpu(rel->r_offset);
5962 + rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
5963 rel->r_info = elf32_to_cpu(rel->r_info);
5964 }
5965 }
5966 @@ -396,14 +440,14 @@ static void read_relocs(FILE *fp)
5967
5968 static void print_absolute_symbols(void)
5969 {
5970 - int i;
5971 + unsigned int i;
5972 printf("Absolute symbols\n");
5973 printf(" Num: Value Size Type Bind Visibility Name\n");
5974 for (i = 0; i < ehdr.e_shnum; i++) {
5975 struct section *sec = &secs[i];
5976 char *sym_strtab;
5977 Elf32_Sym *sh_symtab;
5978 - int j;
5979 + unsigned int j;
5980
5981 if (sec->shdr.sh_type != SHT_SYMTAB) {
5982 continue;
5983 @@ -431,14 +475,14 @@ static void print_absolute_symbols(void)
5984
5985 static void print_absolute_relocs(void)
5986 {
5987 - int i, printed = 0;
5988 + unsigned int i, printed = 0;
5989
5990 for (i = 0; i < ehdr.e_shnum; i++) {
5991 struct section *sec = &secs[i];
5992 struct section *sec_applies, *sec_symtab;
5993 char *sym_strtab;
5994 Elf32_Sym *sh_symtab;
5995 - int j;
5996 + unsigned int j;
5997 if (sec->shdr.sh_type != SHT_REL) {
5998 continue;
5999 }
6000 @@ -499,13 +543,13 @@ static void print_absolute_relocs(void)
6001
6002 static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
6003 {
6004 - int i;
6005 + unsigned int i;
6006 /* Walk through the relocations */
6007 for (i = 0; i < ehdr.e_shnum; i++) {
6008 char *sym_strtab;
6009 Elf32_Sym *sh_symtab;
6010 struct section *sec_applies, *sec_symtab;
6011 - int j;
6012 + unsigned int j;
6013 struct section *sec = &secs[i];
6014
6015 if (sec->shdr.sh_type != SHT_REL) {
6016 @@ -530,6 +574,22 @@ static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
6017 !is_rel_reloc(sym_name(sym_strtab, sym))) {
6018 continue;
6019 }
6020 + /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
6021 + if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
6022 + continue;
6023 +
6024 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
6025 + /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
6026 + if (!strcmp(sec_name(sym->st_shndx), ".module.text") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
6027 + continue;
6028 + if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
6029 + continue;
6030 + if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
6031 + continue;
6032 + if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
6033 + continue;
6034 +#endif
6035 +
6036 switch (r_type) {
6037 case R_386_NONE:
6038 case R_386_PC32:
6039 @@ -571,7 +631,7 @@ static int cmp_relocs(const void *va, const void *vb)
6040
6041 static void emit_relocs(int as_text)
6042 {
6043 - int i;
6044 + unsigned int i;
6045 /* Count how many relocations I have and allocate space for them. */
6046 reloc_count = 0;
6047 walk_relocs(count_reloc);
6048 @@ -665,6 +725,7 @@ int main(int argc, char **argv)
6049 fname, strerror(errno));
6050 }
6051 read_ehdr(fp);
6052 + read_phdrs(fp);
6053 read_shdrs(fp);
6054 read_strtabs(fp);
6055 read_symtabs(fp);
6056 diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
6057 index 4d3ff03..e4972ff 100644
6058 --- a/arch/x86/boot/cpucheck.c
6059 +++ b/arch/x86/boot/cpucheck.c
6060 @@ -74,7 +74,7 @@ static int has_fpu(void)
6061 u16 fcw = -1, fsw = -1;
6062 u32 cr0;
6063
6064 - asm("movl %%cr0,%0" : "=r" (cr0));
6065 + asm volatile("movl %%cr0,%0" : "=r" (cr0));
6066 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
6067 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
6068 asm volatile("movl %0,%%cr0" : : "r" (cr0));
6069 @@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
6070 {
6071 u32 f0, f1;
6072
6073 - asm("pushfl ; "
6074 + asm volatile("pushfl ; "
6075 "pushfl ; "
6076 "popl %0 ; "
6077 "movl %0,%1 ; "
6078 @@ -115,7 +115,7 @@ static void get_flags(void)
6079 set_bit(X86_FEATURE_FPU, cpu.flags);
6080
6081 if (has_eflag(X86_EFLAGS_ID)) {
6082 - asm("cpuid"
6083 + asm volatile("cpuid"
6084 : "=a" (max_intel_level),
6085 "=b" (cpu_vendor[0]),
6086 "=d" (cpu_vendor[1]),
6087 @@ -124,7 +124,7 @@ static void get_flags(void)
6088
6089 if (max_intel_level >= 0x00000001 &&
6090 max_intel_level <= 0x0000ffff) {
6091 - asm("cpuid"
6092 + asm volatile("cpuid"
6093 : "=a" (tfms),
6094 "=c" (cpu.flags[4]),
6095 "=d" (cpu.flags[0])
6096 @@ -136,7 +136,7 @@ static void get_flags(void)
6097 cpu.model += ((tfms >> 16) & 0xf) << 4;
6098 }
6099
6100 - asm("cpuid"
6101 + asm volatile("cpuid"
6102 : "=a" (max_amd_level)
6103 : "a" (0x80000000)
6104 : "ebx", "ecx", "edx");
6105 @@ -144,7 +144,7 @@ static void get_flags(void)
6106 if (max_amd_level >= 0x80000001 &&
6107 max_amd_level <= 0x8000ffff) {
6108 u32 eax = 0x80000001;
6109 - asm("cpuid"
6110 + asm volatile("cpuid"
6111 : "+a" (eax),
6112 "=c" (cpu.flags[6]),
6113 "=d" (cpu.flags[1])
6114 @@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
6115 u32 ecx = MSR_K7_HWCR;
6116 u32 eax, edx;
6117
6118 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6119 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6120 eax &= ~(1 << 15);
6121 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6122 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6123
6124 get_flags(); /* Make sure it really did something */
6125 err = check_flags();
6126 @@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
6127 u32 ecx = MSR_VIA_FCR;
6128 u32 eax, edx;
6129
6130 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6131 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6132 eax |= (1<<1)|(1<<7);
6133 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6134 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6135
6136 set_bit(X86_FEATURE_CX8, cpu.flags);
6137 err = check_flags();
6138 @@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
6139 u32 eax, edx;
6140 u32 level = 1;
6141
6142 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6143 - asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
6144 - asm("cpuid"
6145 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6146 + asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
6147 + asm volatile("cpuid"
6148 : "+a" (level), "=d" (cpu.flags[0])
6149 : : "ecx", "ebx");
6150 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6151 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6152
6153 err = check_flags();
6154 }
6155 diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
6156 index bdb4d45..0476680 100644
6157 --- a/arch/x86/boot/header.S
6158 +++ b/arch/x86/boot/header.S
6159 @@ -224,7 +224,7 @@ setup_data: .quad 0 # 64-bit physical pointer to
6160 # single linked list of
6161 # struct setup_data
6162
6163 -pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
6164 +pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
6165
6166 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
6167 #define VO_INIT_SIZE (VO__end - VO__text)
6168 diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
6169 index db75d07..8e6d0af 100644
6170 --- a/arch/x86/boot/memory.c
6171 +++ b/arch/x86/boot/memory.c
6172 @@ -19,7 +19,7 @@
6173
6174 static int detect_memory_e820(void)
6175 {
6176 - int count = 0;
6177 + unsigned int count = 0;
6178 struct biosregs ireg, oreg;
6179 struct e820entry *desc = boot_params.e820_map;
6180 static struct e820entry buf; /* static so it is zeroed */
6181 diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c
6182 index 11e8c6e..fdbb1ed 100644
6183 --- a/arch/x86/boot/video-vesa.c
6184 +++ b/arch/x86/boot/video-vesa.c
6185 @@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
6186
6187 boot_params.screen_info.vesapm_seg = oreg.es;
6188 boot_params.screen_info.vesapm_off = oreg.di;
6189 + boot_params.screen_info.vesapm_size = oreg.cx;
6190 }
6191
6192 /*
6193 diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
6194 index 43eda28..5ab5fdb 100644
6195 --- a/arch/x86/boot/video.c
6196 +++ b/arch/x86/boot/video.c
6197 @@ -96,7 +96,7 @@ static void store_mode_params(void)
6198 static unsigned int get_entry(void)
6199 {
6200 char entry_buf[4];
6201 - int i, len = 0;
6202 + unsigned int i, len = 0;
6203 int key;
6204 unsigned int v;
6205
6206 diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
6207 index 5b577d5..3c1fed4 100644
6208 --- a/arch/x86/crypto/aes-x86_64-asm_64.S
6209 +++ b/arch/x86/crypto/aes-x86_64-asm_64.S
6210 @@ -8,6 +8,8 @@
6211 * including this sentence is retained in full.
6212 */
6213
6214 +#include <asm/alternative-asm.h>
6215 +
6216 .extern crypto_ft_tab
6217 .extern crypto_it_tab
6218 .extern crypto_fl_tab
6219 @@ -71,6 +73,8 @@ FUNC: movq r1,r2; \
6220 je B192; \
6221 leaq 32(r9),r9;
6222
6223 +#define ret pax_force_retaddr 0, 1; ret
6224 +
6225 #define epilogue(r1,r2,r3,r4,r5,r6,r7,r8,r9) \
6226 movq r1,r2; \
6227 movq r3,r4; \
6228 diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
6229 index be6d9e3..21fbbca 100644
6230 --- a/arch/x86/crypto/aesni-intel_asm.S
6231 +++ b/arch/x86/crypto/aesni-intel_asm.S
6232 @@ -31,6 +31,7 @@
6233
6234 #include <linux/linkage.h>
6235 #include <asm/inst.h>
6236 +#include <asm/alternative-asm.h>
6237
6238 #ifdef __x86_64__
6239 .data
6240 @@ -1436,7 +1437,9 @@ _return_T_done_decrypt:
6241 pop %r14
6242 pop %r13
6243 pop %r12
6244 + pax_force_retaddr 0, 1
6245 ret
6246 +ENDPROC(aesni_gcm_dec)
6247
6248
6249 /*****************************************************************************
6250 @@ -1699,7 +1702,9 @@ _return_T_done_encrypt:
6251 pop %r14
6252 pop %r13
6253 pop %r12
6254 + pax_force_retaddr 0, 1
6255 ret
6256 +ENDPROC(aesni_gcm_enc)
6257
6258 #endif
6259
6260 @@ -1714,6 +1719,7 @@ _key_expansion_256a:
6261 pxor %xmm1, %xmm0
6262 movaps %xmm0, (TKEYP)
6263 add $0x10, TKEYP
6264 + pax_force_retaddr_bts
6265 ret
6266
6267 .align 4
6268 @@ -1738,6 +1744,7 @@ _key_expansion_192a:
6269 shufps $0b01001110, %xmm2, %xmm1
6270 movaps %xmm1, 0x10(TKEYP)
6271 add $0x20, TKEYP
6272 + pax_force_retaddr_bts
6273 ret
6274
6275 .align 4
6276 @@ -1757,6 +1764,7 @@ _key_expansion_192b:
6277
6278 movaps %xmm0, (TKEYP)
6279 add $0x10, TKEYP
6280 + pax_force_retaddr_bts
6281 ret
6282
6283 .align 4
6284 @@ -1769,6 +1777,7 @@ _key_expansion_256b:
6285 pxor %xmm1, %xmm2
6286 movaps %xmm2, (TKEYP)
6287 add $0x10, TKEYP
6288 + pax_force_retaddr_bts
6289 ret
6290
6291 /*
6292 @@ -1881,7 +1890,9 @@ ENTRY(aesni_set_key)
6293 #ifndef __x86_64__
6294 popl KEYP
6295 #endif
6296 + pax_force_retaddr 0, 1
6297 ret
6298 +ENDPROC(aesni_set_key)
6299
6300 /*
6301 * void aesni_enc(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
6302 @@ -1902,7 +1913,9 @@ ENTRY(aesni_enc)
6303 popl KLEN
6304 popl KEYP
6305 #endif
6306 + pax_force_retaddr 0, 1
6307 ret
6308 +ENDPROC(aesni_enc)
6309
6310 /*
6311 * _aesni_enc1: internal ABI
6312 @@ -1959,6 +1972,7 @@ _aesni_enc1:
6313 AESENC KEY STATE
6314 movaps 0x70(TKEYP), KEY
6315 AESENCLAST KEY STATE
6316 + pax_force_retaddr_bts
6317 ret
6318
6319 /*
6320 @@ -2067,6 +2081,7 @@ _aesni_enc4:
6321 AESENCLAST KEY STATE2
6322 AESENCLAST KEY STATE3
6323 AESENCLAST KEY STATE4
6324 + pax_force_retaddr_bts
6325 ret
6326
6327 /*
6328 @@ -2089,7 +2104,9 @@ ENTRY(aesni_dec)
6329 popl KLEN
6330 popl KEYP
6331 #endif
6332 + pax_force_retaddr 0, 1
6333 ret
6334 +ENDPROC(aesni_dec)
6335
6336 /*
6337 * _aesni_dec1: internal ABI
6338 @@ -2146,6 +2163,7 @@ _aesni_dec1:
6339 AESDEC KEY STATE
6340 movaps 0x70(TKEYP), KEY
6341 AESDECLAST KEY STATE
6342 + pax_force_retaddr_bts
6343 ret
6344
6345 /*
6346 @@ -2254,6 +2272,7 @@ _aesni_dec4:
6347 AESDECLAST KEY STATE2
6348 AESDECLAST KEY STATE3
6349 AESDECLAST KEY STATE4
6350 + pax_force_retaddr_bts
6351 ret
6352
6353 /*
6354 @@ -2311,7 +2330,9 @@ ENTRY(aesni_ecb_enc)
6355 popl KEYP
6356 popl LEN
6357 #endif
6358 + pax_force_retaddr 0, 1
6359 ret
6360 +ENDPROC(aesni_ecb_enc)
6361
6362 /*
6363 * void aesni_ecb_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
6364 @@ -2369,7 +2390,9 @@ ENTRY(aesni_ecb_dec)
6365 popl KEYP
6366 popl LEN
6367 #endif
6368 + pax_force_retaddr 0, 1
6369 ret
6370 +ENDPROC(aesni_ecb_dec)
6371
6372 /*
6373 * void aesni_cbc_enc(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
6374 @@ -2410,7 +2433,9 @@ ENTRY(aesni_cbc_enc)
6375 popl LEN
6376 popl IVP
6377 #endif
6378 + pax_force_retaddr 0, 1
6379 ret
6380 +ENDPROC(aesni_cbc_enc)
6381
6382 /*
6383 * void aesni_cbc_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
6384 @@ -2498,7 +2523,9 @@ ENTRY(aesni_cbc_dec)
6385 popl LEN
6386 popl IVP
6387 #endif
6388 + pax_force_retaddr 0, 1
6389 ret
6390 +ENDPROC(aesni_cbc_dec)
6391
6392 #ifdef __x86_64__
6393 .align 16
6394 @@ -2524,6 +2551,7 @@ _aesni_inc_init:
6395 mov $1, TCTR_LOW
6396 MOVQ_R64_XMM TCTR_LOW INC
6397 MOVQ_R64_XMM CTR TCTR_LOW
6398 + pax_force_retaddr_bts
6399 ret
6400
6401 /*
6402 @@ -2552,6 +2580,7 @@ _aesni_inc:
6403 .Linc_low:
6404 movaps CTR, IV
6405 PSHUFB_XMM BSWAP_MASK IV
6406 + pax_force_retaddr_bts
6407 ret
6408
6409 /*
6410 @@ -2612,5 +2641,7 @@ ENTRY(aesni_ctr_enc)
6411 .Lctr_enc_ret:
6412 movups IV, (IVP)
6413 .Lctr_enc_just_ret:
6414 + pax_force_retaddr 0, 1
6415 ret
6416 +ENDPROC(aesni_ctr_enc)
6417 #endif
6418 diff --git a/arch/x86/crypto/blowfish-x86_64-asm_64.S b/arch/x86/crypto/blowfish-x86_64-asm_64.S
6419 index 391d245..67f35c2 100644
6420 --- a/arch/x86/crypto/blowfish-x86_64-asm_64.S
6421 +++ b/arch/x86/crypto/blowfish-x86_64-asm_64.S
6422 @@ -20,6 +20,8 @@
6423 *
6424 */
6425
6426 +#include <asm/alternative-asm.h>
6427 +
6428 .file "blowfish-x86_64-asm.S"
6429 .text
6430
6431 @@ -151,9 +153,11 @@ __blowfish_enc_blk:
6432 jnz __enc_xor;
6433
6434 write_block();
6435 + pax_force_retaddr 0, 1
6436 ret;
6437 __enc_xor:
6438 xor_block();
6439 + pax_force_retaddr 0, 1
6440 ret;
6441
6442 .align 8
6443 @@ -188,6 +192,7 @@ blowfish_dec_blk:
6444
6445 movq %r11, %rbp;
6446
6447 + pax_force_retaddr 0, 1
6448 ret;
6449
6450 /**********************************************************************
6451 @@ -342,6 +347,7 @@ __blowfish_enc_blk_4way:
6452
6453 popq %rbx;
6454 popq %rbp;
6455 + pax_force_retaddr 0, 1
6456 ret;
6457
6458 __enc_xor4:
6459 @@ -349,6 +355,7 @@ __enc_xor4:
6460
6461 popq %rbx;
6462 popq %rbp;
6463 + pax_force_retaddr 0, 1
6464 ret;
6465
6466 .align 8
6467 @@ -386,5 +393,6 @@ blowfish_dec_blk_4way:
6468 popq %rbx;
6469 popq %rbp;
6470
6471 + pax_force_retaddr 0, 1
6472 ret;
6473
6474 diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S
6475 index 6214a9b..1f4fc9a 100644
6476 --- a/arch/x86/crypto/salsa20-x86_64-asm_64.S
6477 +++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S
6478 @@ -1,3 +1,5 @@
6479 +#include <asm/alternative-asm.h>
6480 +
6481 # enter ECRYPT_encrypt_bytes
6482 .text
6483 .p2align 5
6484 @@ -790,6 +792,7 @@ ECRYPT_encrypt_bytes:
6485 add %r11,%rsp
6486 mov %rdi,%rax
6487 mov %rsi,%rdx
6488 + pax_force_retaddr 0, 1
6489 ret
6490 # bytesatleast65:
6491 ._bytesatleast65:
6492 @@ -891,6 +894,7 @@ ECRYPT_keysetup:
6493 add %r11,%rsp
6494 mov %rdi,%rax
6495 mov %rsi,%rdx
6496 + pax_force_retaddr
6497 ret
6498 # enter ECRYPT_ivsetup
6499 .text
6500 @@ -917,4 +921,5 @@ ECRYPT_ivsetup:
6501 add %r11,%rsp
6502 mov %rdi,%rax
6503 mov %rsi,%rdx
6504 + pax_force_retaddr
6505 ret
6506 diff --git a/arch/x86/crypto/sha1_ssse3_asm.S b/arch/x86/crypto/sha1_ssse3_asm.S
6507 index b2c2f57..8470cab 100644
6508 --- a/arch/x86/crypto/sha1_ssse3_asm.S
6509 +++ b/arch/x86/crypto/sha1_ssse3_asm.S
6510 @@ -28,6 +28,8 @@
6511 * (at your option) any later version.
6512 */
6513
6514 +#include <asm/alternative-asm.h>
6515 +
6516 #define CTX %rdi // arg1
6517 #define BUF %rsi // arg2
6518 #define CNT %rdx // arg3
6519 @@ -104,6 +106,7 @@
6520 pop %r12
6521 pop %rbp
6522 pop %rbx
6523 + pax_force_retaddr 0, 1
6524 ret
6525
6526 .size \name, .-\name
6527 diff --git a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
6528 index 5b012a2..36d5364 100644
6529 --- a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
6530 +++ b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
6531 @@ -20,6 +20,8 @@
6532 *
6533 */
6534
6535 +#include <asm/alternative-asm.h>
6536 +
6537 .file "twofish-x86_64-asm-3way.S"
6538 .text
6539
6540 @@ -260,6 +262,7 @@ __twofish_enc_blk_3way:
6541 popq %r13;
6542 popq %r14;
6543 popq %r15;
6544 + pax_force_retaddr 0, 1
6545 ret;
6546
6547 __enc_xor3:
6548 @@ -271,6 +274,7 @@ __enc_xor3:
6549 popq %r13;
6550 popq %r14;
6551 popq %r15;
6552 + pax_force_retaddr 0, 1
6553 ret;
6554
6555 .global twofish_dec_blk_3way
6556 @@ -312,5 +316,6 @@ twofish_dec_blk_3way:
6557 popq %r13;
6558 popq %r14;
6559 popq %r15;
6560 + pax_force_retaddr 0, 1
6561 ret;
6562
6563 diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S
6564 index 7bcf3fc..f53832f 100644
6565 --- a/arch/x86/crypto/twofish-x86_64-asm_64.S
6566 +++ b/arch/x86/crypto/twofish-x86_64-asm_64.S
6567 @@ -21,6 +21,7 @@
6568 .text
6569
6570 #include <asm/asm-offsets.h>
6571 +#include <asm/alternative-asm.h>
6572
6573 #define a_offset 0
6574 #define b_offset 4
6575 @@ -268,6 +269,7 @@ twofish_enc_blk:
6576
6577 popq R1
6578 movq $1,%rax
6579 + pax_force_retaddr 0, 1
6580 ret
6581
6582 twofish_dec_blk:
6583 @@ -319,4 +321,5 @@ twofish_dec_blk:
6584
6585 popq R1
6586 movq $1,%rax
6587 + pax_force_retaddr 0, 1
6588 ret
6589 diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
6590 index fd84387..0b4af7d 100644
6591 --- a/arch/x86/ia32/ia32_aout.c
6592 +++ b/arch/x86/ia32/ia32_aout.c
6593 @@ -162,6 +162,8 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
6594 unsigned long dump_start, dump_size;
6595 struct user32 dump;
6596
6597 + memset(&dump, 0, sizeof(dump));
6598 +
6599 fs = get_fs();
6600 set_fs(KERNEL_DS);
6601 has_dumped = 1;
6602 diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
6603 index 6557769..ef6ae89 100644
6604 --- a/arch/x86/ia32/ia32_signal.c
6605 +++ b/arch/x86/ia32/ia32_signal.c
6606 @@ -169,7 +169,7 @@ asmlinkage long sys32_sigaltstack(const stack_ia32_t __user *uss_ptr,
6607 }
6608 seg = get_fs();
6609 set_fs(KERNEL_DS);
6610 - ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss, regs->sp);
6611 + ret = do_sigaltstack(uss_ptr ? (const stack_t __force_user *)&uss : NULL, (stack_t __force_user *)&uoss, regs->sp);
6612 set_fs(seg);
6613 if (ret >= 0 && uoss_ptr) {
6614 if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(stack_ia32_t)))
6615 @@ -370,7 +370,7 @@ static int ia32_setup_sigcontext(struct sigcontext_ia32 __user *sc,
6616 */
6617 static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
6618 size_t frame_size,
6619 - void **fpstate)
6620 + void __user **fpstate)
6621 {
6622 unsigned long sp;
6623
6624 @@ -391,7 +391,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
6625
6626 if (used_math()) {
6627 sp = sp - sig_xstate_ia32_size;
6628 - *fpstate = (struct _fpstate_ia32 *) sp;
6629 + *fpstate = (struct _fpstate_ia32 __user *) sp;
6630 if (save_i387_xstate_ia32(*fpstate) < 0)
6631 return (void __user *) -1L;
6632 }
6633 @@ -399,7 +399,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
6634 sp -= frame_size;
6635 /* Align the stack pointer according to the i386 ABI,
6636 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
6637 - sp = ((sp + 4) & -16ul) - 4;
6638 + sp = ((sp - 12) & -16ul) - 4;
6639 return (void __user *) sp;
6640 }
6641
6642 @@ -457,7 +457,7 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
6643 * These are actually not used anymore, but left because some
6644 * gdb versions depend on them as a marker.
6645 */
6646 - put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
6647 + put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
6648 } put_user_catch(err);
6649
6650 if (err)
6651 @@ -499,7 +499,7 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
6652 0xb8,
6653 __NR_ia32_rt_sigreturn,
6654 0x80cd,
6655 - 0,
6656 + 0
6657 };
6658
6659 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
6660 @@ -529,16 +529,18 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
6661
6662 if (ka->sa.sa_flags & SA_RESTORER)
6663 restorer = ka->sa.sa_restorer;
6664 + else if (current->mm->context.vdso)
6665 + /* Return stub is in 32bit vsyscall page */
6666 + restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
6667 else
6668 - restorer = VDSO32_SYMBOL(current->mm->context.vdso,
6669 - rt_sigreturn);
6670 + restorer = &frame->retcode;
6671 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
6672
6673 /*
6674 * Not actually used anymore, but left because some gdb
6675 * versions need it.
6676 */
6677 - put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
6678 + put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
6679 } put_user_catch(err);
6680
6681 if (err)
6682 diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
6683 index a6253ec..4ad2120 100644
6684 --- a/arch/x86/ia32/ia32entry.S
6685 +++ b/arch/x86/ia32/ia32entry.S
6686 @@ -13,7 +13,9 @@
6687 #include <asm/thread_info.h>
6688 #include <asm/segment.h>
6689 #include <asm/irqflags.h>
6690 +#include <asm/pgtable.h>
6691 #include <linux/linkage.h>
6692 +#include <asm/alternative-asm.h>
6693
6694 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
6695 #include <linux/elf-em.h>
6696 @@ -95,6 +97,32 @@ ENTRY(native_irq_enable_sysexit)
6697 ENDPROC(native_irq_enable_sysexit)
6698 #endif
6699
6700 + .macro pax_enter_kernel_user
6701 + pax_set_fptr_mask
6702 +#ifdef CONFIG_PAX_MEMORY_UDEREF
6703 + call pax_enter_kernel_user
6704 +#endif
6705 + .endm
6706 +
6707 + .macro pax_exit_kernel_user
6708 +#ifdef CONFIG_PAX_MEMORY_UDEREF
6709 + call pax_exit_kernel_user
6710 +#endif
6711 +#ifdef CONFIG_PAX_RANDKSTACK
6712 + pushq %rax
6713 + pushq %r11
6714 + call pax_randomize_kstack
6715 + popq %r11
6716 + popq %rax
6717 +#endif
6718 + .endm
6719 +
6720 +.macro pax_erase_kstack
6721 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
6722 + call pax_erase_kstack
6723 +#endif
6724 +.endm
6725 +
6726 /*
6727 * 32bit SYSENTER instruction entry.
6728 *
6729 @@ -121,12 +149,6 @@ ENTRY(ia32_sysenter_target)
6730 CFI_REGISTER rsp,rbp
6731 SWAPGS_UNSAFE_STACK
6732 movq PER_CPU_VAR(kernel_stack), %rsp
6733 - addq $(KERNEL_STACK_OFFSET),%rsp
6734 - /*
6735 - * No need to follow this irqs on/off section: the syscall
6736 - * disabled irqs, here we enable it straight after entry:
6737 - */
6738 - ENABLE_INTERRUPTS(CLBR_NONE)
6739 movl %ebp,%ebp /* zero extension */
6740 pushq_cfi $__USER32_DS
6741 /*CFI_REL_OFFSET ss,0*/
6742 @@ -134,25 +156,39 @@ ENTRY(ia32_sysenter_target)
6743 CFI_REL_OFFSET rsp,0
6744 pushfq_cfi
6745 /*CFI_REL_OFFSET rflags,0*/
6746 - movl 8*3-THREAD_SIZE+TI_sysenter_return(%rsp), %r10d
6747 - CFI_REGISTER rip,r10
6748 + orl $X86_EFLAGS_IF,(%rsp)
6749 + GET_THREAD_INFO(%r11)
6750 + movl TI_sysenter_return(%r11), %r11d
6751 + CFI_REGISTER rip,r11
6752 pushq_cfi $__USER32_CS
6753 /*CFI_REL_OFFSET cs,0*/
6754 movl %eax, %eax
6755 - pushq_cfi %r10
6756 + pushq_cfi %r11
6757 CFI_REL_OFFSET rip,0
6758 pushq_cfi %rax
6759 cld
6760 SAVE_ARGS 0,1,0
6761 + pax_enter_kernel_user
6762 + /*
6763 + * No need to follow this irqs on/off section: the syscall
6764 + * disabled irqs, here we enable it straight after entry:
6765 + */
6766 + ENABLE_INTERRUPTS(CLBR_NONE)
6767 /* no need to do an access_ok check here because rbp has been
6768 32bit zero extended */
6769 +
6770 +#ifdef CONFIG_PAX_MEMORY_UDEREF
6771 + mov $PAX_USER_SHADOW_BASE,%r11
6772 + add %r11,%rbp
6773 +#endif
6774 +
6775 1: movl (%rbp),%ebp
6776 .section __ex_table,"a"
6777 .quad 1b,ia32_badarg
6778 .previous
6779 - GET_THREAD_INFO(%r10)
6780 - orl $TS_COMPAT,TI_status(%r10)
6781 - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
6782 + GET_THREAD_INFO(%r11)
6783 + orl $TS_COMPAT,TI_status(%r11)
6784 + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
6785 CFI_REMEMBER_STATE
6786 jnz sysenter_tracesys
6787 cmpq $(IA32_NR_syscalls-1),%rax
6788 @@ -162,13 +198,15 @@ sysenter_do_call:
6789 sysenter_dispatch:
6790 call *ia32_sys_call_table(,%rax,8)
6791 movq %rax,RAX-ARGOFFSET(%rsp)
6792 - GET_THREAD_INFO(%r10)
6793 + GET_THREAD_INFO(%r11)
6794 DISABLE_INTERRUPTS(CLBR_NONE)
6795 TRACE_IRQS_OFF
6796 - testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
6797 + testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
6798 jnz sysexit_audit
6799 sysexit_from_sys_call:
6800 - andl $~TS_COMPAT,TI_status(%r10)
6801 + pax_exit_kernel_user
6802 + pax_erase_kstack
6803 + andl $~TS_COMPAT,TI_status(%r11)
6804 /* clear IF, that popfq doesn't enable interrupts early */
6805 andl $~0x200,EFLAGS-R11(%rsp)
6806 movl RIP-R11(%rsp),%edx /* User %eip */
6807 @@ -194,6 +232,9 @@ sysexit_from_sys_call:
6808 movl %eax,%esi /* 2nd arg: syscall number */
6809 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
6810 call audit_syscall_entry
6811 +
6812 + pax_erase_kstack
6813 +
6814 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
6815 cmpq $(IA32_NR_syscalls-1),%rax
6816 ja ia32_badsys
6817 @@ -205,7 +246,7 @@ sysexit_from_sys_call:
6818 .endm
6819
6820 .macro auditsys_exit exit
6821 - testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
6822 + testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
6823 jnz ia32_ret_from_sys_call
6824 TRACE_IRQS_ON
6825 sti
6826 @@ -215,12 +256,12 @@ sysexit_from_sys_call:
6827 movzbl %al,%edi /* zero-extend that into %edi */
6828 inc %edi /* first arg, 0->1(AUDITSC_SUCCESS), 1->2(AUDITSC_FAILURE) */
6829 call audit_syscall_exit
6830 - GET_THREAD_INFO(%r10)
6831 + GET_THREAD_INFO(%r11)
6832 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall return value */
6833 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
6834 cli
6835 TRACE_IRQS_OFF
6836 - testl %edi,TI_flags(%r10)
6837 + testl %edi,TI_flags(%r11)
6838 jz \exit
6839 CLEAR_RREGS -ARGOFFSET
6840 jmp int_with_check
6841 @@ -238,7 +279,7 @@ sysexit_audit:
6842
6843 sysenter_tracesys:
6844 #ifdef CONFIG_AUDITSYSCALL
6845 - testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
6846 + testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
6847 jz sysenter_auditsys
6848 #endif
6849 SAVE_REST
6850 @@ -246,6 +287,9 @@ sysenter_tracesys:
6851 movq $-ENOSYS,RAX(%rsp)/* ptrace can change this for a bad syscall */
6852 movq %rsp,%rdi /* &pt_regs -> arg1 */
6853 call syscall_trace_enter
6854 +
6855 + pax_erase_kstack
6856 +
6857 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
6858 RESTORE_REST
6859 cmpq $(IA32_NR_syscalls-1),%rax
6860 @@ -277,19 +321,20 @@ ENDPROC(ia32_sysenter_target)
6861 ENTRY(ia32_cstar_target)
6862 CFI_STARTPROC32 simple
6863 CFI_SIGNAL_FRAME
6864 - CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
6865 + CFI_DEF_CFA rsp,0
6866 CFI_REGISTER rip,rcx
6867 /*CFI_REGISTER rflags,r11*/
6868 SWAPGS_UNSAFE_STACK
6869 movl %esp,%r8d
6870 CFI_REGISTER rsp,r8
6871 movq PER_CPU_VAR(kernel_stack),%rsp
6872 + SAVE_ARGS 8*6,0,0
6873 + pax_enter_kernel_user
6874 /*
6875 * No need to follow this irqs on/off section: the syscall
6876 * disabled irqs and here we enable it straight after entry:
6877 */
6878 ENABLE_INTERRUPTS(CLBR_NONE)
6879 - SAVE_ARGS 8,0,0
6880 movl %eax,%eax /* zero extension */
6881 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
6882 movq %rcx,RIP-ARGOFFSET(%rsp)
6883 @@ -305,13 +350,19 @@ ENTRY(ia32_cstar_target)
6884 /* no need to do an access_ok check here because r8 has been
6885 32bit zero extended */
6886 /* hardware stack frame is complete now */
6887 +
6888 +#ifdef CONFIG_PAX_MEMORY_UDEREF
6889 + mov $PAX_USER_SHADOW_BASE,%r11
6890 + add %r11,%r8
6891 +#endif
6892 +
6893 1: movl (%r8),%r9d
6894 .section __ex_table,"a"
6895 .quad 1b,ia32_badarg
6896 .previous
6897 - GET_THREAD_INFO(%r10)
6898 - orl $TS_COMPAT,TI_status(%r10)
6899 - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
6900 + GET_THREAD_INFO(%r11)
6901 + orl $TS_COMPAT,TI_status(%r11)
6902 + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
6903 CFI_REMEMBER_STATE
6904 jnz cstar_tracesys
6905 cmpq $IA32_NR_syscalls-1,%rax
6906 @@ -321,13 +372,15 @@ cstar_do_call:
6907 cstar_dispatch:
6908 call *ia32_sys_call_table(,%rax,8)
6909 movq %rax,RAX-ARGOFFSET(%rsp)
6910 - GET_THREAD_INFO(%r10)
6911 + GET_THREAD_INFO(%r11)
6912 DISABLE_INTERRUPTS(CLBR_NONE)
6913 TRACE_IRQS_OFF
6914 - testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
6915 + testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
6916 jnz sysretl_audit
6917 sysretl_from_sys_call:
6918 - andl $~TS_COMPAT,TI_status(%r10)
6919 + pax_exit_kernel_user
6920 + pax_erase_kstack
6921 + andl $~TS_COMPAT,TI_status(%r11)
6922 RESTORE_ARGS 0,-ARG_SKIP,0,0,0
6923 movl RIP-ARGOFFSET(%rsp),%ecx
6924 CFI_REGISTER rip,rcx
6925 @@ -355,7 +408,7 @@ sysretl_audit:
6926
6927 cstar_tracesys:
6928 #ifdef CONFIG_AUDITSYSCALL
6929 - testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
6930 + testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
6931 jz cstar_auditsys
6932 #endif
6933 xchgl %r9d,%ebp
6934 @@ -364,6 +417,9 @@ cstar_tracesys:
6935 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
6936 movq %rsp,%rdi /* &pt_regs -> arg1 */
6937 call syscall_trace_enter
6938 +
6939 + pax_erase_kstack
6940 +
6941 LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */
6942 RESTORE_REST
6943 xchgl %ebp,%r9d
6944 @@ -409,20 +465,21 @@ ENTRY(ia32_syscall)
6945 CFI_REL_OFFSET rip,RIP-RIP
6946 PARAVIRT_ADJUST_EXCEPTION_FRAME
6947 SWAPGS
6948 - /*
6949 - * No need to follow this irqs on/off section: the syscall
6950 - * disabled irqs and here we enable it straight after entry:
6951 - */
6952 - ENABLE_INTERRUPTS(CLBR_NONE)
6953 movl %eax,%eax
6954 pushq_cfi %rax
6955 cld
6956 /* note the registers are not zero extended to the sf.
6957 this could be a problem. */
6958 SAVE_ARGS 0,1,0
6959 - GET_THREAD_INFO(%r10)
6960 - orl $TS_COMPAT,TI_status(%r10)
6961 - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
6962 + pax_enter_kernel_user
6963 + /*
6964 + * No need to follow this irqs on/off section: the syscall
6965 + * disabled irqs and here we enable it straight after entry:
6966 + */
6967 + ENABLE_INTERRUPTS(CLBR_NONE)
6968 + GET_THREAD_INFO(%r11)
6969 + orl $TS_COMPAT,TI_status(%r11)
6970 + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
6971 jnz ia32_tracesys
6972 cmpq $(IA32_NR_syscalls-1),%rax
6973 ja ia32_badsys
6974 @@ -441,6 +498,9 @@ ia32_tracesys:
6975 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
6976 movq %rsp,%rdi /* &pt_regs -> arg1 */
6977 call syscall_trace_enter
6978 +
6979 + pax_erase_kstack
6980 +
6981 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
6982 RESTORE_REST
6983 cmpq $(IA32_NR_syscalls-1),%rax
6984 @@ -455,6 +515,7 @@ ia32_badsys:
6985
6986 quiet_ni_syscall:
6987 movq $-ENOSYS,%rax
6988 + pax_force_retaddr
6989 ret
6990 CFI_ENDPROC
6991
6992 diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
6993 index f6f5c53..b358b28 100644
6994 --- a/arch/x86/ia32/sys_ia32.c
6995 +++ b/arch/x86/ia32/sys_ia32.c
6996 @@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
6997 */
6998 static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
6999 {
7000 - typeof(ubuf->st_uid) uid = 0;
7001 - typeof(ubuf->st_gid) gid = 0;
7002 + typeof(((struct stat64 *)0)->st_uid) uid = 0;
7003 + typeof(((struct stat64 *)0)->st_gid) gid = 0;
7004 SET_UID(uid, stat->uid);
7005 SET_GID(gid, stat->gid);
7006 if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
7007 @@ -308,8 +308,8 @@ asmlinkage long sys32_rt_sigprocmask(int how, compat_sigset_t __user *set,
7008 }
7009 set_fs(KERNEL_DS);
7010 ret = sys_rt_sigprocmask(how,
7011 - set ? (sigset_t __user *)&s : NULL,
7012 - oset ? (sigset_t __user *)&s : NULL,
7013 + set ? (sigset_t __force_user *)&s : NULL,
7014 + oset ? (sigset_t __force_user *)&s : NULL,
7015 sigsetsize);
7016 set_fs(old_fs);
7017 if (ret)
7018 @@ -332,7 +332,7 @@ asmlinkage long sys32_alarm(unsigned int seconds)
7019 return alarm_setitimer(seconds);
7020 }
7021
7022 -asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int *stat_addr,
7023 +asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int __user *stat_addr,
7024 int options)
7025 {
7026 return compat_sys_wait4(pid, stat_addr, options, NULL);
7027 @@ -353,7 +353,7 @@ asmlinkage long sys32_sched_rr_get_interval(compat_pid_t pid,
7028 mm_segment_t old_fs = get_fs();
7029
7030 set_fs(KERNEL_DS);
7031 - ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
7032 + ret = sys_sched_rr_get_interval(pid, (struct timespec __force_user *)&t);
7033 set_fs(old_fs);
7034 if (put_compat_timespec(&t, interval))
7035 return -EFAULT;
7036 @@ -369,7 +369,7 @@ asmlinkage long sys32_rt_sigpending(compat_sigset_t __user *set,
7037 mm_segment_t old_fs = get_fs();
7038
7039 set_fs(KERNEL_DS);
7040 - ret = sys_rt_sigpending((sigset_t __user *)&s, sigsetsize);
7041 + ret = sys_rt_sigpending((sigset_t __force_user *)&s, sigsetsize);
7042 set_fs(old_fs);
7043 if (!ret) {
7044 switch (_NSIG_WORDS) {
7045 @@ -394,7 +394,7 @@ asmlinkage long sys32_rt_sigqueueinfo(int pid, int sig,
7046 if (copy_siginfo_from_user32(&info, uinfo))
7047 return -EFAULT;
7048 set_fs(KERNEL_DS);
7049 - ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *)&info);
7050 + ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __force_user *)&info);
7051 set_fs(old_fs);
7052 return ret;
7053 }
7054 @@ -439,7 +439,7 @@ asmlinkage long sys32_sendfile(int out_fd, int in_fd,
7055 return -EFAULT;
7056
7057 set_fs(KERNEL_DS);
7058 - ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *)&of : NULL,
7059 + ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __force_user *)&of : NULL,
7060 count);
7061 set_fs(old_fs);
7062
7063 diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
7064 index 091508b..e245ff2 100644
7065 --- a/arch/x86/include/asm/alternative-asm.h
7066 +++ b/arch/x86/include/asm/alternative-asm.h
7067 @@ -4,10 +4,10 @@
7068
7069 #ifdef CONFIG_SMP
7070 .macro LOCK_PREFIX
7071 -1: lock
7072 +672: lock
7073 .section .smp_locks,"a"
7074 .balign 4
7075 - .long 1b - .
7076 + .long 672b - .
7077 .previous
7078 .endm
7079 #else
7080 @@ -15,6 +15,45 @@
7081 .endm
7082 #endif
7083
7084 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
7085 + .macro pax_force_retaddr_bts rip=0
7086 + btsq $63,\rip(%rsp)
7087 + .endm
7088 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
7089 + .macro pax_force_retaddr rip=0, reload=0
7090 + btsq $63,\rip(%rsp)
7091 + .endm
7092 + .macro pax_force_fptr ptr
7093 + btsq $63,\ptr
7094 + .endm
7095 + .macro pax_set_fptr_mask
7096 + .endm
7097 +#endif
7098 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
7099 + .macro pax_force_retaddr rip=0, reload=0
7100 + .if \reload
7101 + pax_set_fptr_mask
7102 + .endif
7103 + orq %r10,\rip(%rsp)
7104 + .endm
7105 + .macro pax_force_fptr ptr
7106 + orq %r10,\ptr
7107 + .endm
7108 + .macro pax_set_fptr_mask
7109 + movabs $0x8000000000000000,%r10
7110 + .endm
7111 +#endif
7112 +#else
7113 + .macro pax_force_retaddr rip=0, reload=0
7114 + .endm
7115 + .macro pax_force_fptr ptr
7116 + .endm
7117 + .macro pax_force_retaddr_bts rip=0
7118 + .endm
7119 + .macro pax_set_fptr_mask
7120 + .endm
7121 +#endif
7122 +
7123 .macro altinstruction_entry orig alt feature orig_len alt_len
7124 .long \orig - .
7125 .long \alt - .
7126 diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
7127 index 37ad100..7d47faa 100644
7128 --- a/arch/x86/include/asm/alternative.h
7129 +++ b/arch/x86/include/asm/alternative.h
7130 @@ -89,7 +89,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
7131 ".section .discard,\"aw\",@progbits\n" \
7132 " .byte 0xff + (664f-663f) - (662b-661b)\n" /* rlen <= slen */ \
7133 ".previous\n" \
7134 - ".section .altinstr_replacement, \"ax\"\n" \
7135 + ".section .altinstr_replacement, \"a\"\n" \
7136 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
7137 ".previous"
7138
7139 diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
7140 index 1a6c09a..fec2432 100644
7141 --- a/arch/x86/include/asm/apic.h
7142 +++ b/arch/x86/include/asm/apic.h
7143 @@ -45,7 +45,7 @@ static inline void generic_apic_probe(void)
7144
7145 #ifdef CONFIG_X86_LOCAL_APIC
7146
7147 -extern unsigned int apic_verbosity;
7148 +extern int apic_verbosity;
7149 extern int local_apic_timer_c2_ok;
7150
7151 extern int disable_apic;
7152 diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
7153 index 20370c6..a2eb9b0 100644
7154 --- a/arch/x86/include/asm/apm.h
7155 +++ b/arch/x86/include/asm/apm.h
7156 @@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
7157 __asm__ __volatile__(APM_DO_ZERO_SEGS
7158 "pushl %%edi\n\t"
7159 "pushl %%ebp\n\t"
7160 - "lcall *%%cs:apm_bios_entry\n\t"
7161 + "lcall *%%ss:apm_bios_entry\n\t"
7162 "setc %%al\n\t"
7163 "popl %%ebp\n\t"
7164 "popl %%edi\n\t"
7165 @@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
7166 __asm__ __volatile__(APM_DO_ZERO_SEGS
7167 "pushl %%edi\n\t"
7168 "pushl %%ebp\n\t"
7169 - "lcall *%%cs:apm_bios_entry\n\t"
7170 + "lcall *%%ss:apm_bios_entry\n\t"
7171 "setc %%bl\n\t"
7172 "popl %%ebp\n\t"
7173 "popl %%edi\n\t"
7174 diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
7175 index 58cb6d4..ca9010d 100644
7176 --- a/arch/x86/include/asm/atomic.h
7177 +++ b/arch/x86/include/asm/atomic.h
7178 @@ -22,7 +22,18 @@
7179 */
7180 static inline int atomic_read(const atomic_t *v)
7181 {
7182 - return (*(volatile int *)&(v)->counter);
7183 + return (*(volatile const int *)&(v)->counter);
7184 +}
7185 +
7186 +/**
7187 + * atomic_read_unchecked - read atomic variable
7188 + * @v: pointer of type atomic_unchecked_t
7189 + *
7190 + * Atomically reads the value of @v.
7191 + */
7192 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
7193 +{
7194 + return (*(volatile const int *)&(v)->counter);
7195 }
7196
7197 /**
7198 @@ -38,6 +49,18 @@ static inline void atomic_set(atomic_t *v, int i)
7199 }
7200
7201 /**
7202 + * atomic_set_unchecked - set atomic variable
7203 + * @v: pointer of type atomic_unchecked_t
7204 + * @i: required value
7205 + *
7206 + * Atomically sets the value of @v to @i.
7207 + */
7208 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
7209 +{
7210 + v->counter = i;
7211 +}
7212 +
7213 +/**
7214 * atomic_add - add integer to atomic variable
7215 * @i: integer value to add
7216 * @v: pointer of type atomic_t
7217 @@ -46,7 +69,29 @@ static inline void atomic_set(atomic_t *v, int i)
7218 */
7219 static inline void atomic_add(int i, atomic_t *v)
7220 {
7221 - asm volatile(LOCK_PREFIX "addl %1,%0"
7222 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
7223 +
7224 +#ifdef CONFIG_PAX_REFCOUNT
7225 + "jno 0f\n"
7226 + LOCK_PREFIX "subl %1,%0\n"
7227 + "int $4\n0:\n"
7228 + _ASM_EXTABLE(0b, 0b)
7229 +#endif
7230 +
7231 + : "+m" (v->counter)
7232 + : "ir" (i));
7233 +}
7234 +
7235 +/**
7236 + * atomic_add_unchecked - add integer to atomic variable
7237 + * @i: integer value to add
7238 + * @v: pointer of type atomic_unchecked_t
7239 + *
7240 + * Atomically adds @i to @v.
7241 + */
7242 +static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
7243 +{
7244 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
7245 : "+m" (v->counter)
7246 : "ir" (i));
7247 }
7248 @@ -60,7 +105,29 @@ static inline void atomic_add(int i, atomic_t *v)
7249 */
7250 static inline void atomic_sub(int i, atomic_t *v)
7251 {
7252 - asm volatile(LOCK_PREFIX "subl %1,%0"
7253 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
7254 +
7255 +#ifdef CONFIG_PAX_REFCOUNT
7256 + "jno 0f\n"
7257 + LOCK_PREFIX "addl %1,%0\n"
7258 + "int $4\n0:\n"
7259 + _ASM_EXTABLE(0b, 0b)
7260 +#endif
7261 +
7262 + : "+m" (v->counter)
7263 + : "ir" (i));
7264 +}
7265 +
7266 +/**
7267 + * atomic_sub_unchecked - subtract integer from atomic variable
7268 + * @i: integer value to subtract
7269 + * @v: pointer of type atomic_unchecked_t
7270 + *
7271 + * Atomically subtracts @i from @v.
7272 + */
7273 +static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
7274 +{
7275 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
7276 : "+m" (v->counter)
7277 : "ir" (i));
7278 }
7279 @@ -78,7 +145,16 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
7280 {
7281 unsigned char c;
7282
7283 - asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
7284 + asm volatile(LOCK_PREFIX "subl %2,%0\n"
7285 +
7286 +#ifdef CONFIG_PAX_REFCOUNT
7287 + "jno 0f\n"
7288 + LOCK_PREFIX "addl %2,%0\n"
7289 + "int $4\n0:\n"
7290 + _ASM_EXTABLE(0b, 0b)
7291 +#endif
7292 +
7293 + "sete %1\n"
7294 : "+m" (v->counter), "=qm" (c)
7295 : "ir" (i) : "memory");
7296 return c;
7297 @@ -92,7 +168,27 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
7298 */
7299 static inline void atomic_inc(atomic_t *v)
7300 {
7301 - asm volatile(LOCK_PREFIX "incl %0"
7302 + asm volatile(LOCK_PREFIX "incl %0\n"
7303 +
7304 +#ifdef CONFIG_PAX_REFCOUNT
7305 + "jno 0f\n"
7306 + LOCK_PREFIX "decl %0\n"
7307 + "int $4\n0:\n"
7308 + _ASM_EXTABLE(0b, 0b)
7309 +#endif
7310 +
7311 + : "+m" (v->counter));
7312 +}
7313 +
7314 +/**
7315 + * atomic_inc_unchecked - increment atomic variable
7316 + * @v: pointer of type atomic_unchecked_t
7317 + *
7318 + * Atomically increments @v by 1.
7319 + */
7320 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
7321 +{
7322 + asm volatile(LOCK_PREFIX "incl %0\n"
7323 : "+m" (v->counter));
7324 }
7325
7326 @@ -104,7 +200,27 @@ static inline void atomic_inc(atomic_t *v)
7327 */
7328 static inline void atomic_dec(atomic_t *v)
7329 {
7330 - asm volatile(LOCK_PREFIX "decl %0"
7331 + asm volatile(LOCK_PREFIX "decl %0\n"
7332 +
7333 +#ifdef CONFIG_PAX_REFCOUNT
7334 + "jno 0f\n"
7335 + LOCK_PREFIX "incl %0\n"
7336 + "int $4\n0:\n"
7337 + _ASM_EXTABLE(0b, 0b)
7338 +#endif
7339 +
7340 + : "+m" (v->counter));
7341 +}
7342 +
7343 +/**
7344 + * atomic_dec_unchecked - decrement atomic variable
7345 + * @v: pointer of type atomic_unchecked_t
7346 + *
7347 + * Atomically decrements @v by 1.
7348 + */
7349 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
7350 +{
7351 + asm volatile(LOCK_PREFIX "decl %0\n"
7352 : "+m" (v->counter));
7353 }
7354
7355 @@ -120,7 +236,16 @@ static inline int atomic_dec_and_test(atomic_t *v)
7356 {
7357 unsigned char c;
7358
7359 - asm volatile(LOCK_PREFIX "decl %0; sete %1"
7360 + asm volatile(LOCK_PREFIX "decl %0\n"
7361 +
7362 +#ifdef CONFIG_PAX_REFCOUNT
7363 + "jno 0f\n"
7364 + LOCK_PREFIX "incl %0\n"
7365 + "int $4\n0:\n"
7366 + _ASM_EXTABLE(0b, 0b)
7367 +#endif
7368 +
7369 + "sete %1\n"
7370 : "+m" (v->counter), "=qm" (c)
7371 : : "memory");
7372 return c != 0;
7373 @@ -138,7 +263,35 @@ static inline int atomic_inc_and_test(atomic_t *v)
7374 {
7375 unsigned char c;
7376
7377 - asm volatile(LOCK_PREFIX "incl %0; sete %1"
7378 + asm volatile(LOCK_PREFIX "incl %0\n"
7379 +
7380 +#ifdef CONFIG_PAX_REFCOUNT
7381 + "jno 0f\n"
7382 + LOCK_PREFIX "decl %0\n"
7383 + "int $4\n0:\n"
7384 + _ASM_EXTABLE(0b, 0b)
7385 +#endif
7386 +
7387 + "sete %1\n"
7388 + : "+m" (v->counter), "=qm" (c)
7389 + : : "memory");
7390 + return c != 0;
7391 +}
7392 +
7393 +/**
7394 + * atomic_inc_and_test_unchecked - increment and test
7395 + * @v: pointer of type atomic_unchecked_t
7396 + *
7397 + * Atomically increments @v by 1
7398 + * and returns true if the result is zero, or false for all
7399 + * other cases.
7400 + */
7401 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
7402 +{
7403 + unsigned char c;
7404 +
7405 + asm volatile(LOCK_PREFIX "incl %0\n"
7406 + "sete %1\n"
7407 : "+m" (v->counter), "=qm" (c)
7408 : : "memory");
7409 return c != 0;
7410 @@ -157,7 +310,16 @@ static inline int atomic_add_negative(int i, atomic_t *v)
7411 {
7412 unsigned char c;
7413
7414 - asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
7415 + asm volatile(LOCK_PREFIX "addl %2,%0\n"
7416 +
7417 +#ifdef CONFIG_PAX_REFCOUNT
7418 + "jno 0f\n"
7419 + LOCK_PREFIX "subl %2,%0\n"
7420 + "int $4\n0:\n"
7421 + _ASM_EXTABLE(0b, 0b)
7422 +#endif
7423 +
7424 + "sets %1\n"
7425 : "+m" (v->counter), "=qm" (c)
7426 : "ir" (i) : "memory");
7427 return c;
7428 @@ -179,7 +341,7 @@ static inline int atomic_add_return(int i, atomic_t *v)
7429 goto no_xadd;
7430 #endif
7431 /* Modern 486+ processor */
7432 - return i + xadd(&v->counter, i);
7433 + return i + xadd_check_overflow(&v->counter, i);
7434
7435 #ifdef CONFIG_M386
7436 no_xadd: /* Legacy 386 processor */
7437 @@ -192,6 +354,34 @@ no_xadd: /* Legacy 386 processor */
7438 }
7439
7440 /**
7441 + * atomic_add_return_unchecked - add integer and return
7442 + * @i: integer value to add
7443 + * @v: pointer of type atomic_unchecked_t
7444 + *
7445 + * Atomically adds @i to @v and returns @i + @v
7446 + */
7447 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
7448 +{
7449 +#ifdef CONFIG_M386
7450 + int __i;
7451 + unsigned long flags;
7452 + if (unlikely(boot_cpu_data.x86 <= 3))
7453 + goto no_xadd;
7454 +#endif
7455 + /* Modern 486+ processor */
7456 + return i + xadd(&v->counter, i);
7457 +
7458 +#ifdef CONFIG_M386
7459 +no_xadd: /* Legacy 386 processor */
7460 + raw_local_irq_save(flags);
7461 + __i = atomic_read_unchecked(v);
7462 + atomic_set_unchecked(v, i + __i);
7463 + raw_local_irq_restore(flags);
7464 + return i + __i;
7465 +#endif
7466 +}
7467 +
7468 +/**
7469 * atomic_sub_return - subtract integer and return
7470 * @v: pointer of type atomic_t
7471 * @i: integer value to subtract
7472 @@ -204,6 +394,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
7473 }
7474
7475 #define atomic_inc_return(v) (atomic_add_return(1, v))
7476 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
7477 +{
7478 + return atomic_add_return_unchecked(1, v);
7479 +}
7480 #define atomic_dec_return(v) (atomic_sub_return(1, v))
7481
7482 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
7483 @@ -211,11 +405,21 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
7484 return cmpxchg(&v->counter, old, new);
7485 }
7486
7487 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
7488 +{
7489 + return cmpxchg(&v->counter, old, new);
7490 +}
7491 +
7492 static inline int atomic_xchg(atomic_t *v, int new)
7493 {
7494 return xchg(&v->counter, new);
7495 }
7496
7497 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
7498 +{
7499 + return xchg(&v->counter, new);
7500 +}
7501 +
7502 /**
7503 * __atomic_add_unless - add unless the number is already a given value
7504 * @v: pointer of type atomic_t
7505 @@ -227,12 +431,25 @@ static inline int atomic_xchg(atomic_t *v, int new)
7506 */
7507 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
7508 {
7509 - int c, old;
7510 + int c, old, new;
7511 c = atomic_read(v);
7512 for (;;) {
7513 - if (unlikely(c == (u)))
7514 + if (unlikely(c == u))
7515 break;
7516 - old = atomic_cmpxchg((v), c, c + (a));
7517 +
7518 + asm volatile("addl %2,%0\n"
7519 +
7520 +#ifdef CONFIG_PAX_REFCOUNT
7521 + "jno 0f\n"
7522 + "subl %2,%0\n"
7523 + "int $4\n0:\n"
7524 + _ASM_EXTABLE(0b, 0b)
7525 +#endif
7526 +
7527 + : "=r" (new)
7528 + : "0" (c), "ir" (a));
7529 +
7530 + old = atomic_cmpxchg(v, c, new);
7531 if (likely(old == c))
7532 break;
7533 c = old;
7534 @@ -240,6 +457,48 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
7535 return c;
7536 }
7537
7538 +/**
7539 + * atomic_inc_not_zero_hint - increment if not null
7540 + * @v: pointer of type atomic_t
7541 + * @hint: probable value of the atomic before the increment
7542 + *
7543 + * This version of atomic_inc_not_zero() gives a hint of probable
7544 + * value of the atomic. This helps processor to not read the memory
7545 + * before doing the atomic read/modify/write cycle, lowering
7546 + * number of bus transactions on some arches.
7547 + *
7548 + * Returns: 0 if increment was not done, 1 otherwise.
7549 + */
7550 +#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
7551 +static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
7552 +{
7553 + int val, c = hint, new;
7554 +
7555 + /* sanity test, should be removed by compiler if hint is a constant */
7556 + if (!hint)
7557 + return __atomic_add_unless(v, 1, 0);
7558 +
7559 + do {
7560 + asm volatile("incl %0\n"
7561 +
7562 +#ifdef CONFIG_PAX_REFCOUNT
7563 + "jno 0f\n"
7564 + "decl %0\n"
7565 + "int $4\n0:\n"
7566 + _ASM_EXTABLE(0b, 0b)
7567 +#endif
7568 +
7569 + : "=r" (new)
7570 + : "0" (c));
7571 +
7572 + val = atomic_cmpxchg(v, c, new);
7573 + if (val == c)
7574 + return 1;
7575 + c = val;
7576 + } while (c);
7577 +
7578 + return 0;
7579 +}
7580
7581 /*
7582 * atomic_dec_if_positive - decrement by 1 if old value positive
7583 diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
7584 index 24098aa..1e37723 100644
7585 --- a/arch/x86/include/asm/atomic64_32.h
7586 +++ b/arch/x86/include/asm/atomic64_32.h
7587 @@ -12,6 +12,14 @@ typedef struct {
7588 u64 __aligned(8) counter;
7589 } atomic64_t;
7590
7591 +#ifdef CONFIG_PAX_REFCOUNT
7592 +typedef struct {
7593 + u64 __aligned(8) counter;
7594 +} atomic64_unchecked_t;
7595 +#else
7596 +typedef atomic64_t atomic64_unchecked_t;
7597 +#endif
7598 +
7599 #define ATOMIC64_INIT(val) { (val) }
7600
7601 #ifdef CONFIG_X86_CMPXCHG64
7602 @@ -38,6 +46,21 @@ static inline long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n
7603 }
7604
7605 /**
7606 + * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
7607 + * @p: pointer to type atomic64_unchecked_t
7608 + * @o: expected value
7609 + * @n: new value
7610 + *
7611 + * Atomically sets @v to @n if it was equal to @o and returns
7612 + * the old value.
7613 + */
7614 +
7615 +static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
7616 +{
7617 + return cmpxchg64(&v->counter, o, n);
7618 +}
7619 +
7620 +/**
7621 * atomic64_xchg - xchg atomic64 variable
7622 * @v: pointer to type atomic64_t
7623 * @n: value to assign
7624 @@ -77,6 +100,24 @@ static inline void atomic64_set(atomic64_t *v, long long i)
7625 }
7626
7627 /**
7628 + * atomic64_set_unchecked - set atomic64 variable
7629 + * @v: pointer to type atomic64_unchecked_t
7630 + * @n: value to assign
7631 + *
7632 + * Atomically sets the value of @v to @n.
7633 + */
7634 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
7635 +{
7636 + unsigned high = (unsigned)(i >> 32);
7637 + unsigned low = (unsigned)i;
7638 + asm volatile(ATOMIC64_ALTERNATIVE(set)
7639 + : "+b" (low), "+c" (high)
7640 + : "S" (v)
7641 + : "eax", "edx", "memory"
7642 + );
7643 +}
7644 +
7645 +/**
7646 * atomic64_read - read atomic64 variable
7647 * @v: pointer to type atomic64_t
7648 *
7649 @@ -93,6 +134,22 @@ static inline long long atomic64_read(atomic64_t *v)
7650 }
7651
7652 /**
7653 + * atomic64_read_unchecked - read atomic64 variable
7654 + * @v: pointer to type atomic64_unchecked_t
7655 + *
7656 + * Atomically reads the value of @v and returns it.
7657 + */
7658 +static inline long long atomic64_read_unchecked(atomic64_unchecked_t *v)
7659 +{
7660 + long long r;
7661 + asm volatile(ATOMIC64_ALTERNATIVE(read_unchecked)
7662 + : "=A" (r), "+c" (v)
7663 + : : "memory"
7664 + );
7665 + return r;
7666 + }
7667 +
7668 +/**
7669 * atomic64_add_return - add and return
7670 * @i: integer value to add
7671 * @v: pointer to type atomic64_t
7672 @@ -108,6 +165,22 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
7673 return i;
7674 }
7675
7676 +/**
7677 + * atomic64_add_return_unchecked - add and return
7678 + * @i: integer value to add
7679 + * @v: pointer to type atomic64_unchecked_t
7680 + *
7681 + * Atomically adds @i to @v and returns @i + *@v
7682 + */
7683 +static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
7684 +{
7685 + asm volatile(ATOMIC64_ALTERNATIVE(add_return_unchecked)
7686 + : "+A" (i), "+c" (v)
7687 + : : "memory"
7688 + );
7689 + return i;
7690 +}
7691 +
7692 /*
7693 * Other variants with different arithmetic operators:
7694 */
7695 @@ -131,6 +204,17 @@ static inline long long atomic64_inc_return(atomic64_t *v)
7696 return a;
7697 }
7698
7699 +static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
7700 +{
7701 + long long a;
7702 + asm volatile(ATOMIC64_ALTERNATIVE(inc_return_unchecked)
7703 + : "=A" (a)
7704 + : "S" (v)
7705 + : "memory", "ecx"
7706 + );
7707 + return a;
7708 +}
7709 +
7710 static inline long long atomic64_dec_return(atomic64_t *v)
7711 {
7712 long long a;
7713 @@ -159,6 +243,22 @@ static inline long long atomic64_add(long long i, atomic64_t *v)
7714 }
7715
7716 /**
7717 + * atomic64_add_unchecked - add integer to atomic64 variable
7718 + * @i: integer value to add
7719 + * @v: pointer to type atomic64_unchecked_t
7720 + *
7721 + * Atomically adds @i to @v.
7722 + */
7723 +static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
7724 +{
7725 + asm volatile(ATOMIC64_ALTERNATIVE_(add_unchecked, add_return_unchecked)
7726 + : "+A" (i), "+c" (v)
7727 + : : "memory"
7728 + );
7729 + return i;
7730 +}
7731 +
7732 +/**
7733 * atomic64_sub - subtract the atomic64 variable
7734 * @i: integer value to subtract
7735 * @v: pointer to type atomic64_t
7736 diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
7737 index 0e1cbfc..5623683 100644
7738 --- a/arch/x86/include/asm/atomic64_64.h
7739 +++ b/arch/x86/include/asm/atomic64_64.h
7740 @@ -18,7 +18,19 @@
7741 */
7742 static inline long atomic64_read(const atomic64_t *v)
7743 {
7744 - return (*(volatile long *)&(v)->counter);
7745 + return (*(volatile const long *)&(v)->counter);
7746 +}
7747 +
7748 +/**
7749 + * atomic64_read_unchecked - read atomic64 variable
7750 + * @v: pointer of type atomic64_unchecked_t
7751 + *
7752 + * Atomically reads the value of @v.
7753 + * Doesn't imply a read memory barrier.
7754 + */
7755 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
7756 +{
7757 + return (*(volatile const long *)&(v)->counter);
7758 }
7759
7760 /**
7761 @@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64_t *v, long i)
7762 }
7763
7764 /**
7765 + * atomic64_set_unchecked - set atomic64 variable
7766 + * @v: pointer to type atomic64_unchecked_t
7767 + * @i: required value
7768 + *
7769 + * Atomically sets the value of @v to @i.
7770 + */
7771 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
7772 +{
7773 + v->counter = i;
7774 +}
7775 +
7776 +/**
7777 * atomic64_add - add integer to atomic64 variable
7778 * @i: integer value to add
7779 * @v: pointer to type atomic64_t
7780 @@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64_t *v, long i)
7781 */
7782 static inline void atomic64_add(long i, atomic64_t *v)
7783 {
7784 + asm volatile(LOCK_PREFIX "addq %1,%0\n"
7785 +
7786 +#ifdef CONFIG_PAX_REFCOUNT
7787 + "jno 0f\n"
7788 + LOCK_PREFIX "subq %1,%0\n"
7789 + "int $4\n0:\n"
7790 + _ASM_EXTABLE(0b, 0b)
7791 +#endif
7792 +
7793 + : "=m" (v->counter)
7794 + : "er" (i), "m" (v->counter));
7795 +}
7796 +
7797 +/**
7798 + * atomic64_add_unchecked - add integer to atomic64 variable
7799 + * @i: integer value to add
7800 + * @v: pointer to type atomic64_unchecked_t
7801 + *
7802 + * Atomically adds @i to @v.
7803 + */
7804 +static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
7805 +{
7806 asm volatile(LOCK_PREFIX "addq %1,%0"
7807 : "=m" (v->counter)
7808 : "er" (i), "m" (v->counter));
7809 @@ -56,7 +102,29 @@ static inline void atomic64_add(long i, atomic64_t *v)
7810 */
7811 static inline void atomic64_sub(long i, atomic64_t *v)
7812 {
7813 - asm volatile(LOCK_PREFIX "subq %1,%0"
7814 + asm volatile(LOCK_PREFIX "subq %1,%0\n"
7815 +
7816 +#ifdef CONFIG_PAX_REFCOUNT
7817 + "jno 0f\n"
7818 + LOCK_PREFIX "addq %1,%0\n"
7819 + "int $4\n0:\n"
7820 + _ASM_EXTABLE(0b, 0b)
7821 +#endif
7822 +
7823 + : "=m" (v->counter)
7824 + : "er" (i), "m" (v->counter));
7825 +}
7826 +
7827 +/**
7828 + * atomic64_sub_unchecked - subtract the atomic64 variable
7829 + * @i: integer value to subtract
7830 + * @v: pointer to type atomic64_unchecked_t
7831 + *
7832 + * Atomically subtracts @i from @v.
7833 + */
7834 +static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
7835 +{
7836 + asm volatile(LOCK_PREFIX "subq %1,%0\n"
7837 : "=m" (v->counter)
7838 : "er" (i), "m" (v->counter));
7839 }
7840 @@ -74,7 +142,16 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
7841 {
7842 unsigned char c;
7843
7844 - asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
7845 + asm volatile(LOCK_PREFIX "subq %2,%0\n"
7846 +
7847 +#ifdef CONFIG_PAX_REFCOUNT
7848 + "jno 0f\n"
7849 + LOCK_PREFIX "addq %2,%0\n"
7850 + "int $4\n0:\n"
7851 + _ASM_EXTABLE(0b, 0b)
7852 +#endif
7853 +
7854 + "sete %1\n"
7855 : "=m" (v->counter), "=qm" (c)
7856 : "er" (i), "m" (v->counter) : "memory");
7857 return c;
7858 @@ -88,6 +165,27 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
7859 */
7860 static inline void atomic64_inc(atomic64_t *v)
7861 {
7862 + asm volatile(LOCK_PREFIX "incq %0\n"
7863 +
7864 +#ifdef CONFIG_PAX_REFCOUNT
7865 + "jno 0f\n"
7866 + LOCK_PREFIX "decq %0\n"
7867 + "int $4\n0:\n"
7868 + _ASM_EXTABLE(0b, 0b)
7869 +#endif
7870 +
7871 + : "=m" (v->counter)
7872 + : "m" (v->counter));
7873 +}
7874 +
7875 +/**
7876 + * atomic64_inc_unchecked - increment atomic64 variable
7877 + * @v: pointer to type atomic64_unchecked_t
7878 + *
7879 + * Atomically increments @v by 1.
7880 + */
7881 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
7882 +{
7883 asm volatile(LOCK_PREFIX "incq %0"
7884 : "=m" (v->counter)
7885 : "m" (v->counter));
7886 @@ -101,7 +199,28 @@ static inline void atomic64_inc(atomic64_t *v)
7887 */
7888 static inline void atomic64_dec(atomic64_t *v)
7889 {
7890 - asm volatile(LOCK_PREFIX "decq %0"
7891 + asm volatile(LOCK_PREFIX "decq %0\n"
7892 +
7893 +#ifdef CONFIG_PAX_REFCOUNT
7894 + "jno 0f\n"
7895 + LOCK_PREFIX "incq %0\n"
7896 + "int $4\n0:\n"
7897 + _ASM_EXTABLE(0b, 0b)
7898 +#endif
7899 +
7900 + : "=m" (v->counter)
7901 + : "m" (v->counter));
7902 +}
7903 +
7904 +/**
7905 + * atomic64_dec_unchecked - decrement atomic64 variable
7906 + * @v: pointer to type atomic64_t
7907 + *
7908 + * Atomically decrements @v by 1.
7909 + */
7910 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
7911 +{
7912 + asm volatile(LOCK_PREFIX "decq %0\n"
7913 : "=m" (v->counter)
7914 : "m" (v->counter));
7915 }
7916 @@ -118,7 +237,16 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
7917 {
7918 unsigned char c;
7919
7920 - asm volatile(LOCK_PREFIX "decq %0; sete %1"
7921 + asm volatile(LOCK_PREFIX "decq %0\n"
7922 +
7923 +#ifdef CONFIG_PAX_REFCOUNT
7924 + "jno 0f\n"
7925 + LOCK_PREFIX "incq %0\n"
7926 + "int $4\n0:\n"
7927 + _ASM_EXTABLE(0b, 0b)
7928 +#endif
7929 +
7930 + "sete %1\n"
7931 : "=m" (v->counter), "=qm" (c)
7932 : "m" (v->counter) : "memory");
7933 return c != 0;
7934 @@ -136,7 +264,16 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
7935 {
7936 unsigned char c;
7937
7938 - asm volatile(LOCK_PREFIX "incq %0; sete %1"
7939 + asm volatile(LOCK_PREFIX "incq %0\n"
7940 +
7941 +#ifdef CONFIG_PAX_REFCOUNT
7942 + "jno 0f\n"
7943 + LOCK_PREFIX "decq %0\n"
7944 + "int $4\n0:\n"
7945 + _ASM_EXTABLE(0b, 0b)
7946 +#endif
7947 +
7948 + "sete %1\n"
7949 : "=m" (v->counter), "=qm" (c)
7950 : "m" (v->counter) : "memory");
7951 return c != 0;
7952 @@ -155,7 +292,16 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
7953 {
7954 unsigned char c;
7955
7956 - asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
7957 + asm volatile(LOCK_PREFIX "addq %2,%0\n"
7958 +
7959 +#ifdef CONFIG_PAX_REFCOUNT
7960 + "jno 0f\n"
7961 + LOCK_PREFIX "subq %2,%0\n"
7962 + "int $4\n0:\n"
7963 + _ASM_EXTABLE(0b, 0b)
7964 +#endif
7965 +
7966 + "sets %1\n"
7967 : "=m" (v->counter), "=qm" (c)
7968 : "er" (i), "m" (v->counter) : "memory");
7969 return c;
7970 @@ -170,6 +316,18 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
7971 */
7972 static inline long atomic64_add_return(long i, atomic64_t *v)
7973 {
7974 + return i + xadd_check_overflow(&v->counter, i);
7975 +}
7976 +
7977 +/**
7978 + * atomic64_add_return_unchecked - add and return
7979 + * @i: integer value to add
7980 + * @v: pointer to type atomic64_unchecked_t
7981 + *
7982 + * Atomically adds @i to @v and returns @i + @v
7983 + */
7984 +static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
7985 +{
7986 return i + xadd(&v->counter, i);
7987 }
7988
7989 @@ -179,6 +337,10 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
7990 }
7991
7992 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
7993 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
7994 +{
7995 + return atomic64_add_return_unchecked(1, v);
7996 +}
7997 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
7998
7999 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
8000 @@ -186,6 +348,11 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
8001 return cmpxchg(&v->counter, old, new);
8002 }
8003
8004 +static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
8005 +{
8006 + return cmpxchg(&v->counter, old, new);
8007 +}
8008 +
8009 static inline long atomic64_xchg(atomic64_t *v, long new)
8010 {
8011 return xchg(&v->counter, new);
8012 @@ -202,17 +369,30 @@ static inline long atomic64_xchg(atomic64_t *v, long new)
8013 */
8014 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
8015 {
8016 - long c, old;
8017 + long c, old, new;
8018 c = atomic64_read(v);
8019 for (;;) {
8020 - if (unlikely(c == (u)))
8021 + if (unlikely(c == u))
8022 break;
8023 - old = atomic64_cmpxchg((v), c, c + (a));
8024 +
8025 + asm volatile("add %2,%0\n"
8026 +
8027 +#ifdef CONFIG_PAX_REFCOUNT
8028 + "jno 0f\n"
8029 + "sub %2,%0\n"
8030 + "int $4\n0:\n"
8031 + _ASM_EXTABLE(0b, 0b)
8032 +#endif
8033 +
8034 + : "=r" (new)
8035 + : "0" (c), "ir" (a));
8036 +
8037 + old = atomic64_cmpxchg(v, c, new);
8038 if (likely(old == c))
8039 break;
8040 c = old;
8041 }
8042 - return c != (u);
8043 + return c != u;
8044 }
8045
8046 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
8047 diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
8048 index 1775d6e..b65017f 100644
8049 --- a/arch/x86/include/asm/bitops.h
8050 +++ b/arch/x86/include/asm/bitops.h
8051 @@ -38,7 +38,7 @@
8052 * a mask operation on a byte.
8053 */
8054 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
8055 -#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
8056 +#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
8057 #define CONST_MASK(nr) (1 << ((nr) & 7))
8058
8059 /**
8060 diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
8061 index 5e1a2ee..c9f9533 100644
8062 --- a/arch/x86/include/asm/boot.h
8063 +++ b/arch/x86/include/asm/boot.h
8064 @@ -11,10 +11,15 @@
8065 #include <asm/pgtable_types.h>
8066
8067 /* Physical address where kernel should be loaded. */
8068 -#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
8069 +#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
8070 + (CONFIG_PHYSICAL_ALIGN - 1)) \
8071 & ~(CONFIG_PHYSICAL_ALIGN - 1))
8072
8073 +#ifndef __ASSEMBLY__
8074 +extern unsigned char __LOAD_PHYSICAL_ADDR[];
8075 +#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
8076 +#endif
8077 +
8078 /* Minimum kernel alignment, as a power of two */
8079 #ifdef CONFIG_X86_64
8080 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
8081 diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
8082 index 48f99f1..d78ebf9 100644
8083 --- a/arch/x86/include/asm/cache.h
8084 +++ b/arch/x86/include/asm/cache.h
8085 @@ -5,12 +5,13 @@
8086
8087 /* L1 cache line size */
8088 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
8089 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
8090 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
8091
8092 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
8093 +#define __read_only __attribute__((__section__(".data..read_only")))
8094
8095 #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
8096 -#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
8097 +#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
8098
8099 #ifdef CONFIG_X86_VSMP
8100 #ifdef CONFIG_SMP
8101 diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
8102 index 4e12668..501d239 100644
8103 --- a/arch/x86/include/asm/cacheflush.h
8104 +++ b/arch/x86/include/asm/cacheflush.h
8105 @@ -26,7 +26,7 @@ static inline unsigned long get_page_memtype(struct page *pg)
8106 unsigned long pg_flags = pg->flags & _PGMT_MASK;
8107
8108 if (pg_flags == _PGMT_DEFAULT)
8109 - return -1;
8110 + return ~0UL;
8111 else if (pg_flags == _PGMT_WC)
8112 return _PAGE_CACHE_WC;
8113 else if (pg_flags == _PGMT_UC_MINUS)
8114 diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
8115 index 46fc474..b02b0f9 100644
8116 --- a/arch/x86/include/asm/checksum_32.h
8117 +++ b/arch/x86/include/asm/checksum_32.h
8118 @@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
8119 int len, __wsum sum,
8120 int *src_err_ptr, int *dst_err_ptr);
8121
8122 +asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
8123 + int len, __wsum sum,
8124 + int *src_err_ptr, int *dst_err_ptr);
8125 +
8126 +asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
8127 + int len, __wsum sum,
8128 + int *src_err_ptr, int *dst_err_ptr);
8129 +
8130 /*
8131 * Note: when you get a NULL pointer exception here this means someone
8132 * passed in an incorrect kernel address to one of these functions.
8133 @@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src,
8134 int *err_ptr)
8135 {
8136 might_sleep();
8137 - return csum_partial_copy_generic((__force void *)src, dst,
8138 + return csum_partial_copy_generic_from_user((__force void *)src, dst,
8139 len, sum, err_ptr, NULL);
8140 }
8141
8142 @@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_user(const void *src,
8143 {
8144 might_sleep();
8145 if (access_ok(VERIFY_WRITE, dst, len))
8146 - return csum_partial_copy_generic(src, (__force void *)dst,
8147 + return csum_partial_copy_generic_to_user(src, (__force void *)dst,
8148 len, sum, NULL, err_ptr);
8149
8150 if (len)
8151 diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
8152 index 5d3acdf..6447a02 100644
8153 --- a/arch/x86/include/asm/cmpxchg.h
8154 +++ b/arch/x86/include/asm/cmpxchg.h
8155 @@ -14,6 +14,8 @@ extern void __cmpxchg_wrong_size(void)
8156 __compiletime_error("Bad argument size for cmpxchg");
8157 extern void __xadd_wrong_size(void)
8158 __compiletime_error("Bad argument size for xadd");
8159 +extern void __xadd_check_overflow_wrong_size(void)
8160 + __compiletime_error("Bad argument size for xadd_check_overflow");
8161
8162 /*
8163 * Constants for operation sizes. On 32-bit, the 64-bit size it set to
8164 @@ -195,6 +197,34 @@ extern void __xadd_wrong_size(void)
8165 __ret; \
8166 })
8167
8168 +#define __xadd_check_overflow(ptr, inc, lock) \
8169 + ({ \
8170 + __typeof__ (*(ptr)) __ret = (inc); \
8171 + switch (sizeof(*(ptr))) { \
8172 + case __X86_CASE_L: \
8173 + asm volatile (lock "xaddl %0, %1\n" \
8174 + "jno 0f\n" \
8175 + "mov %0,%1\n" \
8176 + "int $4\n0:\n" \
8177 + _ASM_EXTABLE(0b, 0b) \
8178 + : "+r" (__ret), "+m" (*(ptr)) \
8179 + : : "memory", "cc"); \
8180 + break; \
8181 + case __X86_CASE_Q: \
8182 + asm volatile (lock "xaddq %q0, %1\n" \
8183 + "jno 0f\n" \
8184 + "mov %0,%1\n" \
8185 + "int $4\n0:\n" \
8186 + _ASM_EXTABLE(0b, 0b) \
8187 + : "+r" (__ret), "+m" (*(ptr)) \
8188 + : : "memory", "cc"); \
8189 + break; \
8190 + default: \
8191 + __xadd_check_overflow_wrong_size(); \
8192 + } \
8193 + __ret; \
8194 + })
8195 +
8196 /*
8197 * xadd() adds "inc" to "*ptr" and atomically returns the previous
8198 * value of "*ptr".
8199 @@ -207,4 +237,6 @@ extern void __xadd_wrong_size(void)
8200 #define xadd_sync(ptr, inc) __xadd((ptr), (inc), "lock; ")
8201 #define xadd_local(ptr, inc) __xadd((ptr), (inc), "")
8202
8203 +#define xadd_check_overflow(ptr, inc) __xadd_check_overflow((ptr), (inc), LOCK_PREFIX)
8204 +
8205 #endif /* ASM_X86_CMPXCHG_H */
8206 diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
8207 index f3444f7..051a196 100644
8208 --- a/arch/x86/include/asm/cpufeature.h
8209 +++ b/arch/x86/include/asm/cpufeature.h
8210 @@ -363,7 +363,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
8211 ".section .discard,\"aw\",@progbits\n"
8212 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
8213 ".previous\n"
8214 - ".section .altinstr_replacement,\"ax\"\n"
8215 + ".section .altinstr_replacement,\"a\"\n"
8216 "3: movb $1,%0\n"
8217 "4:\n"
8218 ".previous\n"
8219 diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
8220 index 41935fa..3b40db8 100644
8221 --- a/arch/x86/include/asm/desc.h
8222 +++ b/arch/x86/include/asm/desc.h
8223 @@ -4,6 +4,7 @@
8224 #include <asm/desc_defs.h>
8225 #include <asm/ldt.h>
8226 #include <asm/mmu.h>
8227 +#include <asm/pgtable.h>
8228
8229 #include <linux/smp.h>
8230
8231 @@ -16,6 +17,7 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
8232
8233 desc->type = (info->read_exec_only ^ 1) << 1;
8234 desc->type |= info->contents << 2;
8235 + desc->type |= info->seg_not_present ^ 1;
8236
8237 desc->s = 1;
8238 desc->dpl = 0x3;
8239 @@ -34,17 +36,12 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
8240 }
8241
8242 extern struct desc_ptr idt_descr;
8243 -extern gate_desc idt_table[];
8244 -
8245 -struct gdt_page {
8246 - struct desc_struct gdt[GDT_ENTRIES];
8247 -} __attribute__((aligned(PAGE_SIZE)));
8248 -
8249 -DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
8250 +extern gate_desc idt_table[256];
8251
8252 +extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
8253 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
8254 {
8255 - return per_cpu(gdt_page, cpu).gdt;
8256 + return cpu_gdt_table[cpu];
8257 }
8258
8259 #ifdef CONFIG_X86_64
8260 @@ -69,8 +66,14 @@ static inline void pack_gate(gate_desc *gate, unsigned char type,
8261 unsigned long base, unsigned dpl, unsigned flags,
8262 unsigned short seg)
8263 {
8264 - gate->a = (seg << 16) | (base & 0xffff);
8265 - gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
8266 + gate->gate.offset_low = base;
8267 + gate->gate.seg = seg;
8268 + gate->gate.reserved = 0;
8269 + gate->gate.type = type;
8270 + gate->gate.s = 0;
8271 + gate->gate.dpl = dpl;
8272 + gate->gate.p = 1;
8273 + gate->gate.offset_high = base >> 16;
8274 }
8275
8276 #endif
8277 @@ -115,12 +118,16 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
8278
8279 static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
8280 {
8281 + pax_open_kernel();
8282 memcpy(&idt[entry], gate, sizeof(*gate));
8283 + pax_close_kernel();
8284 }
8285
8286 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
8287 {
8288 + pax_open_kernel();
8289 memcpy(&ldt[entry], desc, 8);
8290 + pax_close_kernel();
8291 }
8292
8293 static inline void
8294 @@ -134,7 +141,9 @@ native_write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int
8295 default: size = sizeof(*gdt); break;
8296 }
8297
8298 + pax_open_kernel();
8299 memcpy(&gdt[entry], desc, size);
8300 + pax_close_kernel();
8301 }
8302
8303 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
8304 @@ -207,7 +216,9 @@ static inline void native_set_ldt(const void *addr, unsigned int entries)
8305
8306 static inline void native_load_tr_desc(void)
8307 {
8308 + pax_open_kernel();
8309 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
8310 + pax_close_kernel();
8311 }
8312
8313 static inline void native_load_gdt(const struct desc_ptr *dtr)
8314 @@ -244,8 +255,10 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
8315 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
8316 unsigned int i;
8317
8318 + pax_open_kernel();
8319 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
8320 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
8321 + pax_close_kernel();
8322 }
8323
8324 #define _LDT_empty(info) \
8325 @@ -307,7 +320,7 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
8326 desc->limit = (limit >> 16) & 0xf;
8327 }
8328
8329 -static inline void _set_gate(int gate, unsigned type, void *addr,
8330 +static inline void _set_gate(int gate, unsigned type, const void *addr,
8331 unsigned dpl, unsigned ist, unsigned seg)
8332 {
8333 gate_desc s;
8334 @@ -326,7 +339,7 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
8335 * Pentium F0 0F bugfix can have resulted in the mapped
8336 * IDT being write-protected.
8337 */
8338 -static inline void set_intr_gate(unsigned int n, void *addr)
8339 +static inline void set_intr_gate(unsigned int n, const void *addr)
8340 {
8341 BUG_ON((unsigned)n > 0xFF);
8342 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
8343 @@ -356,19 +369,19 @@ static inline void alloc_intr_gate(unsigned int n, void *addr)
8344 /*
8345 * This routine sets up an interrupt gate at directory privilege level 3.
8346 */
8347 -static inline void set_system_intr_gate(unsigned int n, void *addr)
8348 +static inline void set_system_intr_gate(unsigned int n, const void *addr)
8349 {
8350 BUG_ON((unsigned)n > 0xFF);
8351 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
8352 }
8353
8354 -static inline void set_system_trap_gate(unsigned int n, void *addr)
8355 +static inline void set_system_trap_gate(unsigned int n, const void *addr)
8356 {
8357 BUG_ON((unsigned)n > 0xFF);
8358 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
8359 }
8360
8361 -static inline void set_trap_gate(unsigned int n, void *addr)
8362 +static inline void set_trap_gate(unsigned int n, const void *addr)
8363 {
8364 BUG_ON((unsigned)n > 0xFF);
8365 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
8366 @@ -377,19 +390,31 @@ static inline void set_trap_gate(unsigned int n, void *addr)
8367 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
8368 {
8369 BUG_ON((unsigned)n > 0xFF);
8370 - _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
8371 + _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
8372 }
8373
8374 -static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
8375 +static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
8376 {
8377 BUG_ON((unsigned)n > 0xFF);
8378 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
8379 }
8380
8381 -static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
8382 +static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
8383 {
8384 BUG_ON((unsigned)n > 0xFF);
8385 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
8386 }
8387
8388 +#ifdef CONFIG_X86_32
8389 +static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
8390 +{
8391 + struct desc_struct d;
8392 +
8393 + if (likely(limit))
8394 + limit = (limit - 1UL) >> PAGE_SHIFT;
8395 + pack_descriptor(&d, base, limit, 0xFB, 0xC);
8396 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
8397 +}
8398 +#endif
8399 +
8400 #endif /* _ASM_X86_DESC_H */
8401 diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h
8402 index 278441f..b95a174 100644
8403 --- a/arch/x86/include/asm/desc_defs.h
8404 +++ b/arch/x86/include/asm/desc_defs.h
8405 @@ -31,6 +31,12 @@ struct desc_struct {
8406 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
8407 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
8408 };
8409 + struct {
8410 + u16 offset_low;
8411 + u16 seg;
8412 + unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
8413 + unsigned offset_high: 16;
8414 + } gate;
8415 };
8416 } __attribute__((packed));
8417
8418 diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h
8419 index 908b969..a1f4eb4 100644
8420 --- a/arch/x86/include/asm/e820.h
8421 +++ b/arch/x86/include/asm/e820.h
8422 @@ -69,7 +69,7 @@ struct e820map {
8423 #define ISA_START_ADDRESS 0xa0000
8424 #define ISA_END_ADDRESS 0x100000
8425
8426 -#define BIOS_BEGIN 0x000a0000
8427 +#define BIOS_BEGIN 0x000c0000
8428 #define BIOS_END 0x00100000
8429
8430 #define BIOS_ROM_BASE 0xffe00000
8431 diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
8432 index 5f962df..7289f09 100644
8433 --- a/arch/x86/include/asm/elf.h
8434 +++ b/arch/x86/include/asm/elf.h
8435 @@ -238,7 +238,25 @@ extern int force_personality32;
8436 the loader. We need to make sure that it is out of the way of the program
8437 that it will "exec", and that there is sufficient room for the brk. */
8438
8439 +#ifdef CONFIG_PAX_SEGMEXEC
8440 +#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
8441 +#else
8442 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
8443 +#endif
8444 +
8445 +#ifdef CONFIG_PAX_ASLR
8446 +#ifdef CONFIG_X86_32
8447 +#define PAX_ELF_ET_DYN_BASE 0x10000000UL
8448 +
8449 +#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
8450 +#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
8451 +#else
8452 +#define PAX_ELF_ET_DYN_BASE 0x400000UL
8453 +
8454 +#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
8455 +#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
8456 +#endif
8457 +#endif
8458
8459 /* This yields a mask that user programs can use to figure out what
8460 instruction set this CPU supports. This could be done in user space,
8461 @@ -291,9 +309,7 @@ do { \
8462
8463 #define ARCH_DLINFO \
8464 do { \
8465 - if (vdso_enabled) \
8466 - NEW_AUX_ENT(AT_SYSINFO_EHDR, \
8467 - (unsigned long)current->mm->context.vdso); \
8468 + NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
8469 } while (0)
8470
8471 #define AT_SYSINFO 32
8472 @@ -304,7 +320,7 @@ do { \
8473
8474 #endif /* !CONFIG_X86_32 */
8475
8476 -#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
8477 +#define VDSO_CURRENT_BASE (current->mm->context.vdso)
8478
8479 #define VDSO_ENTRY \
8480 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
8481 @@ -318,9 +334,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
8482 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
8483 #define compat_arch_setup_additional_pages syscall32_setup_pages
8484
8485 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
8486 -#define arch_randomize_brk arch_randomize_brk
8487 -
8488 /*
8489 * True on X86_32 or when emulating IA32 on X86_64
8490 */
8491 diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h
8492 index cc70c1c..d96d011 100644
8493 --- a/arch/x86/include/asm/emergency-restart.h
8494 +++ b/arch/x86/include/asm/emergency-restart.h
8495 @@ -15,6 +15,6 @@ enum reboot_type {
8496
8497 extern enum reboot_type reboot_type;
8498
8499 -extern void machine_emergency_restart(void);
8500 +extern void machine_emergency_restart(void) __noreturn;
8501
8502 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
8503 diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
8504 index d09bb03..4ea4194 100644
8505 --- a/arch/x86/include/asm/futex.h
8506 +++ b/arch/x86/include/asm/futex.h
8507 @@ -12,16 +12,18 @@
8508 #include <asm/system.h>
8509
8510 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
8511 + typecheck(u32 __user *, uaddr); \
8512 asm volatile("1:\t" insn "\n" \
8513 "2:\t.section .fixup,\"ax\"\n" \
8514 "3:\tmov\t%3, %1\n" \
8515 "\tjmp\t2b\n" \
8516 "\t.previous\n" \
8517 _ASM_EXTABLE(1b, 3b) \
8518 - : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
8519 + : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr))\
8520 : "i" (-EFAULT), "0" (oparg), "1" (0))
8521
8522 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
8523 + typecheck(u32 __user *, uaddr); \
8524 asm volatile("1:\tmovl %2, %0\n" \
8525 "\tmovl\t%0, %3\n" \
8526 "\t" insn "\n" \
8527 @@ -34,7 +36,7 @@
8528 _ASM_EXTABLE(1b, 4b) \
8529 _ASM_EXTABLE(2b, 4b) \
8530 : "=&a" (oldval), "=&r" (ret), \
8531 - "+m" (*uaddr), "=&r" (tem) \
8532 + "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \
8533 : "r" (oparg), "i" (-EFAULT), "1" (0))
8534
8535 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
8536 @@ -61,10 +63,10 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
8537
8538 switch (op) {
8539 case FUTEX_OP_SET:
8540 - __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
8541 + __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
8542 break;
8543 case FUTEX_OP_ADD:
8544 - __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
8545 + __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
8546 uaddr, oparg);
8547 break;
8548 case FUTEX_OP_OR:
8549 @@ -123,13 +125,13 @@ static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
8550 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
8551 return -EFAULT;
8552
8553 - asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
8554 + asm volatile("1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"
8555 "2:\t.section .fixup, \"ax\"\n"
8556 "3:\tmov %3, %0\n"
8557 "\tjmp 2b\n"
8558 "\t.previous\n"
8559 _ASM_EXTABLE(1b, 3b)
8560 - : "+r" (ret), "=a" (oldval), "+m" (*uaddr)
8561 + : "+r" (ret), "=a" (oldval), "+m" (*(u32 __user *)____m(uaddr))
8562 : "i" (-EFAULT), "r" (newval), "1" (oldval)
8563 : "memory"
8564 );
8565 diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
8566 index eb92a6e..b98b2f4 100644
8567 --- a/arch/x86/include/asm/hw_irq.h
8568 +++ b/arch/x86/include/asm/hw_irq.h
8569 @@ -136,8 +136,8 @@ extern void setup_ioapic_dest(void);
8570 extern void enable_IO_APIC(void);
8571
8572 /* Statistics */
8573 -extern atomic_t irq_err_count;
8574 -extern atomic_t irq_mis_count;
8575 +extern atomic_unchecked_t irq_err_count;
8576 +extern atomic_unchecked_t irq_mis_count;
8577
8578 /* EISA */
8579 extern void eisa_set_level_irq(unsigned int irq);
8580 diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h
8581 index c9e09ea..73888df 100644
8582 --- a/arch/x86/include/asm/i387.h
8583 +++ b/arch/x86/include/asm/i387.h
8584 @@ -92,6 +92,11 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
8585 {
8586 int err;
8587
8588 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8589 + if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
8590 + fx = (struct i387_fxsave_struct __user *)((void *)fx + PAX_USER_SHADOW_BASE);
8591 +#endif
8592 +
8593 /* See comment in fxsave() below. */
8594 #ifdef CONFIG_AS_FXSAVEQ
8595 asm volatile("1: fxrstorq %[fx]\n\t"
8596 @@ -121,6 +126,11 @@ static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
8597 {
8598 int err;
8599
8600 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8601 + if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
8602 + fx = (struct i387_fxsave_struct __user *)((void __user *)fx + PAX_USER_SHADOW_BASE);
8603 +#endif
8604 +
8605 /*
8606 * Clear the bytes not touched by the fxsave and reserved
8607 * for the SW usage.
8608 @@ -213,13 +223,8 @@ static inline void fpu_fxsave(struct fpu *fpu)
8609 #endif /* CONFIG_X86_64 */
8610
8611 /* We need a safe address that is cheap to find and that is already
8612 - in L1 during context switch. The best choices are unfortunately
8613 - different for UP and SMP */
8614 -#ifdef CONFIG_SMP
8615 -#define safe_address (__per_cpu_offset[0])
8616 -#else
8617 -#define safe_address (kstat_cpu(0).cpustat.user)
8618 -#endif
8619 + in L1 during context switch. */
8620 +#define safe_address (init_tss[smp_processor_id()].x86_tss.sp0)
8621
8622 /*
8623 * These must be called with preempt disabled
8624 @@ -312,7 +317,7 @@ static inline void kernel_fpu_begin(void)
8625 struct thread_info *me = current_thread_info();
8626 preempt_disable();
8627 if (me->status & TS_USEDFPU)
8628 - __save_init_fpu(me->task);
8629 + __save_init_fpu(current);
8630 else
8631 clts();
8632 }
8633 diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
8634 index d8e8eef..99f81ae 100644
8635 --- a/arch/x86/include/asm/io.h
8636 +++ b/arch/x86/include/asm/io.h
8637 @@ -194,6 +194,17 @@ extern void set_iounmap_nonlazy(void);
8638
8639 #include <linux/vmalloc.h>
8640
8641 +#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
8642 +static inline int valid_phys_addr_range(unsigned long addr, size_t count)
8643 +{
8644 + return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
8645 +}
8646 +
8647 +static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
8648 +{
8649 + return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
8650 +}
8651 +
8652 /*
8653 * Convert a virtual cached pointer to an uncached pointer
8654 */
8655 diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
8656 index bba3cf8..06bc8da 100644
8657 --- a/arch/x86/include/asm/irqflags.h
8658 +++ b/arch/x86/include/asm/irqflags.h
8659 @@ -141,6 +141,11 @@ static inline notrace unsigned long arch_local_irq_save(void)
8660 sti; \
8661 sysexit
8662
8663 +#define GET_CR0_INTO_RDI mov %cr0, %rdi
8664 +#define SET_RDI_INTO_CR0 mov %rdi, %cr0
8665 +#define GET_CR3_INTO_RDI mov %cr3, %rdi
8666 +#define SET_RDI_INTO_CR3 mov %rdi, %cr3
8667 +
8668 #else
8669 #define INTERRUPT_RETURN iret
8670 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
8671 diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
8672 index 5478825..839e88c 100644
8673 --- a/arch/x86/include/asm/kprobes.h
8674 +++ b/arch/x86/include/asm/kprobes.h
8675 @@ -37,13 +37,8 @@ typedef u8 kprobe_opcode_t;
8676 #define RELATIVEJUMP_SIZE 5
8677 #define RELATIVECALL_OPCODE 0xe8
8678 #define RELATIVE_ADDR_SIZE 4
8679 -#define MAX_STACK_SIZE 64
8680 -#define MIN_STACK_SIZE(ADDR) \
8681 - (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
8682 - THREAD_SIZE - (unsigned long)(ADDR))) \
8683 - ? (MAX_STACK_SIZE) \
8684 - : (((unsigned long)current_thread_info()) + \
8685 - THREAD_SIZE - (unsigned long)(ADDR)))
8686 +#define MAX_STACK_SIZE 64UL
8687 +#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
8688
8689 #define flush_insn_slot(p) do { } while (0)
8690
8691 diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
8692 index b4973f4..7c4d3fc 100644
8693 --- a/arch/x86/include/asm/kvm_host.h
8694 +++ b/arch/x86/include/asm/kvm_host.h
8695 @@ -459,7 +459,7 @@ struct kvm_arch {
8696 unsigned int n_requested_mmu_pages;
8697 unsigned int n_max_mmu_pages;
8698 unsigned int indirect_shadow_pages;
8699 - atomic_t invlpg_counter;
8700 + atomic_unchecked_t invlpg_counter;
8701 struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
8702 /*
8703 * Hash table of struct kvm_mmu_page.
8704 @@ -638,7 +638,7 @@ struct kvm_x86_ops {
8705 int (*check_intercept)(struct kvm_vcpu *vcpu,
8706 struct x86_instruction_info *info,
8707 enum x86_intercept_stage stage);
8708 -};
8709 +} __do_const;
8710
8711 struct kvm_arch_async_pf {
8712 u32 token;
8713 diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
8714 index 9cdae5d..300d20f 100644
8715 --- a/arch/x86/include/asm/local.h
8716 +++ b/arch/x86/include/asm/local.h
8717 @@ -18,26 +18,58 @@ typedef struct {
8718
8719 static inline void local_inc(local_t *l)
8720 {
8721 - asm volatile(_ASM_INC "%0"
8722 + asm volatile(_ASM_INC "%0\n"
8723 +
8724 +#ifdef CONFIG_PAX_REFCOUNT
8725 + "jno 0f\n"
8726 + _ASM_DEC "%0\n"
8727 + "int $4\n0:\n"
8728 + _ASM_EXTABLE(0b, 0b)
8729 +#endif
8730 +
8731 : "+m" (l->a.counter));
8732 }
8733
8734 static inline void local_dec(local_t *l)
8735 {
8736 - asm volatile(_ASM_DEC "%0"
8737 + asm volatile(_ASM_DEC "%0\n"
8738 +
8739 +#ifdef CONFIG_PAX_REFCOUNT
8740 + "jno 0f\n"
8741 + _ASM_INC "%0\n"
8742 + "int $4\n0:\n"
8743 + _ASM_EXTABLE(0b, 0b)
8744 +#endif
8745 +
8746 : "+m" (l->a.counter));
8747 }
8748
8749 static inline void local_add(long i, local_t *l)
8750 {
8751 - asm volatile(_ASM_ADD "%1,%0"
8752 + asm volatile(_ASM_ADD "%1,%0\n"
8753 +
8754 +#ifdef CONFIG_PAX_REFCOUNT
8755 + "jno 0f\n"
8756 + _ASM_SUB "%1,%0\n"
8757 + "int $4\n0:\n"
8758 + _ASM_EXTABLE(0b, 0b)
8759 +#endif
8760 +
8761 : "+m" (l->a.counter)
8762 : "ir" (i));
8763 }
8764
8765 static inline void local_sub(long i, local_t *l)
8766 {
8767 - asm volatile(_ASM_SUB "%1,%0"
8768 + asm volatile(_ASM_SUB "%1,%0\n"
8769 +
8770 +#ifdef CONFIG_PAX_REFCOUNT
8771 + "jno 0f\n"
8772 + _ASM_ADD "%1,%0\n"
8773 + "int $4\n0:\n"
8774 + _ASM_EXTABLE(0b, 0b)
8775 +#endif
8776 +
8777 : "+m" (l->a.counter)
8778 : "ir" (i));
8779 }
8780 @@ -55,7 +87,16 @@ static inline int local_sub_and_test(long i, local_t *l)
8781 {
8782 unsigned char c;
8783
8784 - asm volatile(_ASM_SUB "%2,%0; sete %1"
8785 + asm volatile(_ASM_SUB "%2,%0\n"
8786 +
8787 +#ifdef CONFIG_PAX_REFCOUNT
8788 + "jno 0f\n"
8789 + _ASM_ADD "%2,%0\n"
8790 + "int $4\n0:\n"
8791 + _ASM_EXTABLE(0b, 0b)
8792 +#endif
8793 +
8794 + "sete %1\n"
8795 : "+m" (l->a.counter), "=qm" (c)
8796 : "ir" (i) : "memory");
8797 return c;
8798 @@ -73,7 +114,16 @@ static inline int local_dec_and_test(local_t *l)
8799 {
8800 unsigned char c;
8801
8802 - asm volatile(_ASM_DEC "%0; sete %1"
8803 + asm volatile(_ASM_DEC "%0\n"
8804 +
8805 +#ifdef CONFIG_PAX_REFCOUNT
8806 + "jno 0f\n"
8807 + _ASM_INC "%0\n"
8808 + "int $4\n0:\n"
8809 + _ASM_EXTABLE(0b, 0b)
8810 +#endif
8811 +
8812 + "sete %1\n"
8813 : "+m" (l->a.counter), "=qm" (c)
8814 : : "memory");
8815 return c != 0;
8816 @@ -91,7 +141,16 @@ static inline int local_inc_and_test(local_t *l)
8817 {
8818 unsigned char c;
8819
8820 - asm volatile(_ASM_INC "%0; sete %1"
8821 + asm volatile(_ASM_INC "%0\n"
8822 +
8823 +#ifdef CONFIG_PAX_REFCOUNT
8824 + "jno 0f\n"
8825 + _ASM_DEC "%0\n"
8826 + "int $4\n0:\n"
8827 + _ASM_EXTABLE(0b, 0b)
8828 +#endif
8829 +
8830 + "sete %1\n"
8831 : "+m" (l->a.counter), "=qm" (c)
8832 : : "memory");
8833 return c != 0;
8834 @@ -110,7 +169,16 @@ static inline int local_add_negative(long i, local_t *l)
8835 {
8836 unsigned char c;
8837
8838 - asm volatile(_ASM_ADD "%2,%0; sets %1"
8839 + asm volatile(_ASM_ADD "%2,%0\n"
8840 +
8841 +#ifdef CONFIG_PAX_REFCOUNT
8842 + "jno 0f\n"
8843 + _ASM_SUB "%2,%0\n"
8844 + "int $4\n0:\n"
8845 + _ASM_EXTABLE(0b, 0b)
8846 +#endif
8847 +
8848 + "sets %1\n"
8849 : "+m" (l->a.counter), "=qm" (c)
8850 : "ir" (i) : "memory");
8851 return c;
8852 @@ -133,7 +201,15 @@ static inline long local_add_return(long i, local_t *l)
8853 #endif
8854 /* Modern 486+ processor */
8855 __i = i;
8856 - asm volatile(_ASM_XADD "%0, %1;"
8857 + asm volatile(_ASM_XADD "%0, %1\n"
8858 +
8859 +#ifdef CONFIG_PAX_REFCOUNT
8860 + "jno 0f\n"
8861 + _ASM_MOV "%0,%1\n"
8862 + "int $4\n0:\n"
8863 + _ASM_EXTABLE(0b, 0b)
8864 +#endif
8865 +
8866 : "+r" (i), "+m" (l->a.counter)
8867 : : "memory");
8868 return i + __i;
8869 diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h
8870 index 593e51d..fa69c9a 100644
8871 --- a/arch/x86/include/asm/mman.h
8872 +++ b/arch/x86/include/asm/mman.h
8873 @@ -5,4 +5,14 @@
8874
8875 #include <asm-generic/mman.h>
8876
8877 +#ifdef __KERNEL__
8878 +#ifndef __ASSEMBLY__
8879 +#ifdef CONFIG_X86_32
8880 +#define arch_mmap_check i386_mmap_check
8881 +int i386_mmap_check(unsigned long addr, unsigned long len,
8882 + unsigned long flags);
8883 +#endif
8884 +#endif
8885 +#endif
8886 +
8887 #endif /* _ASM_X86_MMAN_H */
8888 diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
8889 index 5f55e69..e20bfb1 100644
8890 --- a/arch/x86/include/asm/mmu.h
8891 +++ b/arch/x86/include/asm/mmu.h
8892 @@ -9,7 +9,7 @@
8893 * we put the segment information here.
8894 */
8895 typedef struct {
8896 - void *ldt;
8897 + struct desc_struct *ldt;
8898 int size;
8899
8900 #ifdef CONFIG_X86_64
8901 @@ -18,7 +18,19 @@ typedef struct {
8902 #endif
8903
8904 struct mutex lock;
8905 - void *vdso;
8906 + unsigned long vdso;
8907 +
8908 +#ifdef CONFIG_X86_32
8909 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
8910 + unsigned long user_cs_base;
8911 + unsigned long user_cs_limit;
8912 +
8913 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
8914 + cpumask_t cpu_user_cs_mask;
8915 +#endif
8916 +
8917 +#endif
8918 +#endif
8919 } mm_context_t;
8920
8921 #ifdef CONFIG_SMP
8922 diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
8923 index 6902152..399f3a2 100644
8924 --- a/arch/x86/include/asm/mmu_context.h
8925 +++ b/arch/x86/include/asm/mmu_context.h
8926 @@ -24,6 +24,18 @@ void destroy_context(struct mm_struct *mm);
8927
8928 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
8929 {
8930 +
8931 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8932 + unsigned int i;
8933 + pgd_t *pgd;
8934 +
8935 + pax_open_kernel();
8936 + pgd = get_cpu_pgd(smp_processor_id());
8937 + for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
8938 + set_pgd_batched(pgd+i, native_make_pgd(0));
8939 + pax_close_kernel();
8940 +#endif
8941 +
8942 #ifdef CONFIG_SMP
8943 if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
8944 percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
8945 @@ -34,16 +46,30 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
8946 struct task_struct *tsk)
8947 {
8948 unsigned cpu = smp_processor_id();
8949 +#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
8950 + int tlbstate = TLBSTATE_OK;
8951 +#endif
8952
8953 if (likely(prev != next)) {
8954 #ifdef CONFIG_SMP
8955 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
8956 + tlbstate = percpu_read(cpu_tlbstate.state);
8957 +#endif
8958 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
8959 percpu_write(cpu_tlbstate.active_mm, next);
8960 #endif
8961 cpumask_set_cpu(cpu, mm_cpumask(next));
8962
8963 /* Re-load page tables */
8964 +#ifdef CONFIG_PAX_PER_CPU_PGD
8965 + pax_open_kernel();
8966 + __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
8967 + __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
8968 + pax_close_kernel();
8969 + load_cr3(get_cpu_pgd(cpu));
8970 +#else
8971 load_cr3(next->pgd);
8972 +#endif
8973
8974 /* stop flush ipis for the previous mm */
8975 cpumask_clear_cpu(cpu, mm_cpumask(prev));
8976 @@ -53,9 +79,38 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
8977 */
8978 if (unlikely(prev->context.ldt != next->context.ldt))
8979 load_LDT_nolock(&next->context);
8980 - }
8981 +
8982 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
8983 + if (!(__supported_pte_mask & _PAGE_NX)) {
8984 + smp_mb__before_clear_bit();
8985 + cpu_clear(cpu, prev->context.cpu_user_cs_mask);
8986 + smp_mb__after_clear_bit();
8987 + cpu_set(cpu, next->context.cpu_user_cs_mask);
8988 + }
8989 +#endif
8990 +
8991 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
8992 + if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
8993 + prev->context.user_cs_limit != next->context.user_cs_limit))
8994 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
8995 #ifdef CONFIG_SMP
8996 + else if (unlikely(tlbstate != TLBSTATE_OK))
8997 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
8998 +#endif
8999 +#endif
9000 +
9001 + }
9002 else {
9003 +
9004 +#ifdef CONFIG_PAX_PER_CPU_PGD
9005 + pax_open_kernel();
9006 + __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
9007 + __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
9008 + pax_close_kernel();
9009 + load_cr3(get_cpu_pgd(cpu));
9010 +#endif
9011 +
9012 +#ifdef CONFIG_SMP
9013 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
9014 BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
9015
9016 @@ -64,11 +119,28 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
9017 * tlb flush IPI delivery. We must reload CR3
9018 * to make sure to use no freed page tables.
9019 */
9020 +
9021 +#ifndef CONFIG_PAX_PER_CPU_PGD
9022 load_cr3(next->pgd);
9023 +#endif
9024 +
9025 load_LDT_nolock(&next->context);
9026 +
9027 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
9028 + if (!(__supported_pte_mask & _PAGE_NX))
9029 + cpu_set(cpu, next->context.cpu_user_cs_mask);
9030 +#endif
9031 +
9032 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
9033 +#ifdef CONFIG_PAX_PAGEEXEC
9034 + if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
9035 +#endif
9036 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
9037 +#endif
9038 +
9039 }
9040 +#endif
9041 }
9042 -#endif
9043 }
9044
9045 #define activate_mm(prev, next) \
9046 diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
9047 index 9eae775..c914fea 100644
9048 --- a/arch/x86/include/asm/module.h
9049 +++ b/arch/x86/include/asm/module.h
9050 @@ -5,6 +5,7 @@
9051
9052 #ifdef CONFIG_X86_64
9053 /* X86_64 does not define MODULE_PROC_FAMILY */
9054 +#define MODULE_PROC_FAMILY ""
9055 #elif defined CONFIG_M386
9056 #define MODULE_PROC_FAMILY "386 "
9057 #elif defined CONFIG_M486
9058 @@ -59,8 +60,20 @@
9059 #error unknown processor family
9060 #endif
9061
9062 -#ifdef CONFIG_X86_32
9063 -# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
9064 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
9065 +#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS "
9066 +#elif defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR)
9067 +#define MODULE_PAX_KERNEXEC "KERNEXEC_OR "
9068 +#else
9069 +#define MODULE_PAX_KERNEXEC ""
9070 #endif
9071
9072 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9073 +#define MODULE_PAX_UDEREF "UDEREF "
9074 +#else
9075 +#define MODULE_PAX_UDEREF ""
9076 +#endif
9077 +
9078 +#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
9079 +
9080 #endif /* _ASM_X86_MODULE_H */
9081 diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
9082 index 7639dbf..e08a58c 100644
9083 --- a/arch/x86/include/asm/page_64_types.h
9084 +++ b/arch/x86/include/asm/page_64_types.h
9085 @@ -56,7 +56,7 @@ void copy_page(void *to, void *from);
9086
9087 /* duplicated to the one in bootmem.h */
9088 extern unsigned long max_pfn;
9089 -extern unsigned long phys_base;
9090 +extern const unsigned long phys_base;
9091
9092 extern unsigned long __phys_addr(unsigned long);
9093 #define __phys_reloc_hide(x) (x)
9094 diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
9095 index a7d2db9..edb023e 100644
9096 --- a/arch/x86/include/asm/paravirt.h
9097 +++ b/arch/x86/include/asm/paravirt.h
9098 @@ -667,6 +667,18 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
9099 val);
9100 }
9101
9102 +static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
9103 +{
9104 + pgdval_t val = native_pgd_val(pgd);
9105 +
9106 + if (sizeof(pgdval_t) > sizeof(long))
9107 + PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
9108 + val, (u64)val >> 32);
9109 + else
9110 + PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
9111 + val);
9112 +}
9113 +
9114 static inline void pgd_clear(pgd_t *pgdp)
9115 {
9116 set_pgd(pgdp, __pgd(0));
9117 @@ -748,6 +760,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
9118 pv_mmu_ops.set_fixmap(idx, phys, flags);
9119 }
9120
9121 +#ifdef CONFIG_PAX_KERNEXEC
9122 +static inline unsigned long pax_open_kernel(void)
9123 +{
9124 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
9125 +}
9126 +
9127 +static inline unsigned long pax_close_kernel(void)
9128 +{
9129 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
9130 +}
9131 +#else
9132 +static inline unsigned long pax_open_kernel(void) { return 0; }
9133 +static inline unsigned long pax_close_kernel(void) { return 0; }
9134 +#endif
9135 +
9136 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
9137
9138 static inline int arch_spin_is_locked(struct arch_spinlock *lock)
9139 @@ -964,7 +991,7 @@ extern void default_banner(void);
9140
9141 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
9142 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
9143 -#define PARA_INDIRECT(addr) *%cs:addr
9144 +#define PARA_INDIRECT(addr) *%ss:addr
9145 #endif
9146
9147 #define INTERRUPT_RETURN \
9148 @@ -1041,6 +1068,21 @@ extern void default_banner(void);
9149 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
9150 CLBR_NONE, \
9151 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
9152 +
9153 +#define GET_CR0_INTO_RDI \
9154 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
9155 + mov %rax,%rdi
9156 +
9157 +#define SET_RDI_INTO_CR0 \
9158 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
9159 +
9160 +#define GET_CR3_INTO_RDI \
9161 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
9162 + mov %rax,%rdi
9163 +
9164 +#define SET_RDI_INTO_CR3 \
9165 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
9166 +
9167 #endif /* CONFIG_X86_32 */
9168
9169 #endif /* __ASSEMBLY__ */
9170 diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
9171 index 8e8b9a4..f07d725 100644
9172 --- a/arch/x86/include/asm/paravirt_types.h
9173 +++ b/arch/x86/include/asm/paravirt_types.h
9174 @@ -84,20 +84,20 @@ struct pv_init_ops {
9175 */
9176 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
9177 unsigned long addr, unsigned len);
9178 -};
9179 +} __no_const;
9180
9181
9182 struct pv_lazy_ops {
9183 /* Set deferred update mode, used for batching operations. */
9184 void (*enter)(void);
9185 void (*leave)(void);
9186 -};
9187 +} __no_const;
9188
9189 struct pv_time_ops {
9190 unsigned long long (*sched_clock)(void);
9191 unsigned long long (*steal_clock)(int cpu);
9192 unsigned long (*get_tsc_khz)(void);
9193 -};
9194 +} __no_const;
9195
9196 struct pv_cpu_ops {
9197 /* hooks for various privileged instructions */
9198 @@ -193,7 +193,7 @@ struct pv_cpu_ops {
9199
9200 void (*start_context_switch)(struct task_struct *prev);
9201 void (*end_context_switch)(struct task_struct *next);
9202 -};
9203 +} __no_const;
9204
9205 struct pv_irq_ops {
9206 /*
9207 @@ -224,7 +224,7 @@ struct pv_apic_ops {
9208 unsigned long start_eip,
9209 unsigned long start_esp);
9210 #endif
9211 -};
9212 +} __no_const;
9213
9214 struct pv_mmu_ops {
9215 unsigned long (*read_cr2)(void);
9216 @@ -313,6 +313,7 @@ struct pv_mmu_ops {
9217 struct paravirt_callee_save make_pud;
9218
9219 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
9220 + void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
9221 #endif /* PAGETABLE_LEVELS == 4 */
9222 #endif /* PAGETABLE_LEVELS >= 3 */
9223
9224 @@ -324,6 +325,12 @@ struct pv_mmu_ops {
9225 an mfn. We can tell which is which from the index. */
9226 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
9227 phys_addr_t phys, pgprot_t flags);
9228 +
9229 +#ifdef CONFIG_PAX_KERNEXEC
9230 + unsigned long (*pax_open_kernel)(void);
9231 + unsigned long (*pax_close_kernel)(void);
9232 +#endif
9233 +
9234 };
9235
9236 struct arch_spinlock;
9237 @@ -334,7 +341,7 @@ struct pv_lock_ops {
9238 void (*spin_lock_flags)(struct arch_spinlock *lock, unsigned long flags);
9239 int (*spin_trylock)(struct arch_spinlock *lock);
9240 void (*spin_unlock)(struct arch_spinlock *lock);
9241 -};
9242 +} __no_const;
9243
9244 /* This contains all the paravirt structures: we get a convenient
9245 * number for each function using the offset which we use to indicate
9246 diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
9247 index b4389a4..b7ff22c 100644
9248 --- a/arch/x86/include/asm/pgalloc.h
9249 +++ b/arch/x86/include/asm/pgalloc.h
9250 @@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm,
9251 pmd_t *pmd, pte_t *pte)
9252 {
9253 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
9254 + set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
9255 +}
9256 +
9257 +static inline void pmd_populate_user(struct mm_struct *mm,
9258 + pmd_t *pmd, pte_t *pte)
9259 +{
9260 + paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
9261 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
9262 }
9263
9264 diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
9265 index 98391db..8f6984e 100644
9266 --- a/arch/x86/include/asm/pgtable-2level.h
9267 +++ b/arch/x86/include/asm/pgtable-2level.h
9268 @@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)
9269
9270 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
9271 {
9272 + pax_open_kernel();
9273 *pmdp = pmd;
9274 + pax_close_kernel();
9275 }
9276
9277 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
9278 diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
9279 index effff47..f9e4035 100644
9280 --- a/arch/x86/include/asm/pgtable-3level.h
9281 +++ b/arch/x86/include/asm/pgtable-3level.h
9282 @@ -38,12 +38,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
9283
9284 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
9285 {
9286 + pax_open_kernel();
9287 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
9288 + pax_close_kernel();
9289 }
9290
9291 static inline void native_set_pud(pud_t *pudp, pud_t pud)
9292 {
9293 + pax_open_kernel();
9294 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
9295 + pax_close_kernel();
9296 }
9297
9298 /*
9299 diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
9300 index 18601c8..3d716d1 100644
9301 --- a/arch/x86/include/asm/pgtable.h
9302 +++ b/arch/x86/include/asm/pgtable.h
9303 @@ -44,6 +44,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
9304
9305 #ifndef __PAGETABLE_PUD_FOLDED
9306 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
9307 +#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
9308 #define pgd_clear(pgd) native_pgd_clear(pgd)
9309 #endif
9310
9311 @@ -81,12 +82,51 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
9312
9313 #define arch_end_context_switch(prev) do {} while(0)
9314
9315 +#define pax_open_kernel() native_pax_open_kernel()
9316 +#define pax_close_kernel() native_pax_close_kernel()
9317 #endif /* CONFIG_PARAVIRT */
9318
9319 +#define __HAVE_ARCH_PAX_OPEN_KERNEL
9320 +#define __HAVE_ARCH_PAX_CLOSE_KERNEL
9321 +
9322 +#ifdef CONFIG_PAX_KERNEXEC
9323 +static inline unsigned long native_pax_open_kernel(void)
9324 +{
9325 + unsigned long cr0;
9326 +
9327 + preempt_disable();
9328 + barrier();
9329 + cr0 = read_cr0() ^ X86_CR0_WP;
9330 + BUG_ON(unlikely(cr0 & X86_CR0_WP));
9331 + write_cr0(cr0);
9332 + return cr0 ^ X86_CR0_WP;
9333 +}
9334 +
9335 +static inline unsigned long native_pax_close_kernel(void)
9336 +{
9337 + unsigned long cr0;
9338 +
9339 + cr0 = read_cr0() ^ X86_CR0_WP;
9340 + BUG_ON(unlikely(!(cr0 & X86_CR0_WP)));
9341 + write_cr0(cr0);
9342 + barrier();
9343 + preempt_enable_no_resched();
9344 + return cr0 ^ X86_CR0_WP;
9345 +}
9346 +#else
9347 +static inline unsigned long native_pax_open_kernel(void) { return 0; }
9348 +static inline unsigned long native_pax_close_kernel(void) { return 0; }
9349 +#endif
9350 +
9351 /*
9352 * The following only work if pte_present() is true.
9353 * Undefined behaviour if not..
9354 */
9355 +static inline int pte_user(pte_t pte)
9356 +{
9357 + return pte_val(pte) & _PAGE_USER;
9358 +}
9359 +
9360 static inline int pte_dirty(pte_t pte)
9361 {
9362 return pte_flags(pte) & _PAGE_DIRTY;
9363 @@ -196,9 +236,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
9364 return pte_clear_flags(pte, _PAGE_RW);
9365 }
9366
9367 +static inline pte_t pte_mkread(pte_t pte)
9368 +{
9369 + return __pte(pte_val(pte) | _PAGE_USER);
9370 +}
9371 +
9372 static inline pte_t pte_mkexec(pte_t pte)
9373 {
9374 - return pte_clear_flags(pte, _PAGE_NX);
9375 +#ifdef CONFIG_X86_PAE
9376 + if (__supported_pte_mask & _PAGE_NX)
9377 + return pte_clear_flags(pte, _PAGE_NX);
9378 + else
9379 +#endif
9380 + return pte_set_flags(pte, _PAGE_USER);
9381 +}
9382 +
9383 +static inline pte_t pte_exprotect(pte_t pte)
9384 +{
9385 +#ifdef CONFIG_X86_PAE
9386 + if (__supported_pte_mask & _PAGE_NX)
9387 + return pte_set_flags(pte, _PAGE_NX);
9388 + else
9389 +#endif
9390 + return pte_clear_flags(pte, _PAGE_USER);
9391 }
9392
9393 static inline pte_t pte_mkdirty(pte_t pte)
9394 @@ -390,6 +450,15 @@ pte_t *populate_extra_pte(unsigned long vaddr);
9395 #endif
9396
9397 #ifndef __ASSEMBLY__
9398 +
9399 +#ifdef CONFIG_PAX_PER_CPU_PGD
9400 +extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
9401 +static inline pgd_t *get_cpu_pgd(unsigned int cpu)
9402 +{
9403 + return cpu_pgd[cpu];
9404 +}
9405 +#endif
9406 +
9407 #include <linux/mm_types.h>
9408
9409 static inline int pte_none(pte_t pte)
9410 @@ -560,7 +629,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
9411
9412 static inline int pgd_bad(pgd_t pgd)
9413 {
9414 - return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
9415 + return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
9416 }
9417
9418 static inline int pgd_none(pgd_t pgd)
9419 @@ -583,7 +652,12 @@ static inline int pgd_none(pgd_t pgd)
9420 * pgd_offset() returns a (pgd_t *)
9421 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
9422 */
9423 -#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
9424 +#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
9425 +
9426 +#ifdef CONFIG_PAX_PER_CPU_PGD
9427 +#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
9428 +#endif
9429 +
9430 /*
9431 * a shortcut which implies the use of the kernel's pgd, instead
9432 * of a process's
9433 @@ -594,6 +668,20 @@ static inline int pgd_none(pgd_t pgd)
9434 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
9435 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
9436
9437 +#ifdef CONFIG_X86_32
9438 +#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
9439 +#else
9440 +#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
9441 +#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
9442 +
9443 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9444 +#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
9445 +#else
9446 +#define PAX_USER_SHADOW_BASE (_AC(0,UL))
9447 +#endif
9448 +
9449 +#endif
9450 +
9451 #ifndef __ASSEMBLY__
9452
9453 extern int direct_gbpages;
9454 @@ -758,11 +846,23 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
9455 * dst and src can be on the same page, but the range must not overlap,
9456 * and must not cross a page boundary.
9457 */
9458 -static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
9459 +static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
9460 {
9461 - memcpy(dst, src, count * sizeof(pgd_t));
9462 + pax_open_kernel();
9463 + while (count--)
9464 + *dst++ = *src++;
9465 + pax_close_kernel();
9466 }
9467
9468 +#ifdef CONFIG_PAX_PER_CPU_PGD
9469 +extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count);
9470 +#endif
9471 +
9472 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
9473 +extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count);
9474 +#else
9475 +static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count) {}
9476 +#endif
9477
9478 #include <asm-generic/pgtable.h>
9479 #endif /* __ASSEMBLY__ */
9480 diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
9481 index 0c92113..34a77c6 100644
9482 --- a/arch/x86/include/asm/pgtable_32.h
9483 +++ b/arch/x86/include/asm/pgtable_32.h
9484 @@ -25,9 +25,6 @@
9485 struct mm_struct;
9486 struct vm_area_struct;
9487
9488 -extern pgd_t swapper_pg_dir[1024];
9489 -extern pgd_t initial_page_table[1024];
9490 -
9491 static inline void pgtable_cache_init(void) { }
9492 static inline void check_pgt_cache(void) { }
9493 void paging_init(void);
9494 @@ -48,6 +45,12 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
9495 # include <asm/pgtable-2level.h>
9496 #endif
9497
9498 +extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
9499 +extern pgd_t initial_page_table[PTRS_PER_PGD];
9500 +#ifdef CONFIG_X86_PAE
9501 +extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
9502 +#endif
9503 +
9504 #if defined(CONFIG_HIGHPTE)
9505 #define pte_offset_map(dir, address) \
9506 ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
9507 @@ -62,7 +65,9 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
9508 /* Clear a kernel PTE and flush it from the TLB */
9509 #define kpte_clear_flush(ptep, vaddr) \
9510 do { \
9511 + pax_open_kernel(); \
9512 pte_clear(&init_mm, (vaddr), (ptep)); \
9513 + pax_close_kernel(); \
9514 __flush_tlb_one((vaddr)); \
9515 } while (0)
9516
9517 @@ -74,6 +79,9 @@ do { \
9518
9519 #endif /* !__ASSEMBLY__ */
9520
9521 +#define HAVE_ARCH_UNMAPPED_AREA
9522 +#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
9523 +
9524 /*
9525 * kern_addr_valid() is (1) for FLATMEM and (0) for
9526 * SPARSEMEM and DISCONTIGMEM
9527 diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
9528 index ed5903b..c7fe163 100644
9529 --- a/arch/x86/include/asm/pgtable_32_types.h
9530 +++ b/arch/x86/include/asm/pgtable_32_types.h
9531 @@ -8,7 +8,7 @@
9532 */
9533 #ifdef CONFIG_X86_PAE
9534 # include <asm/pgtable-3level_types.h>
9535 -# define PMD_SIZE (1UL << PMD_SHIFT)
9536 +# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
9537 # define PMD_MASK (~(PMD_SIZE - 1))
9538 #else
9539 # include <asm/pgtable-2level_types.h>
9540 @@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
9541 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
9542 #endif
9543
9544 +#ifdef CONFIG_PAX_KERNEXEC
9545 +#ifndef __ASSEMBLY__
9546 +extern unsigned char MODULES_EXEC_VADDR[];
9547 +extern unsigned char MODULES_EXEC_END[];
9548 +#endif
9549 +#include <asm/boot.h>
9550 +#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
9551 +#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
9552 +#else
9553 +#define ktla_ktva(addr) (addr)
9554 +#define ktva_ktla(addr) (addr)
9555 +#endif
9556 +
9557 #define MODULES_VADDR VMALLOC_START
9558 #define MODULES_END VMALLOC_END
9559 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
9560 diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
9561 index 975f709..107976d 100644
9562 --- a/arch/x86/include/asm/pgtable_64.h
9563 +++ b/arch/x86/include/asm/pgtable_64.h
9564 @@ -16,10 +16,14 @@
9565
9566 extern pud_t level3_kernel_pgt[512];
9567 extern pud_t level3_ident_pgt[512];
9568 +extern pud_t level3_vmalloc_start_pgt[512];
9569 +extern pud_t level3_vmalloc_end_pgt[512];
9570 +extern pud_t level3_vmemmap_pgt[512];
9571 +extern pud_t level2_vmemmap_pgt[512];
9572 extern pmd_t level2_kernel_pgt[512];
9573 extern pmd_t level2_fixmap_pgt[512];
9574 -extern pmd_t level2_ident_pgt[512];
9575 -extern pgd_t init_level4_pgt[];
9576 +extern pmd_t level2_ident_pgt[512*2];
9577 +extern pgd_t init_level4_pgt[512];
9578
9579 #define swapper_pg_dir init_level4_pgt
9580
9581 @@ -61,7 +65,9 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
9582
9583 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
9584 {
9585 + pax_open_kernel();
9586 *pmdp = pmd;
9587 + pax_close_kernel();
9588 }
9589
9590 static inline void native_pmd_clear(pmd_t *pmd)
9591 @@ -107,6 +113,13 @@ static inline void native_pud_clear(pud_t *pud)
9592
9593 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
9594 {
9595 + pax_open_kernel();
9596 + *pgdp = pgd;
9597 + pax_close_kernel();
9598 +}
9599 +
9600 +static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
9601 +{
9602 *pgdp = pgd;
9603 }
9604
9605 diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
9606 index 766ea16..5b96cb3 100644
9607 --- a/arch/x86/include/asm/pgtable_64_types.h
9608 +++ b/arch/x86/include/asm/pgtable_64_types.h
9609 @@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
9610 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
9611 #define MODULES_END _AC(0xffffffffff000000, UL)
9612 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
9613 +#define MODULES_EXEC_VADDR MODULES_VADDR
9614 +#define MODULES_EXEC_END MODULES_END
9615 +
9616 +#define ktla_ktva(addr) (addr)
9617 +#define ktva_ktla(addr) (addr)
9618
9619 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
9620 diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
9621 index 013286a..8b42f4f 100644
9622 --- a/arch/x86/include/asm/pgtable_types.h
9623 +++ b/arch/x86/include/asm/pgtable_types.h
9624 @@ -16,13 +16,12 @@
9625 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
9626 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
9627 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
9628 -#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
9629 +#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
9630 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
9631 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
9632 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
9633 -#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
9634 -#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
9635 -#define _PAGE_BIT_SPLITTING _PAGE_BIT_UNUSED1 /* only valid on a PSE pmd */
9636 +#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
9637 +#define _PAGE_BIT_SPLITTING _PAGE_BIT_SPECIAL /* only valid on a PSE pmd */
9638 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
9639
9640 /* If _PAGE_BIT_PRESENT is clear, we use these: */
9641 @@ -40,7 +39,6 @@
9642 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
9643 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
9644 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
9645 -#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
9646 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
9647 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
9648 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
9649 @@ -57,8 +55,10 @@
9650
9651 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
9652 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
9653 -#else
9654 +#elif defined(CONFIG_KMEMCHECK)
9655 #define _PAGE_NX (_AT(pteval_t, 0))
9656 +#else
9657 +#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
9658 #endif
9659
9660 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
9661 @@ -96,6 +96,9 @@
9662 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
9663 _PAGE_ACCESSED)
9664
9665 +#define PAGE_READONLY_NOEXEC PAGE_READONLY
9666 +#define PAGE_SHARED_NOEXEC PAGE_SHARED
9667 +
9668 #define __PAGE_KERNEL_EXEC \
9669 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
9670 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
9671 @@ -106,7 +109,7 @@
9672 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
9673 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
9674 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
9675 -#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
9676 +#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
9677 #define __PAGE_KERNEL_VVAR (__PAGE_KERNEL_RO | _PAGE_USER)
9678 #define __PAGE_KERNEL_VVAR_NOCACHE (__PAGE_KERNEL_VVAR | _PAGE_PCD | _PAGE_PWT)
9679 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
9680 @@ -168,8 +171,8 @@
9681 * bits are combined, this will alow user to access the high address mapped
9682 * VDSO in the presence of CONFIG_COMPAT_VDSO
9683 */
9684 -#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
9685 -#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
9686 +#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
9687 +#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
9688 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
9689 #endif
9690
9691 @@ -207,7 +210,17 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
9692 {
9693 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
9694 }
9695 +#endif
9696
9697 +#if PAGETABLE_LEVELS == 3
9698 +#include <asm-generic/pgtable-nopud.h>
9699 +#endif
9700 +
9701 +#if PAGETABLE_LEVELS == 2
9702 +#include <asm-generic/pgtable-nopmd.h>
9703 +#endif
9704 +
9705 +#ifndef __ASSEMBLY__
9706 #if PAGETABLE_LEVELS > 3
9707 typedef struct { pudval_t pud; } pud_t;
9708
9709 @@ -221,8 +234,6 @@ static inline pudval_t native_pud_val(pud_t pud)
9710 return pud.pud;
9711 }
9712 #else
9713 -#include <asm-generic/pgtable-nopud.h>
9714 -
9715 static inline pudval_t native_pud_val(pud_t pud)
9716 {
9717 return native_pgd_val(pud.pgd);
9718 @@ -242,8 +253,6 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
9719 return pmd.pmd;
9720 }
9721 #else
9722 -#include <asm-generic/pgtable-nopmd.h>
9723 -
9724 static inline pmdval_t native_pmd_val(pmd_t pmd)
9725 {
9726 return native_pgd_val(pmd.pud.pgd);
9727 @@ -283,7 +292,6 @@ typedef struct page *pgtable_t;
9728
9729 extern pteval_t __supported_pte_mask;
9730 extern void set_nx(void);
9731 -extern int nx_enabled;
9732
9733 #define pgprot_writecombine pgprot_writecombine
9734 extern pgprot_t pgprot_writecombine(pgprot_t prot);
9735 diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
9736 index b650435..eefa566 100644
9737 --- a/arch/x86/include/asm/processor.h
9738 +++ b/arch/x86/include/asm/processor.h
9739 @@ -268,7 +268,7 @@ struct tss_struct {
9740
9741 } ____cacheline_aligned;
9742
9743 -DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
9744 +extern struct tss_struct init_tss[NR_CPUS];
9745
9746 /*
9747 * Save the original ist values for checking stack pointers during debugging
9748 @@ -860,11 +860,18 @@ static inline void spin_lock_prefetch(const void *x)
9749 */
9750 #define TASK_SIZE PAGE_OFFSET
9751 #define TASK_SIZE_MAX TASK_SIZE
9752 +
9753 +#ifdef CONFIG_PAX_SEGMEXEC
9754 +#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
9755 +#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
9756 +#else
9757 #define STACK_TOP TASK_SIZE
9758 -#define STACK_TOP_MAX STACK_TOP
9759 +#endif
9760 +
9761 +#define STACK_TOP_MAX TASK_SIZE
9762
9763 #define INIT_THREAD { \
9764 - .sp0 = sizeof(init_stack) + (long)&init_stack, \
9765 + .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
9766 .vm86_info = NULL, \
9767 .sysenter_cs = __KERNEL_CS, \
9768 .io_bitmap_ptr = NULL, \
9769 @@ -878,7 +885,7 @@ static inline void spin_lock_prefetch(const void *x)
9770 */
9771 #define INIT_TSS { \
9772 .x86_tss = { \
9773 - .sp0 = sizeof(init_stack) + (long)&init_stack, \
9774 + .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
9775 .ss0 = __KERNEL_DS, \
9776 .ss1 = __KERNEL_CS, \
9777 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
9778 @@ -889,11 +896,7 @@ static inline void spin_lock_prefetch(const void *x)
9779 extern unsigned long thread_saved_pc(struct task_struct *tsk);
9780
9781 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
9782 -#define KSTK_TOP(info) \
9783 -({ \
9784 - unsigned long *__ptr = (unsigned long *)(info); \
9785 - (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
9786 -})
9787 +#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
9788
9789 /*
9790 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
9791 @@ -908,7 +911,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
9792 #define task_pt_regs(task) \
9793 ({ \
9794 struct pt_regs *__regs__; \
9795 - __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
9796 + __regs__ = (struct pt_regs *)((task)->thread.sp0); \
9797 __regs__ - 1; \
9798 })
9799
9800 @@ -918,13 +921,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
9801 /*
9802 * User space process size. 47bits minus one guard page.
9803 */
9804 -#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
9805 +#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
9806
9807 /* This decides where the kernel will search for a free chunk of vm
9808 * space during mmap's.
9809 */
9810 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
9811 - 0xc0000000 : 0xFFFFe000)
9812 + 0xc0000000 : 0xFFFFf000)
9813
9814 #define TASK_SIZE (test_thread_flag(TIF_IA32) ? \
9815 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
9816 @@ -935,11 +938,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
9817 #define STACK_TOP_MAX TASK_SIZE_MAX
9818
9819 #define INIT_THREAD { \
9820 - .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
9821 + .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
9822 }
9823
9824 #define INIT_TSS { \
9825 - .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
9826 + .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
9827 }
9828
9829 /*
9830 @@ -961,6 +964,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
9831 */
9832 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
9833
9834 +#ifdef CONFIG_PAX_SEGMEXEC
9835 +#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
9836 +#endif
9837 +
9838 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
9839
9840 /* Get/set a process' ability to use the timestamp counter instruction */
9841 diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
9842 index 3566454..4bdfb8c 100644
9843 --- a/arch/x86/include/asm/ptrace.h
9844 +++ b/arch/x86/include/asm/ptrace.h
9845 @@ -156,28 +156,29 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
9846 }
9847
9848 /*
9849 - * user_mode_vm(regs) determines whether a register set came from user mode.
9850 + * user_mode(regs) determines whether a register set came from user mode.
9851 * This is true if V8086 mode was enabled OR if the register set was from
9852 * protected mode with RPL-3 CS value. This tricky test checks that with
9853 * one comparison. Many places in the kernel can bypass this full check
9854 - * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
9855 + * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
9856 + * be used.
9857 */
9858 -static inline int user_mode(struct pt_regs *regs)
9859 +static inline int user_mode_novm(struct pt_regs *regs)
9860 {
9861 #ifdef CONFIG_X86_32
9862 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
9863 #else
9864 - return !!(regs->cs & 3);
9865 + return !!(regs->cs & SEGMENT_RPL_MASK);
9866 #endif
9867 }
9868
9869 -static inline int user_mode_vm(struct pt_regs *regs)
9870 +static inline int user_mode(struct pt_regs *regs)
9871 {
9872 #ifdef CONFIG_X86_32
9873 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
9874 USER_RPL;
9875 #else
9876 - return user_mode(regs);
9877 + return user_mode_novm(regs);
9878 #endif
9879 }
9880
9881 @@ -193,15 +194,16 @@ static inline int v8086_mode(struct pt_regs *regs)
9882 #ifdef CONFIG_X86_64
9883 static inline bool user_64bit_mode(struct pt_regs *regs)
9884 {
9885 + unsigned long cs = regs->cs & 0xffff;
9886 #ifndef CONFIG_PARAVIRT
9887 /*
9888 * On non-paravirt systems, this is the only long mode CPL 3
9889 * selector. We do not allow long mode selectors in the LDT.
9890 */
9891 - return regs->cs == __USER_CS;
9892 + return cs == __USER_CS;
9893 #else
9894 /* Headers are too twisted for this to go in paravirt.h. */
9895 - return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs;
9896 + return cs == __USER_CS || cs == pv_info.extra_user_64bit_cs;
9897 #endif
9898 }
9899 #endif
9900 diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
9901 index 92f29706..a79cbbb 100644
9902 --- a/arch/x86/include/asm/reboot.h
9903 +++ b/arch/x86/include/asm/reboot.h
9904 @@ -6,19 +6,19 @@
9905 struct pt_regs;
9906
9907 struct machine_ops {
9908 - void (*restart)(char *cmd);
9909 - void (*halt)(void);
9910 - void (*power_off)(void);
9911 + void (* __noreturn restart)(char *cmd);
9912 + void (* __noreturn halt)(void);
9913 + void (* __noreturn power_off)(void);
9914 void (*shutdown)(void);
9915 void (*crash_shutdown)(struct pt_regs *);
9916 - void (*emergency_restart)(void);
9917 -};
9918 + void (* __noreturn emergency_restart)(void);
9919 +} __no_const;
9920
9921 extern struct machine_ops machine_ops;
9922
9923 void native_machine_crash_shutdown(struct pt_regs *regs);
9924 void native_machine_shutdown(void);
9925 -void machine_real_restart(unsigned int type);
9926 +void machine_real_restart(unsigned int type) __noreturn;
9927 /* These must match dispatch_table in reboot_32.S */
9928 #define MRR_BIOS 0
9929 #define MRR_APM 1
9930 diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
9931 index 2dbe4a7..ce1db00 100644
9932 --- a/arch/x86/include/asm/rwsem.h
9933 +++ b/arch/x86/include/asm/rwsem.h
9934 @@ -64,6 +64,14 @@ static inline void __down_read(struct rw_semaphore *sem)
9935 {
9936 asm volatile("# beginning down_read\n\t"
9937 LOCK_PREFIX _ASM_INC "(%1)\n\t"
9938 +
9939 +#ifdef CONFIG_PAX_REFCOUNT
9940 + "jno 0f\n"
9941 + LOCK_PREFIX _ASM_DEC "(%1)\n"
9942 + "int $4\n0:\n"
9943 + _ASM_EXTABLE(0b, 0b)
9944 +#endif
9945 +
9946 /* adds 0x00000001 */
9947 " jns 1f\n"
9948 " call call_rwsem_down_read_failed\n"
9949 @@ -85,6 +93,14 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
9950 "1:\n\t"
9951 " mov %1,%2\n\t"
9952 " add %3,%2\n\t"
9953 +
9954 +#ifdef CONFIG_PAX_REFCOUNT
9955 + "jno 0f\n"
9956 + "sub %3,%2\n"
9957 + "int $4\n0:\n"
9958 + _ASM_EXTABLE(0b, 0b)
9959 +#endif
9960 +
9961 " jle 2f\n\t"
9962 LOCK_PREFIX " cmpxchg %2,%0\n\t"
9963 " jnz 1b\n\t"
9964 @@ -104,6 +120,14 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
9965 long tmp;
9966 asm volatile("# beginning down_write\n\t"
9967 LOCK_PREFIX " xadd %1,(%2)\n\t"
9968 +
9969 +#ifdef CONFIG_PAX_REFCOUNT
9970 + "jno 0f\n"
9971 + "mov %1,(%2)\n"
9972 + "int $4\n0:\n"
9973 + _ASM_EXTABLE(0b, 0b)
9974 +#endif
9975 +
9976 /* adds 0xffff0001, returns the old value */
9977 " test %1,%1\n\t"
9978 /* was the count 0 before? */
9979 @@ -141,6 +165,14 @@ static inline void __up_read(struct rw_semaphore *sem)
9980 long tmp;
9981 asm volatile("# beginning __up_read\n\t"
9982 LOCK_PREFIX " xadd %1,(%2)\n\t"
9983 +
9984 +#ifdef CONFIG_PAX_REFCOUNT
9985 + "jno 0f\n"
9986 + "mov %1,(%2)\n"
9987 + "int $4\n0:\n"
9988 + _ASM_EXTABLE(0b, 0b)
9989 +#endif
9990 +
9991 /* subtracts 1, returns the old value */
9992 " jns 1f\n\t"
9993 " call call_rwsem_wake\n" /* expects old value in %edx */
9994 @@ -159,6 +191,14 @@ static inline void __up_write(struct rw_semaphore *sem)
9995 long tmp;
9996 asm volatile("# beginning __up_write\n\t"
9997 LOCK_PREFIX " xadd %1,(%2)\n\t"
9998 +
9999 +#ifdef CONFIG_PAX_REFCOUNT
10000 + "jno 0f\n"
10001 + "mov %1,(%2)\n"
10002 + "int $4\n0:\n"
10003 + _ASM_EXTABLE(0b, 0b)
10004 +#endif
10005 +
10006 /* subtracts 0xffff0001, returns the old value */
10007 " jns 1f\n\t"
10008 " call call_rwsem_wake\n" /* expects old value in %edx */
10009 @@ -176,6 +216,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
10010 {
10011 asm volatile("# beginning __downgrade_write\n\t"
10012 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
10013 +
10014 +#ifdef CONFIG_PAX_REFCOUNT
10015 + "jno 0f\n"
10016 + LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
10017 + "int $4\n0:\n"
10018 + _ASM_EXTABLE(0b, 0b)
10019 +#endif
10020 +
10021 /*
10022 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
10023 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
10024 @@ -194,7 +242,15 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
10025 */
10026 static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
10027 {
10028 - asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
10029 + asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
10030 +
10031 +#ifdef CONFIG_PAX_REFCOUNT
10032 + "jno 0f\n"
10033 + LOCK_PREFIX _ASM_SUB "%1,%0\n"
10034 + "int $4\n0:\n"
10035 + _ASM_EXTABLE(0b, 0b)
10036 +#endif
10037 +
10038 : "+m" (sem->count)
10039 : "er" (delta));
10040 }
10041 @@ -204,7 +260,7 @@ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
10042 */
10043 static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
10044 {
10045 - return delta + xadd(&sem->count, delta);
10046 + return delta + xadd_check_overflow(&sem->count, delta);
10047 }
10048
10049 #endif /* __KERNEL__ */
10050 diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
10051 index 5e64171..f58957e 100644
10052 --- a/arch/x86/include/asm/segment.h
10053 +++ b/arch/x86/include/asm/segment.h
10054 @@ -64,10 +64,15 @@
10055 * 26 - ESPFIX small SS
10056 * 27 - per-cpu [ offset to per-cpu data area ]
10057 * 28 - stack_canary-20 [ for stack protector ]
10058 - * 29 - unused
10059 - * 30 - unused
10060 + * 29 - PCI BIOS CS
10061 + * 30 - PCI BIOS DS
10062 * 31 - TSS for double fault handler
10063 */
10064 +#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
10065 +#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
10066 +#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
10067 +#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
10068 +
10069 #define GDT_ENTRY_TLS_MIN 6
10070 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
10071
10072 @@ -79,6 +84,8 @@
10073
10074 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0)
10075
10076 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
10077 +
10078 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1)
10079
10080 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4)
10081 @@ -104,6 +111,12 @@
10082 #define __KERNEL_STACK_CANARY 0
10083 #endif
10084
10085 +#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE+17)
10086 +#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
10087 +
10088 +#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE+18)
10089 +#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
10090 +
10091 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
10092
10093 /*
10094 @@ -141,7 +154,7 @@
10095 */
10096
10097 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
10098 -#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
10099 +#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
10100
10101
10102 #else
10103 @@ -165,6 +178,8 @@
10104 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS*8+3)
10105 #define __USER32_DS __USER_DS
10106
10107 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
10108 +
10109 #define GDT_ENTRY_TSS 8 /* needs two entries */
10110 #define GDT_ENTRY_LDT 10 /* needs two entries */
10111 #define GDT_ENTRY_TLS_MIN 12
10112 @@ -185,6 +200,7 @@
10113 #endif
10114
10115 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
10116 +#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
10117 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
10118 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3)
10119 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3)
10120 diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
10121 index 73b11bc..d4a3b63 100644
10122 --- a/arch/x86/include/asm/smp.h
10123 +++ b/arch/x86/include/asm/smp.h
10124 @@ -36,7 +36,7 @@ DECLARE_PER_CPU(cpumask_var_t, cpu_core_map);
10125 /* cpus sharing the last level cache: */
10126 DECLARE_PER_CPU(cpumask_var_t, cpu_llc_shared_map);
10127 DECLARE_PER_CPU(u16, cpu_llc_id);
10128 -DECLARE_PER_CPU(int, cpu_number);
10129 +DECLARE_PER_CPU(unsigned int, cpu_number);
10130
10131 static inline struct cpumask *cpu_sibling_mask(int cpu)
10132 {
10133 @@ -77,7 +77,7 @@ struct smp_ops {
10134
10135 void (*send_call_func_ipi)(const struct cpumask *mask);
10136 void (*send_call_func_single_ipi)(int cpu);
10137 -};
10138 +} __no_const;
10139
10140 /* Globals due to paravirt */
10141 extern void set_cpu_sibling_map(int cpu);
10142 @@ -192,14 +192,8 @@ extern unsigned disabled_cpus __cpuinitdata;
10143 extern int safe_smp_processor_id(void);
10144
10145 #elif defined(CONFIG_X86_64_SMP)
10146 -#define raw_smp_processor_id() (percpu_read(cpu_number))
10147 -
10148 -#define stack_smp_processor_id() \
10149 -({ \
10150 - struct thread_info *ti; \
10151 - __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
10152 - ti->cpu; \
10153 -})
10154 +#define raw_smp_processor_id() (percpu_read(cpu_number))
10155 +#define stack_smp_processor_id() raw_smp_processor_id()
10156 #define safe_smp_processor_id() smp_processor_id()
10157
10158 #endif
10159 diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
10160 index 972c260..43ab1fd 100644
10161 --- a/arch/x86/include/asm/spinlock.h
10162 +++ b/arch/x86/include/asm/spinlock.h
10163 @@ -188,6 +188,14 @@ static inline int arch_write_can_lock(arch_rwlock_t *lock)
10164 static inline void arch_read_lock(arch_rwlock_t *rw)
10165 {
10166 asm volatile(LOCK_PREFIX READ_LOCK_SIZE(dec) " (%0)\n\t"
10167 +
10168 +#ifdef CONFIG_PAX_REFCOUNT
10169 + "jno 0f\n"
10170 + LOCK_PREFIX READ_LOCK_SIZE(inc) " (%0)\n"
10171 + "int $4\n0:\n"
10172 + _ASM_EXTABLE(0b, 0b)
10173 +#endif
10174 +
10175 "jns 1f\n"
10176 "call __read_lock_failed\n\t"
10177 "1:\n"
10178 @@ -197,6 +205,14 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
10179 static inline void arch_write_lock(arch_rwlock_t *rw)
10180 {
10181 asm volatile(LOCK_PREFIX WRITE_LOCK_SUB(%1) "(%0)\n\t"
10182 +
10183 +#ifdef CONFIG_PAX_REFCOUNT
10184 + "jno 0f\n"
10185 + LOCK_PREFIX WRITE_LOCK_ADD(%1) "(%0)\n"
10186 + "int $4\n0:\n"
10187 + _ASM_EXTABLE(0b, 0b)
10188 +#endif
10189 +
10190 "jz 1f\n"
10191 "call __write_lock_failed\n\t"
10192 "1:\n"
10193 @@ -226,13 +242,29 @@ static inline int arch_write_trylock(arch_rwlock_t *lock)
10194
10195 static inline void arch_read_unlock(arch_rwlock_t *rw)
10196 {
10197 - asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0"
10198 + asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0\n"
10199 +
10200 +#ifdef CONFIG_PAX_REFCOUNT
10201 + "jno 0f\n"
10202 + LOCK_PREFIX READ_LOCK_SIZE(dec) " %0\n"
10203 + "int $4\n0:\n"
10204 + _ASM_EXTABLE(0b, 0b)
10205 +#endif
10206 +
10207 :"+m" (rw->lock) : : "memory");
10208 }
10209
10210 static inline void arch_write_unlock(arch_rwlock_t *rw)
10211 {
10212 - asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0"
10213 + asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0\n"
10214 +
10215 +#ifdef CONFIG_PAX_REFCOUNT
10216 + "jno 0f\n"
10217 + LOCK_PREFIX WRITE_LOCK_SUB(%1) "%0\n"
10218 + "int $4\n0:\n"
10219 + _ASM_EXTABLE(0b, 0b)
10220 +#endif
10221 +
10222 : "+m" (rw->write) : "i" (RW_LOCK_BIAS) : "memory");
10223 }
10224
10225 diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
10226 index 1575177..cb23f52 100644
10227 --- a/arch/x86/include/asm/stackprotector.h
10228 +++ b/arch/x86/include/asm/stackprotector.h
10229 @@ -48,7 +48,7 @@
10230 * head_32 for boot CPU and setup_per_cpu_areas() for others.
10231 */
10232 #define GDT_STACK_CANARY_INIT \
10233 - [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
10234 + [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
10235
10236 /*
10237 * Initialize the stackprotector canary value.
10238 @@ -113,7 +113,7 @@ static inline void setup_stack_canary_segment(int cpu)
10239
10240 static inline void load_stack_canary_segment(void)
10241 {
10242 -#ifdef CONFIG_X86_32
10243 +#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
10244 asm volatile ("mov %0, %%gs" : : "r" (0));
10245 #endif
10246 }
10247 diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
10248 index 70bbe39..4ae2bd4 100644
10249 --- a/arch/x86/include/asm/stacktrace.h
10250 +++ b/arch/x86/include/asm/stacktrace.h
10251 @@ -11,28 +11,20 @@
10252
10253 extern int kstack_depth_to_print;
10254
10255 -struct thread_info;
10256 +struct task_struct;
10257 struct stacktrace_ops;
10258
10259 -typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
10260 - unsigned long *stack,
10261 - unsigned long bp,
10262 - const struct stacktrace_ops *ops,
10263 - void *data,
10264 - unsigned long *end,
10265 - int *graph);
10266 +typedef unsigned long walk_stack_t(struct task_struct *task,
10267 + void *stack_start,
10268 + unsigned long *stack,
10269 + unsigned long bp,
10270 + const struct stacktrace_ops *ops,
10271 + void *data,
10272 + unsigned long *end,
10273 + int *graph);
10274
10275 -extern unsigned long
10276 -print_context_stack(struct thread_info *tinfo,
10277 - unsigned long *stack, unsigned long bp,
10278 - const struct stacktrace_ops *ops, void *data,
10279 - unsigned long *end, int *graph);
10280 -
10281 -extern unsigned long
10282 -print_context_stack_bp(struct thread_info *tinfo,
10283 - unsigned long *stack, unsigned long bp,
10284 - const struct stacktrace_ops *ops, void *data,
10285 - unsigned long *end, int *graph);
10286 +extern walk_stack_t print_context_stack;
10287 +extern walk_stack_t print_context_stack_bp;
10288
10289 /* Generic stack tracer with callbacks */
10290
10291 @@ -40,7 +32,7 @@ struct stacktrace_ops {
10292 void (*address)(void *data, unsigned long address, int reliable);
10293 /* On negative return stop dumping */
10294 int (*stack)(void *data, char *name);
10295 - walk_stack_t walk_stack;
10296 + walk_stack_t *walk_stack;
10297 };
10298
10299 void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
10300 diff --git a/arch/x86/include/asm/sys_ia32.h b/arch/x86/include/asm/sys_ia32.h
10301 index cb23852..2dde194 100644
10302 --- a/arch/x86/include/asm/sys_ia32.h
10303 +++ b/arch/x86/include/asm/sys_ia32.h
10304 @@ -40,7 +40,7 @@ asmlinkage long sys32_rt_sigprocmask(int, compat_sigset_t __user *,
10305 compat_sigset_t __user *, unsigned int);
10306 asmlinkage long sys32_alarm(unsigned int);
10307
10308 -asmlinkage long sys32_waitpid(compat_pid_t, unsigned int *, int);
10309 +asmlinkage long sys32_waitpid(compat_pid_t, unsigned int __user *, int);
10310 asmlinkage long sys32_sysfs(int, u32, u32);
10311
10312 asmlinkage long sys32_sched_rr_get_interval(compat_pid_t,
10313 diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h
10314 index 2d2f01c..f985723 100644
10315 --- a/arch/x86/include/asm/system.h
10316 +++ b/arch/x86/include/asm/system.h
10317 @@ -129,7 +129,7 @@ do { \
10318 "call __switch_to\n\t" \
10319 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
10320 __switch_canary \
10321 - "movq %P[thread_info](%%rsi),%%r8\n\t" \
10322 + "movq "__percpu_arg([thread_info])",%%r8\n\t" \
10323 "movq %%rax,%%rdi\n\t" \
10324 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
10325 "jnz ret_from_fork\n\t" \
10326 @@ -140,7 +140,7 @@ do { \
10327 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
10328 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
10329 [_tif_fork] "i" (_TIF_FORK), \
10330 - [thread_info] "i" (offsetof(struct task_struct, stack)), \
10331 + [thread_info] "m" (current_tinfo), \
10332 [current_task] "m" (current_task) \
10333 __switch_canary_iparam \
10334 : "memory", "cc" __EXTRA_CLOBBER)
10335 @@ -200,7 +200,7 @@ static inline unsigned long get_limit(unsigned long segment)
10336 {
10337 unsigned long __limit;
10338 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
10339 - return __limit + 1;
10340 + return __limit;
10341 }
10342
10343 static inline void native_clts(void)
10344 @@ -397,13 +397,13 @@ void enable_hlt(void);
10345
10346 void cpu_idle_wait(void);
10347
10348 -extern unsigned long arch_align_stack(unsigned long sp);
10349 +#define arch_align_stack(x) ((x) & ~0xfUL)
10350 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
10351
10352 void default_idle(void);
10353 bool set_pm_idle_to_default(void);
10354
10355 -void stop_this_cpu(void *dummy);
10356 +void stop_this_cpu(void *dummy) __noreturn;
10357
10358 /*
10359 * Force strict CPU ordering.
10360 diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
10361 index a1fe5c1..ee326d8 100644
10362 --- a/arch/x86/include/asm/thread_info.h
10363 +++ b/arch/x86/include/asm/thread_info.h
10364 @@ -10,6 +10,7 @@
10365 #include <linux/compiler.h>
10366 #include <asm/page.h>
10367 #include <asm/types.h>
10368 +#include <asm/percpu.h>
10369
10370 /*
10371 * low level task data that entry.S needs immediate access to
10372 @@ -24,7 +25,6 @@ struct exec_domain;
10373 #include <linux/atomic.h>
10374
10375 struct thread_info {
10376 - struct task_struct *task; /* main task structure */
10377 struct exec_domain *exec_domain; /* execution domain */
10378 __u32 flags; /* low level flags */
10379 __u32 status; /* thread synchronous flags */
10380 @@ -34,18 +34,12 @@ struct thread_info {
10381 mm_segment_t addr_limit;
10382 struct restart_block restart_block;
10383 void __user *sysenter_return;
10384 -#ifdef CONFIG_X86_32
10385 - unsigned long previous_esp; /* ESP of the previous stack in
10386 - case of nested (IRQ) stacks
10387 - */
10388 - __u8 supervisor_stack[0];
10389 -#endif
10390 + unsigned long lowest_stack;
10391 int uaccess_err;
10392 };
10393
10394 -#define INIT_THREAD_INFO(tsk) \
10395 +#define INIT_THREAD_INFO \
10396 { \
10397 - .task = &tsk, \
10398 .exec_domain = &default_exec_domain, \
10399 .flags = 0, \
10400 .cpu = 0, \
10401 @@ -56,7 +50,7 @@ struct thread_info {
10402 }, \
10403 }
10404
10405 -#define init_thread_info (init_thread_union.thread_info)
10406 +#define init_thread_info (init_thread_union.stack)
10407 #define init_stack (init_thread_union.stack)
10408
10409 #else /* !__ASSEMBLY__ */
10410 @@ -170,45 +164,40 @@ struct thread_info {
10411 ret; \
10412 })
10413
10414 -#ifdef CONFIG_X86_32
10415 -
10416 -#define STACK_WARN (THREAD_SIZE/8)
10417 -/*
10418 - * macros/functions for gaining access to the thread information structure
10419 - *
10420 - * preempt_count needs to be 1 initially, until the scheduler is functional.
10421 - */
10422 -#ifndef __ASSEMBLY__
10423 -
10424 -
10425 -/* how to get the current stack pointer from C */
10426 -register unsigned long current_stack_pointer asm("esp") __used;
10427 -
10428 -/* how to get the thread information struct from C */
10429 -static inline struct thread_info *current_thread_info(void)
10430 -{
10431 - return (struct thread_info *)
10432 - (current_stack_pointer & ~(THREAD_SIZE - 1));
10433 -}
10434 -
10435 -#else /* !__ASSEMBLY__ */
10436 -
10437 +#ifdef __ASSEMBLY__
10438 /* how to get the thread information struct from ASM */
10439 #define GET_THREAD_INFO(reg) \
10440 - movl $-THREAD_SIZE, reg; \
10441 - andl %esp, reg
10442 + mov PER_CPU_VAR(current_tinfo), reg
10443
10444 /* use this one if reg already contains %esp */
10445 -#define GET_THREAD_INFO_WITH_ESP(reg) \
10446 - andl $-THREAD_SIZE, reg
10447 +#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
10448 +#else
10449 +/* how to get the thread information struct from C */
10450 +DECLARE_PER_CPU(struct thread_info *, current_tinfo);
10451 +
10452 +static __always_inline struct thread_info *current_thread_info(void)
10453 +{
10454 + return percpu_read_stable(current_tinfo);
10455 +}
10456 +#endif
10457 +
10458 +#ifdef CONFIG_X86_32
10459 +
10460 +#define STACK_WARN (THREAD_SIZE/8)
10461 +/*
10462 + * macros/functions for gaining access to the thread information structure
10463 + *
10464 + * preempt_count needs to be 1 initially, until the scheduler is functional.
10465 + */
10466 +#ifndef __ASSEMBLY__
10467 +
10468 +/* how to get the current stack pointer from C */
10469 +register unsigned long current_stack_pointer asm("esp") __used;
10470
10471 #endif
10472
10473 #else /* X86_32 */
10474
10475 -#include <asm/percpu.h>
10476 -#define KERNEL_STACK_OFFSET (5*8)
10477 -
10478 /*
10479 * macros/functions for gaining access to the thread information structure
10480 * preempt_count needs to be 1 initially, until the scheduler is functional.
10481 @@ -216,21 +205,8 @@ static inline struct thread_info *current_thread_info(void)
10482 #ifndef __ASSEMBLY__
10483 DECLARE_PER_CPU(unsigned long, kernel_stack);
10484
10485 -static inline struct thread_info *current_thread_info(void)
10486 -{
10487 - struct thread_info *ti;
10488 - ti = (void *)(percpu_read_stable(kernel_stack) +
10489 - KERNEL_STACK_OFFSET - THREAD_SIZE);
10490 - return ti;
10491 -}
10492 -
10493 -#else /* !__ASSEMBLY__ */
10494 -
10495 -/* how to get the thread information struct from ASM */
10496 -#define GET_THREAD_INFO(reg) \
10497 - movq PER_CPU_VAR(kernel_stack),reg ; \
10498 - subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
10499 -
10500 +/* how to get the current stack pointer from C */
10501 +register unsigned long current_stack_pointer asm("rsp") __used;
10502 #endif
10503
10504 #endif /* !X86_32 */
10505 @@ -266,5 +242,16 @@ extern void arch_task_cache_init(void);
10506 extern void free_thread_info(struct thread_info *ti);
10507 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
10508 #define arch_task_cache_init arch_task_cache_init
10509 +
10510 +#define __HAVE_THREAD_FUNCTIONS
10511 +#define task_thread_info(task) (&(task)->tinfo)
10512 +#define task_stack_page(task) ((task)->stack)
10513 +#define setup_thread_stack(p, org) do {} while (0)
10514 +#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
10515 +
10516 +#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
10517 +extern struct task_struct *alloc_task_struct_node(int node);
10518 +extern void free_task_struct(struct task_struct *);
10519 +
10520 #endif
10521 #endif /* _ASM_X86_THREAD_INFO_H */
10522 diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
10523 index 36361bf..324f262 100644
10524 --- a/arch/x86/include/asm/uaccess.h
10525 +++ b/arch/x86/include/asm/uaccess.h
10526 @@ -7,12 +7,15 @@
10527 #include <linux/compiler.h>
10528 #include <linux/thread_info.h>
10529 #include <linux/string.h>
10530 +#include <linux/sched.h>
10531 #include <asm/asm.h>
10532 #include <asm/page.h>
10533
10534 #define VERIFY_READ 0
10535 #define VERIFY_WRITE 1
10536
10537 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
10538 +
10539 /*
10540 * The fs value determines whether argument validity checking should be
10541 * performed or not. If get_fs() == USER_DS, checking is performed, with
10542 @@ -28,7 +31,12 @@
10543
10544 #define get_ds() (KERNEL_DS)
10545 #define get_fs() (current_thread_info()->addr_limit)
10546 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
10547 +void __set_fs(mm_segment_t x);
10548 +void set_fs(mm_segment_t x);
10549 +#else
10550 #define set_fs(x) (current_thread_info()->addr_limit = (x))
10551 +#endif
10552
10553 #define segment_eq(a, b) ((a).seg == (b).seg)
10554
10555 @@ -76,7 +84,33 @@
10556 * checks that the pointer is in the user space range - after calling
10557 * this function, memory access functions may still return -EFAULT.
10558 */
10559 -#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
10560 +#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
10561 +#define access_ok(type, addr, size) \
10562 +({ \
10563 + long __size = size; \
10564 + unsigned long __addr = (unsigned long)addr; \
10565 + unsigned long __addr_ao = __addr & PAGE_MASK; \
10566 + unsigned long __end_ao = __addr + __size - 1; \
10567 + bool __ret_ao = __range_not_ok(__addr, __size) == 0; \
10568 + if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
10569 + while(__addr_ao <= __end_ao) { \
10570 + char __c_ao; \
10571 + __addr_ao += PAGE_SIZE; \
10572 + if (__size > PAGE_SIZE) \
10573 + cond_resched(); \
10574 + if (__get_user(__c_ao, (char __user *)__addr)) \
10575 + break; \
10576 + if (type != VERIFY_WRITE) { \
10577 + __addr = __addr_ao; \
10578 + continue; \
10579 + } \
10580 + if (__put_user(__c_ao, (char __user *)__addr)) \
10581 + break; \
10582 + __addr = __addr_ao; \
10583 + } \
10584 + } \
10585 + __ret_ao; \
10586 +})
10587
10588 /*
10589 * The exception table consists of pairs of addresses: the first is the
10590 @@ -182,12 +216,20 @@ extern int __get_user_bad(void);
10591 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
10592 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
10593
10594 -
10595 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
10596 +#define __copyuser_seg "gs;"
10597 +#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
10598 +#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
10599 +#else
10600 +#define __copyuser_seg
10601 +#define __COPYUSER_SET_ES
10602 +#define __COPYUSER_RESTORE_ES
10603 +#endif
10604
10605 #ifdef CONFIG_X86_32
10606 #define __put_user_asm_u64(x, addr, err, errret) \
10607 - asm volatile("1: movl %%eax,0(%2)\n" \
10608 - "2: movl %%edx,4(%2)\n" \
10609 + asm volatile("1: "__copyuser_seg"movl %%eax,0(%2)\n" \
10610 + "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
10611 "3:\n" \
10612 ".section .fixup,\"ax\"\n" \
10613 "4: movl %3,%0\n" \
10614 @@ -199,8 +241,8 @@ extern int __get_user_bad(void);
10615 : "A" (x), "r" (addr), "i" (errret), "0" (err))
10616
10617 #define __put_user_asm_ex_u64(x, addr) \
10618 - asm volatile("1: movl %%eax,0(%1)\n" \
10619 - "2: movl %%edx,4(%1)\n" \
10620 + asm volatile("1: "__copyuser_seg"movl %%eax,0(%1)\n" \
10621 + "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
10622 "3:\n" \
10623 _ASM_EXTABLE(1b, 2b - 1b) \
10624 _ASM_EXTABLE(2b, 3b - 2b) \
10625 @@ -252,7 +294,7 @@ extern void __put_user_8(void);
10626 __typeof__(*(ptr)) __pu_val; \
10627 __chk_user_ptr(ptr); \
10628 might_fault(); \
10629 - __pu_val = x; \
10630 + __pu_val = (x); \
10631 switch (sizeof(*(ptr))) { \
10632 case 1: \
10633 __put_user_x(1, __pu_val, ptr, __ret_pu); \
10634 @@ -373,7 +415,7 @@ do { \
10635 } while (0)
10636
10637 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
10638 - asm volatile("1: mov"itype" %2,%"rtype"1\n" \
10639 + asm volatile("1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
10640 "2:\n" \
10641 ".section .fixup,\"ax\"\n" \
10642 "3: mov %3,%0\n" \
10643 @@ -381,7 +423,7 @@ do { \
10644 " jmp 2b\n" \
10645 ".previous\n" \
10646 _ASM_EXTABLE(1b, 3b) \
10647 - : "=r" (err), ltype(x) \
10648 + : "=r" (err), ltype (x) \
10649 : "m" (__m(addr)), "i" (errret), "0" (err))
10650
10651 #define __get_user_size_ex(x, ptr, size) \
10652 @@ -406,7 +448,7 @@ do { \
10653 } while (0)
10654
10655 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
10656 - asm volatile("1: mov"itype" %1,%"rtype"0\n" \
10657 + asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
10658 "2:\n" \
10659 _ASM_EXTABLE(1b, 2b - 1b) \
10660 : ltype(x) : "m" (__m(addr)))
10661 @@ -423,13 +465,24 @@ do { \
10662 int __gu_err; \
10663 unsigned long __gu_val; \
10664 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
10665 - (x) = (__force __typeof__(*(ptr)))__gu_val; \
10666 + (x) = (__typeof__(*(ptr)))__gu_val; \
10667 __gu_err; \
10668 })
10669
10670 /* FIXME: this hack is definitely wrong -AK */
10671 struct __large_struct { unsigned long buf[100]; };
10672 -#define __m(x) (*(struct __large_struct __user *)(x))
10673 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10674 +#define ____m(x) \
10675 +({ \
10676 + unsigned long ____x = (unsigned long)(x); \
10677 + if (____x < PAX_USER_SHADOW_BASE) \
10678 + ____x += PAX_USER_SHADOW_BASE; \
10679 + (void __user *)____x; \
10680 +})
10681 +#else
10682 +#define ____m(x) (x)
10683 +#endif
10684 +#define __m(x) (*(struct __large_struct __user *)____m(x))
10685
10686 /*
10687 * Tell gcc we read from memory instead of writing: this is because
10688 @@ -437,7 +490,7 @@ struct __large_struct { unsigned long buf[100]; };
10689 * aliasing issues.
10690 */
10691 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
10692 - asm volatile("1: mov"itype" %"rtype"1,%2\n" \
10693 + asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
10694 "2:\n" \
10695 ".section .fixup,\"ax\"\n" \
10696 "3: mov %3,%0\n" \
10697 @@ -445,10 +498,10 @@ struct __large_struct { unsigned long buf[100]; };
10698 ".previous\n" \
10699 _ASM_EXTABLE(1b, 3b) \
10700 : "=r"(err) \
10701 - : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
10702 + : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
10703
10704 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
10705 - asm volatile("1: mov"itype" %"rtype"0,%1\n" \
10706 + asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
10707 "2:\n" \
10708 _ASM_EXTABLE(1b, 2b - 1b) \
10709 : : ltype(x), "m" (__m(addr)))
10710 @@ -487,8 +540,12 @@ struct __large_struct { unsigned long buf[100]; };
10711 * On error, the variable @x is set to zero.
10712 */
10713
10714 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10715 +#define __get_user(x, ptr) get_user((x), (ptr))
10716 +#else
10717 #define __get_user(x, ptr) \
10718 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
10719 +#endif
10720
10721 /**
10722 * __put_user: - Write a simple value into user space, with less checking.
10723 @@ -510,8 +567,12 @@ struct __large_struct { unsigned long buf[100]; };
10724 * Returns zero on success, or -EFAULT on error.
10725 */
10726
10727 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10728 +#define __put_user(x, ptr) put_user((x), (ptr))
10729 +#else
10730 #define __put_user(x, ptr) \
10731 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
10732 +#endif
10733
10734 #define __get_user_unaligned __get_user
10735 #define __put_user_unaligned __put_user
10736 @@ -529,7 +590,7 @@ struct __large_struct { unsigned long buf[100]; };
10737 #define get_user_ex(x, ptr) do { \
10738 unsigned long __gue_val; \
10739 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
10740 - (x) = (__force __typeof__(*(ptr)))__gue_val; \
10741 + (x) = (__typeof__(*(ptr)))__gue_val; \
10742 } while (0)
10743
10744 #ifdef CONFIG_X86_WP_WORKS_OK
10745 diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
10746 index 566e803..b9521e9 100644
10747 --- a/arch/x86/include/asm/uaccess_32.h
10748 +++ b/arch/x86/include/asm/uaccess_32.h
10749 @@ -43,6 +43,9 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
10750 static __always_inline unsigned long __must_check
10751 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
10752 {
10753 + if ((long)n < 0)
10754 + return n;
10755 +
10756 if (__builtin_constant_p(n)) {
10757 unsigned long ret;
10758
10759 @@ -61,6 +64,8 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
10760 return ret;
10761 }
10762 }
10763 + if (!__builtin_constant_p(n))
10764 + check_object_size(from, n, true);
10765 return __copy_to_user_ll(to, from, n);
10766 }
10767
10768 @@ -82,12 +87,16 @@ static __always_inline unsigned long __must_check
10769 __copy_to_user(void __user *to, const void *from, unsigned long n)
10770 {
10771 might_fault();
10772 +
10773 return __copy_to_user_inatomic(to, from, n);
10774 }
10775
10776 static __always_inline unsigned long
10777 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
10778 {
10779 + if ((long)n < 0)
10780 + return n;
10781 +
10782 /* Avoid zeroing the tail if the copy fails..
10783 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
10784 * but as the zeroing behaviour is only significant when n is not
10785 @@ -137,6 +146,10 @@ static __always_inline unsigned long
10786 __copy_from_user(void *to, const void __user *from, unsigned long n)
10787 {
10788 might_fault();
10789 +
10790 + if ((long)n < 0)
10791 + return n;
10792 +
10793 if (__builtin_constant_p(n)) {
10794 unsigned long ret;
10795
10796 @@ -152,6 +165,8 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
10797 return ret;
10798 }
10799 }
10800 + if (!__builtin_constant_p(n))
10801 + check_object_size(to, n, false);
10802 return __copy_from_user_ll(to, from, n);
10803 }
10804
10805 @@ -159,6 +174,10 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
10806 const void __user *from, unsigned long n)
10807 {
10808 might_fault();
10809 +
10810 + if ((long)n < 0)
10811 + return n;
10812 +
10813 if (__builtin_constant_p(n)) {
10814 unsigned long ret;
10815
10816 @@ -181,15 +200,19 @@ static __always_inline unsigned long
10817 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
10818 unsigned long n)
10819 {
10820 - return __copy_from_user_ll_nocache_nozero(to, from, n);
10821 + if ((long)n < 0)
10822 + return n;
10823 +
10824 + return __copy_from_user_ll_nocache_nozero(to, from, n);
10825 }
10826
10827 -unsigned long __must_check copy_to_user(void __user *to,
10828 - const void *from, unsigned long n);
10829 -unsigned long __must_check _copy_from_user(void *to,
10830 - const void __user *from,
10831 - unsigned long n);
10832 -
10833 +extern void copy_to_user_overflow(void)
10834 +#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
10835 + __compiletime_error("copy_to_user() buffer size is not provably correct")
10836 +#else
10837 + __compiletime_warning("copy_to_user() buffer size is not provably correct")
10838 +#endif
10839 +;
10840
10841 extern void copy_from_user_overflow(void)
10842 #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
10843 @@ -199,17 +222,61 @@ extern void copy_from_user_overflow(void)
10844 #endif
10845 ;
10846
10847 -static inline unsigned long __must_check copy_from_user(void *to,
10848 - const void __user *from,
10849 - unsigned long n)
10850 +/**
10851 + * copy_to_user: - Copy a block of data into user space.
10852 + * @to: Destination address, in user space.
10853 + * @from: Source address, in kernel space.
10854 + * @n: Number of bytes to copy.
10855 + *
10856 + * Context: User context only. This function may sleep.
10857 + *
10858 + * Copy data from kernel space to user space.
10859 + *
10860 + * Returns number of bytes that could not be copied.
10861 + * On success, this will be zero.
10862 + */
10863 +static inline unsigned long __must_check
10864 +copy_to_user(void __user *to, const void *from, unsigned long n)
10865 +{
10866 + int sz = __compiletime_object_size(from);
10867 +
10868 + if (unlikely(sz != -1 && sz < n))
10869 + copy_to_user_overflow();
10870 + else if (access_ok(VERIFY_WRITE, to, n))
10871 + n = __copy_to_user(to, from, n);
10872 + return n;
10873 +}
10874 +
10875 +/**
10876 + * copy_from_user: - Copy a block of data from user space.
10877 + * @to: Destination address, in kernel space.
10878 + * @from: Source address, in user space.
10879 + * @n: Number of bytes to copy.
10880 + *
10881 + * Context: User context only. This function may sleep.
10882 + *
10883 + * Copy data from user space to kernel space.
10884 + *
10885 + * Returns number of bytes that could not be copied.
10886 + * On success, this will be zero.
10887 + *
10888 + * If some data could not be copied, this function will pad the copied
10889 + * data to the requested size using zero bytes.
10890 + */
10891 +static inline unsigned long __must_check
10892 +copy_from_user(void *to, const void __user *from, unsigned long n)
10893 {
10894 int sz = __compiletime_object_size(to);
10895
10896 - if (likely(sz == -1 || sz >= n))
10897 - n = _copy_from_user(to, from, n);
10898 - else
10899 + if (unlikely(sz != -1 && sz < n))
10900 copy_from_user_overflow();
10901 -
10902 + else if (access_ok(VERIFY_READ, from, n))
10903 + n = __copy_from_user(to, from, n);
10904 + else if ((long)n > 0) {
10905 + if (!__builtin_constant_p(n))
10906 + check_object_size(to, n, false);
10907 + memset(to, 0, n);
10908 + }
10909 return n;
10910 }
10911
10912 diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
10913 index 1c66d30..e66922c 100644
10914 --- a/arch/x86/include/asm/uaccess_64.h
10915 +++ b/arch/x86/include/asm/uaccess_64.h
10916 @@ -10,6 +10,9 @@
10917 #include <asm/alternative.h>
10918 #include <asm/cpufeature.h>
10919 #include <asm/page.h>
10920 +#include <asm/pgtable.h>
10921 +
10922 +#define set_fs(x) (current_thread_info()->addr_limit = (x))
10923
10924 /*
10925 * Copy To/From Userspace
10926 @@ -17,12 +20,12 @@
10927
10928 /* Handles exceptions in both to and from, but doesn't do access_ok */
10929 __must_check unsigned long
10930 -copy_user_generic_string(void *to, const void *from, unsigned len);
10931 +copy_user_generic_string(void *to, const void *from, unsigned long len);
10932 __must_check unsigned long
10933 -copy_user_generic_unrolled(void *to, const void *from, unsigned len);
10934 +copy_user_generic_unrolled(void *to, const void *from, unsigned long len);
10935
10936 static __always_inline __must_check unsigned long
10937 -copy_user_generic(void *to, const void *from, unsigned len)
10938 +copy_user_generic(void *to, const void *from, unsigned long len)
10939 {
10940 unsigned ret;
10941
10942 @@ -32,142 +35,226 @@ copy_user_generic(void *to, const void *from, unsigned len)
10943 ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from),
10944 "=d" (len)),
10945 "1" (to), "2" (from), "3" (len)
10946 - : "memory", "rcx", "r8", "r9", "r10", "r11");
10947 + : "memory", "rcx", "r8", "r9", "r11");
10948 return ret;
10949 }
10950
10951 +static __always_inline __must_check unsigned long
10952 +__copy_to_user(void __user *to, const void *from, unsigned long len);
10953 +static __always_inline __must_check unsigned long
10954 +__copy_from_user(void *to, const void __user *from, unsigned long len);
10955 __must_check unsigned long
10956 -_copy_to_user(void __user *to, const void *from, unsigned len);
10957 -__must_check unsigned long
10958 -_copy_from_user(void *to, const void __user *from, unsigned len);
10959 -__must_check unsigned long
10960 -copy_in_user(void __user *to, const void __user *from, unsigned len);
10961 +copy_in_user(void __user *to, const void __user *from, unsigned long len);
10962
10963 static inline unsigned long __must_check copy_from_user(void *to,
10964 const void __user *from,
10965 unsigned long n)
10966 {
10967 - int sz = __compiletime_object_size(to);
10968 -
10969 might_fault();
10970 - if (likely(sz == -1 || sz >= n))
10971 - n = _copy_from_user(to, from, n);
10972 -#ifdef CONFIG_DEBUG_VM
10973 - else
10974 - WARN(1, "Buffer overflow detected!\n");
10975 -#endif
10976 +
10977 + if (access_ok(VERIFY_READ, from, n))
10978 + n = __copy_from_user(to, from, n);
10979 + else if (n < INT_MAX) {
10980 + if (!__builtin_constant_p(n))
10981 + check_object_size(to, n, false);
10982 + memset(to, 0, n);
10983 + }
10984 return n;
10985 }
10986
10987 static __always_inline __must_check
10988 -int copy_to_user(void __user *dst, const void *src, unsigned size)
10989 +int copy_to_user(void __user *dst, const void *src, unsigned long size)
10990 {
10991 might_fault();
10992
10993 - return _copy_to_user(dst, src, size);
10994 + if (access_ok(VERIFY_WRITE, dst, size))
10995 + size = __copy_to_user(dst, src, size);
10996 + return size;
10997 }
10998
10999 static __always_inline __must_check
11000 -int __copy_from_user(void *dst, const void __user *src, unsigned size)
11001 +unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size)
11002 {
11003 - int ret = 0;
11004 + int sz = __compiletime_object_size(dst);
11005 + unsigned ret = 0;
11006
11007 might_fault();
11008 - if (!__builtin_constant_p(size))
11009 - return copy_user_generic(dst, (__force void *)src, size);
11010 +
11011 + if (size > INT_MAX)
11012 + return size;
11013 +
11014 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11015 + if (!__access_ok(VERIFY_READ, src, size))
11016 + return size;
11017 +#endif
11018 +
11019 + if (unlikely(sz != -1 && sz < size)) {
11020 +#ifdef CONFIG_DEBUG_VM
11021 + WARN(1, "Buffer overflow detected!\n");
11022 +#endif
11023 + return size;
11024 + }
11025 +
11026 + if (!__builtin_constant_p(size)) {
11027 + check_object_size(dst, size, false);
11028 +
11029 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11030 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
11031 + src += PAX_USER_SHADOW_BASE;
11032 +#endif
11033 +
11034 + return copy_user_generic(dst, (__force_kernel const void *)src, size);
11035 + }
11036 switch (size) {
11037 - case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
11038 + case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
11039 ret, "b", "b", "=q", 1);
11040 return ret;
11041 - case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
11042 + case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
11043 ret, "w", "w", "=r", 2);
11044 return ret;
11045 - case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
11046 + case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
11047 ret, "l", "k", "=r", 4);
11048 return ret;
11049 - case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
11050 + case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
11051 ret, "q", "", "=r", 8);
11052 return ret;
11053 case 10:
11054 - __get_user_asm(*(u64 *)dst, (u64 __user *)src,
11055 + __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
11056 ret, "q", "", "=r", 10);
11057 if (unlikely(ret))
11058 return ret;
11059 __get_user_asm(*(u16 *)(8 + (char *)dst),
11060 - (u16 __user *)(8 + (char __user *)src),
11061 + (const u16 __user *)(8 + (const char __user *)src),
11062 ret, "w", "w", "=r", 2);
11063 return ret;
11064 case 16:
11065 - __get_user_asm(*(u64 *)dst, (u64 __user *)src,
11066 + __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
11067 ret, "q", "", "=r", 16);
11068 if (unlikely(ret))
11069 return ret;
11070 __get_user_asm(*(u64 *)(8 + (char *)dst),
11071 - (u64 __user *)(8 + (char __user *)src),
11072 + (const u64 __user *)(8 + (const char __user *)src),
11073 ret, "q", "", "=r", 8);
11074 return ret;
11075 default:
11076 - return copy_user_generic(dst, (__force void *)src, size);
11077 +
11078 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11079 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
11080 + src += PAX_USER_SHADOW_BASE;
11081 +#endif
11082 +
11083 + return copy_user_generic(dst, (__force_kernel const void *)src, size);
11084 }
11085 }
11086
11087 static __always_inline __must_check
11088 -int __copy_to_user(void __user *dst, const void *src, unsigned size)
11089 +unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size)
11090 {
11091 - int ret = 0;
11092 + int sz = __compiletime_object_size(src);
11093 + unsigned ret = 0;
11094
11095 might_fault();
11096 - if (!__builtin_constant_p(size))
11097 - return copy_user_generic((__force void *)dst, src, size);
11098 +
11099 + if (size > INT_MAX)
11100 + return size;
11101 +
11102 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11103 + if (!__access_ok(VERIFY_WRITE, dst, size))
11104 + return size;
11105 +#endif
11106 +
11107 + if (unlikely(sz != -1 && sz < size)) {
11108 +#ifdef CONFIG_DEBUG_VM
11109 + WARN(1, "Buffer overflow detected!\n");
11110 +#endif
11111 + return size;
11112 + }
11113 +
11114 + if (!__builtin_constant_p(size)) {
11115 + check_object_size(src, size, true);
11116 +
11117 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11118 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
11119 + dst += PAX_USER_SHADOW_BASE;
11120 +#endif
11121 +
11122 + return copy_user_generic((__force_kernel void *)dst, src, size);
11123 + }
11124 switch (size) {
11125 - case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
11126 + case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
11127 ret, "b", "b", "iq", 1);
11128 return ret;
11129 - case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
11130 + case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
11131 ret, "w", "w", "ir", 2);
11132 return ret;
11133 - case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
11134 + case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
11135 ret, "l", "k", "ir", 4);
11136 return ret;
11137 - case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
11138 + case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
11139 ret, "q", "", "er", 8);
11140 return ret;
11141 case 10:
11142 - __put_user_asm(*(u64 *)src, (u64 __user *)dst,
11143 + __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
11144 ret, "q", "", "er", 10);
11145 if (unlikely(ret))
11146 return ret;
11147 asm("":::"memory");
11148 - __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
11149 + __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
11150 ret, "w", "w", "ir", 2);
11151 return ret;
11152 case 16:
11153 - __put_user_asm(*(u64 *)src, (u64 __user *)dst,
11154 + __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
11155 ret, "q", "", "er", 16);
11156 if (unlikely(ret))
11157 return ret;
11158 asm("":::"memory");
11159 - __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
11160 + __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
11161 ret, "q", "", "er", 8);
11162 return ret;
11163 default:
11164 - return copy_user_generic((__force void *)dst, src, size);
11165 +
11166 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11167 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
11168 + dst += PAX_USER_SHADOW_BASE;
11169 +#endif
11170 +
11171 + return copy_user_generic((__force_kernel void *)dst, src, size);
11172 }
11173 }
11174
11175 static __always_inline __must_check
11176 -int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
11177 +unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned long size)
11178 {
11179 - int ret = 0;
11180 + unsigned ret = 0;
11181
11182 might_fault();
11183 - if (!__builtin_constant_p(size))
11184 - return copy_user_generic((__force void *)dst,
11185 - (__force void *)src, size);
11186 +
11187 + if (size > INT_MAX)
11188 + return size;
11189 +
11190 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11191 + if (!__access_ok(VERIFY_READ, src, size))
11192 + return size;
11193 + if (!__access_ok(VERIFY_WRITE, dst, size))
11194 + return size;
11195 +#endif
11196 +
11197 + if (!__builtin_constant_p(size)) {
11198 +
11199 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11200 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
11201 + src += PAX_USER_SHADOW_BASE;
11202 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
11203 + dst += PAX_USER_SHADOW_BASE;
11204 +#endif
11205 +
11206 + return copy_user_generic((__force_kernel void *)dst,
11207 + (__force_kernel const void *)src, size);
11208 + }
11209 switch (size) {
11210 case 1: {
11211 u8 tmp;
11212 - __get_user_asm(tmp, (u8 __user *)src,
11213 + __get_user_asm(tmp, (const u8 __user *)src,
11214 ret, "b", "b", "=q", 1);
11215 if (likely(!ret))
11216 __put_user_asm(tmp, (u8 __user *)dst,
11217 @@ -176,7 +263,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
11218 }
11219 case 2: {
11220 u16 tmp;
11221 - __get_user_asm(tmp, (u16 __user *)src,
11222 + __get_user_asm(tmp, (const u16 __user *)src,
11223 ret, "w", "w", "=r", 2);
11224 if (likely(!ret))
11225 __put_user_asm(tmp, (u16 __user *)dst,
11226 @@ -186,7 +273,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
11227
11228 case 4: {
11229 u32 tmp;
11230 - __get_user_asm(tmp, (u32 __user *)src,
11231 + __get_user_asm(tmp, (const u32 __user *)src,
11232 ret, "l", "k", "=r", 4);
11233 if (likely(!ret))
11234 __put_user_asm(tmp, (u32 __user *)dst,
11235 @@ -195,7 +282,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
11236 }
11237 case 8: {
11238 u64 tmp;
11239 - __get_user_asm(tmp, (u64 __user *)src,
11240 + __get_user_asm(tmp, (const u64 __user *)src,
11241 ret, "q", "", "=r", 8);
11242 if (likely(!ret))
11243 __put_user_asm(tmp, (u64 __user *)dst,
11244 @@ -203,8 +290,16 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
11245 return ret;
11246 }
11247 default:
11248 - return copy_user_generic((__force void *)dst,
11249 - (__force void *)src, size);
11250 +
11251 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11252 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
11253 + src += PAX_USER_SHADOW_BASE;
11254 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
11255 + dst += PAX_USER_SHADOW_BASE;
11256 +#endif
11257 +
11258 + return copy_user_generic((__force_kernel void *)dst,
11259 + (__force_kernel const void *)src, size);
11260 }
11261 }
11262
11263 @@ -219,35 +314,72 @@ __must_check unsigned long clear_user(void __user *mem, unsigned long len);
11264 __must_check unsigned long __clear_user(void __user *mem, unsigned long len);
11265
11266 static __must_check __always_inline int
11267 -__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
11268 +__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size)
11269 {
11270 - return copy_user_generic(dst, (__force const void *)src, size);
11271 + if (size > INT_MAX)
11272 + return size;
11273 +
11274 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11275 + if (!__access_ok(VERIFY_READ, src, size))
11276 + return size;
11277 +
11278 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
11279 + src += PAX_USER_SHADOW_BASE;
11280 +#endif
11281 +
11282 + return copy_user_generic(dst, (__force_kernel const void *)src, size);
11283 }
11284
11285 -static __must_check __always_inline int
11286 -__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
11287 +static __must_check __always_inline unsigned long
11288 +__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size)
11289 {
11290 - return copy_user_generic((__force void *)dst, src, size);
11291 + if (size > INT_MAX)
11292 + return size;
11293 +
11294 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11295 + if (!__access_ok(VERIFY_WRITE, dst, size))
11296 + return size;
11297 +
11298 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
11299 + dst += PAX_USER_SHADOW_BASE;
11300 +#endif
11301 +
11302 + return copy_user_generic((__force_kernel void *)dst, src, size);
11303 }
11304
11305 -extern long __copy_user_nocache(void *dst, const void __user *src,
11306 - unsigned size, int zerorest);
11307 +extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
11308 + unsigned long size, int zerorest);
11309
11310 -static inline int
11311 -__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
11312 +static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned long size)
11313 {
11314 might_sleep();
11315 +
11316 + if (size > INT_MAX)
11317 + return size;
11318 +
11319 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11320 + if (!__access_ok(VERIFY_READ, src, size))
11321 + return size;
11322 +#endif
11323 +
11324 return __copy_user_nocache(dst, src, size, 1);
11325 }
11326
11327 -static inline int
11328 -__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
11329 - unsigned size)
11330 +static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
11331 + unsigned long size)
11332 {
11333 + if (size > INT_MAX)
11334 + return size;
11335 +
11336 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11337 + if (!__access_ok(VERIFY_READ, src, size))
11338 + return size;
11339 +#endif
11340 +
11341 return __copy_user_nocache(dst, src, size, 0);
11342 }
11343
11344 -unsigned long
11345 -copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
11346 +extern unsigned long
11347 +copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest);
11348
11349 #endif /* _ASM_X86_UACCESS_64_H */
11350 diff --git a/arch/x86/include/asm/vdso.h b/arch/x86/include/asm/vdso.h
11351 index bb05228..d763d5b 100644
11352 --- a/arch/x86/include/asm/vdso.h
11353 +++ b/arch/x86/include/asm/vdso.h
11354 @@ -11,7 +11,7 @@ extern const char VDSO32_PRELINK[];
11355 #define VDSO32_SYMBOL(base, name) \
11356 ({ \
11357 extern const char VDSO32_##name[]; \
11358 - (void *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
11359 + (void __user *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
11360 })
11361 #endif
11362
11363 diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
11364 index 1971e65..1e3559b 100644
11365 --- a/arch/x86/include/asm/x86_init.h
11366 +++ b/arch/x86/include/asm/x86_init.h
11367 @@ -28,7 +28,7 @@ struct x86_init_mpparse {
11368 void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name);
11369 void (*find_smp_config)(void);
11370 void (*get_smp_config)(unsigned int early);
11371 -};
11372 +} __no_const;
11373
11374 /**
11375 * struct x86_init_resources - platform specific resource related ops
11376 @@ -42,7 +42,7 @@ struct x86_init_resources {
11377 void (*probe_roms)(void);
11378 void (*reserve_resources)(void);
11379 char *(*memory_setup)(void);
11380 -};
11381 +} __no_const;
11382
11383 /**
11384 * struct x86_init_irqs - platform specific interrupt setup
11385 @@ -55,7 +55,7 @@ struct x86_init_irqs {
11386 void (*pre_vector_init)(void);
11387 void (*intr_init)(void);
11388 void (*trap_init)(void);
11389 -};
11390 +} __no_const;
11391
11392 /**
11393 * struct x86_init_oem - oem platform specific customizing functions
11394 @@ -65,7 +65,7 @@ struct x86_init_irqs {
11395 struct x86_init_oem {
11396 void (*arch_setup)(void);
11397 void (*banner)(void);
11398 -};
11399 +} __no_const;
11400
11401 /**
11402 * struct x86_init_mapping - platform specific initial kernel pagetable setup
11403 @@ -76,7 +76,7 @@ struct x86_init_oem {
11404 */
11405 struct x86_init_mapping {
11406 void (*pagetable_reserve)(u64 start, u64 end);
11407 -};
11408 +} __no_const;
11409
11410 /**
11411 * struct x86_init_paging - platform specific paging functions
11412 @@ -86,7 +86,7 @@ struct x86_init_mapping {
11413 struct x86_init_paging {
11414 void (*pagetable_setup_start)(pgd_t *base);
11415 void (*pagetable_setup_done)(pgd_t *base);
11416 -};
11417 +} __no_const;
11418
11419 /**
11420 * struct x86_init_timers - platform specific timer setup
11421 @@ -101,7 +101,7 @@ struct x86_init_timers {
11422 void (*tsc_pre_init)(void);
11423 void (*timer_init)(void);
11424 void (*wallclock_init)(void);
11425 -};
11426 +} __no_const;
11427
11428 /**
11429 * struct x86_init_iommu - platform specific iommu setup
11430 @@ -109,7 +109,7 @@ struct x86_init_timers {
11431 */
11432 struct x86_init_iommu {
11433 int (*iommu_init)(void);
11434 -};
11435 +} __no_const;
11436
11437 /**
11438 * struct x86_init_pci - platform specific pci init functions
11439 @@ -123,7 +123,7 @@ struct x86_init_pci {
11440 int (*init)(void);
11441 void (*init_irq)(void);
11442 void (*fixup_irqs)(void);
11443 -};
11444 +} __no_const;
11445
11446 /**
11447 * struct x86_init_ops - functions for platform specific setup
11448 @@ -139,7 +139,7 @@ struct x86_init_ops {
11449 struct x86_init_timers timers;
11450 struct x86_init_iommu iommu;
11451 struct x86_init_pci pci;
11452 -};
11453 +} __no_const;
11454
11455 /**
11456 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
11457 @@ -147,7 +147,7 @@ struct x86_init_ops {
11458 */
11459 struct x86_cpuinit_ops {
11460 void (*setup_percpu_clockev)(void);
11461 -};
11462 +} __no_const;
11463
11464 /**
11465 * struct x86_platform_ops - platform specific runtime functions
11466 @@ -169,7 +169,7 @@ struct x86_platform_ops {
11467 void (*nmi_init)(void);
11468 unsigned char (*get_nmi_reason)(void);
11469 int (*i8042_detect)(void);
11470 -};
11471 +} __no_const;
11472
11473 struct pci_dev;
11474
11475 @@ -177,7 +177,7 @@ struct x86_msi_ops {
11476 int (*setup_msi_irqs)(struct pci_dev *dev, int nvec, int type);
11477 void (*teardown_msi_irq)(unsigned int irq);
11478 void (*teardown_msi_irqs)(struct pci_dev *dev);
11479 -};
11480 +} __no_const;
11481
11482 extern struct x86_init_ops x86_init;
11483 extern struct x86_cpuinit_ops x86_cpuinit;
11484 diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
11485 index c6ce245..ffbdab7 100644
11486 --- a/arch/x86/include/asm/xsave.h
11487 +++ b/arch/x86/include/asm/xsave.h
11488 @@ -65,6 +65,11 @@ static inline int xsave_user(struct xsave_struct __user *buf)
11489 {
11490 int err;
11491
11492 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11493 + if ((unsigned long)buf < PAX_USER_SHADOW_BASE)
11494 + buf = (struct xsave_struct __user *)((void __user*)buf + PAX_USER_SHADOW_BASE);
11495 +#endif
11496 +
11497 /*
11498 * Clear the xsave header first, so that reserved fields are
11499 * initialized to zero.
11500 @@ -96,10 +101,15 @@ static inline int xsave_user(struct xsave_struct __user *buf)
11501 static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
11502 {
11503 int err;
11504 - struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
11505 + struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf);
11506 u32 lmask = mask;
11507 u32 hmask = mask >> 32;
11508
11509 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11510 + if ((unsigned long)xstate < PAX_USER_SHADOW_BASE)
11511 + xstate = (struct xsave_struct *)((void *)xstate + PAX_USER_SHADOW_BASE);
11512 +#endif
11513 +
11514 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
11515 "2:\n"
11516 ".section .fixup,\"ax\"\n"
11517 diff --git a/arch/x86/kernel/acpi/realmode/Makefile b/arch/x86/kernel/acpi/realmode/Makefile
11518 index 6a564ac..9b1340c 100644
11519 --- a/arch/x86/kernel/acpi/realmode/Makefile
11520 +++ b/arch/x86/kernel/acpi/realmode/Makefile
11521 @@ -41,6 +41,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D_WAKEUP -D__KERNEL__ \
11522 $(call cc-option, -fno-stack-protector) \
11523 $(call cc-option, -mpreferred-stack-boundary=2)
11524 KBUILD_CFLAGS += $(call cc-option, -m32)
11525 +ifdef CONSTIFY_PLUGIN
11526 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
11527 +endif
11528 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
11529 GCOV_PROFILE := n
11530
11531 diff --git a/arch/x86/kernel/acpi/realmode/wakeup.S b/arch/x86/kernel/acpi/realmode/wakeup.S
11532 index b4fd836..4358fe3 100644
11533 --- a/arch/x86/kernel/acpi/realmode/wakeup.S
11534 +++ b/arch/x86/kernel/acpi/realmode/wakeup.S
11535 @@ -108,6 +108,9 @@ wakeup_code:
11536 /* Do any other stuff... */
11537
11538 #ifndef CONFIG_64BIT
11539 + /* Recheck NX bit overrides (64bit path does this in trampoline */
11540 + call verify_cpu
11541 +
11542 /* This could also be done in C code... */
11543 movl pmode_cr3, %eax
11544 movl %eax, %cr3
11545 @@ -131,6 +134,7 @@ wakeup_code:
11546 movl pmode_cr0, %eax
11547 movl %eax, %cr0
11548 jmp pmode_return
11549 +# include "../../verify_cpu.S"
11550 #else
11551 pushw $0
11552 pushw trampoline_segment
11553 diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
11554 index 103b6ab..2004d0a 100644
11555 --- a/arch/x86/kernel/acpi/sleep.c
11556 +++ b/arch/x86/kernel/acpi/sleep.c
11557 @@ -94,8 +94,12 @@ int acpi_suspend_lowlevel(void)
11558 header->trampoline_segment = trampoline_address() >> 4;
11559 #ifdef CONFIG_SMP
11560 stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
11561 +
11562 + pax_open_kernel();
11563 early_gdt_descr.address =
11564 (unsigned long)get_cpu_gdt_table(smp_processor_id());
11565 + pax_close_kernel();
11566 +
11567 initial_gs = per_cpu_offset(smp_processor_id());
11568 #endif
11569 initial_code = (unsigned long)wakeup_long64;
11570 diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
11571 index 13ab720..95d5442 100644
11572 --- a/arch/x86/kernel/acpi/wakeup_32.S
11573 +++ b/arch/x86/kernel/acpi/wakeup_32.S
11574 @@ -30,13 +30,11 @@ wakeup_pmode_return:
11575 # and restore the stack ... but you need gdt for this to work
11576 movl saved_context_esp, %esp
11577
11578 - movl %cs:saved_magic, %eax
11579 - cmpl $0x12345678, %eax
11580 + cmpl $0x12345678, saved_magic
11581 jne bogus_magic
11582
11583 # jump to place where we left off
11584 - movl saved_eip, %eax
11585 - jmp *%eax
11586 + jmp *(saved_eip)
11587
11588 bogus_magic:
11589 jmp bogus_magic
11590 diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
11591 index 1f84794..e23f862 100644
11592 --- a/arch/x86/kernel/alternative.c
11593 +++ b/arch/x86/kernel/alternative.c
11594 @@ -276,6 +276,13 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
11595 */
11596 for (a = start; a < end; a++) {
11597 instr = (u8 *)&a->instr_offset + a->instr_offset;
11598 +
11599 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
11600 + instr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
11601 + if (instr < (u8 *)_text || (u8 *)_einittext <= instr)
11602 + instr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
11603 +#endif
11604 +
11605 replacement = (u8 *)&a->repl_offset + a->repl_offset;
11606 BUG_ON(a->replacementlen > a->instrlen);
11607 BUG_ON(a->instrlen > sizeof(insnbuf));
11608 @@ -307,10 +314,16 @@ static void alternatives_smp_lock(const s32 *start, const s32 *end,
11609 for (poff = start; poff < end; poff++) {
11610 u8 *ptr = (u8 *)poff + *poff;
11611
11612 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
11613 + ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
11614 + if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
11615 + ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
11616 +#endif
11617 +
11618 if (!*poff || ptr < text || ptr >= text_end)
11619 continue;
11620 /* turn DS segment override prefix into lock prefix */
11621 - if (*ptr == 0x3e)
11622 + if (*ktla_ktva(ptr) == 0x3e)
11623 text_poke(ptr, ((unsigned char []){0xf0}), 1);
11624 };
11625 mutex_unlock(&text_mutex);
11626 @@ -328,10 +341,16 @@ static void alternatives_smp_unlock(const s32 *start, const s32 *end,
11627 for (poff = start; poff < end; poff++) {
11628 u8 *ptr = (u8 *)poff + *poff;
11629
11630 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
11631 + ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
11632 + if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
11633 + ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
11634 +#endif
11635 +
11636 if (!*poff || ptr < text || ptr >= text_end)
11637 continue;
11638 /* turn lock prefix into DS segment override prefix */
11639 - if (*ptr == 0xf0)
11640 + if (*ktla_ktva(ptr) == 0xf0)
11641 text_poke(ptr, ((unsigned char []){0x3E}), 1);
11642 };
11643 mutex_unlock(&text_mutex);
11644 @@ -500,7 +519,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
11645
11646 BUG_ON(p->len > MAX_PATCH_LEN);
11647 /* prep the buffer with the original instructions */
11648 - memcpy(insnbuf, p->instr, p->len);
11649 + memcpy(insnbuf, ktla_ktva(p->instr), p->len);
11650 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
11651 (unsigned long)p->instr, p->len);
11652
11653 @@ -568,7 +587,7 @@ void __init alternative_instructions(void)
11654 if (smp_alt_once)
11655 free_init_pages("SMP alternatives",
11656 (unsigned long)__smp_locks,
11657 - (unsigned long)__smp_locks_end);
11658 + PAGE_ALIGN((unsigned long)__smp_locks_end));
11659
11660 restart_nmi();
11661 }
11662 @@ -585,13 +604,17 @@ void __init alternative_instructions(void)
11663 * instructions. And on the local CPU you need to be protected again NMI or MCE
11664 * handlers seeing an inconsistent instruction while you patch.
11665 */
11666 -void *__init_or_module text_poke_early(void *addr, const void *opcode,
11667 +void *__kprobes text_poke_early(void *addr, const void *opcode,
11668 size_t len)
11669 {
11670 unsigned long flags;
11671 local_irq_save(flags);
11672 - memcpy(addr, opcode, len);
11673 +
11674 + pax_open_kernel();
11675 + memcpy(ktla_ktva(addr), opcode, len);
11676 sync_core();
11677 + pax_close_kernel();
11678 +
11679 local_irq_restore(flags);
11680 /* Could also do a CLFLUSH here to speed up CPU recovery; but
11681 that causes hangs on some VIA CPUs. */
11682 @@ -613,36 +636,22 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode,
11683 */
11684 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
11685 {
11686 - unsigned long flags;
11687 - char *vaddr;
11688 + unsigned char *vaddr = ktla_ktva(addr);
11689 struct page *pages[2];
11690 - int i;
11691 + size_t i;
11692
11693 if (!core_kernel_text((unsigned long)addr)) {
11694 - pages[0] = vmalloc_to_page(addr);
11695 - pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
11696 + pages[0] = vmalloc_to_page(vaddr);
11697 + pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
11698 } else {
11699 - pages[0] = virt_to_page(addr);
11700 + pages[0] = virt_to_page(vaddr);
11701 WARN_ON(!PageReserved(pages[0]));
11702 - pages[1] = virt_to_page(addr + PAGE_SIZE);
11703 + pages[1] = virt_to_page(vaddr + PAGE_SIZE);
11704 }
11705 BUG_ON(!pages[0]);
11706 - local_irq_save(flags);
11707 - set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
11708 - if (pages[1])
11709 - set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
11710 - vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
11711 - memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
11712 - clear_fixmap(FIX_TEXT_POKE0);
11713 - if (pages[1])
11714 - clear_fixmap(FIX_TEXT_POKE1);
11715 - local_flush_tlb();
11716 - sync_core();
11717 - /* Could also do a CLFLUSH here to speed up CPU recovery; but
11718 - that causes hangs on some VIA CPUs. */
11719 + text_poke_early(addr, opcode, len);
11720 for (i = 0; i < len; i++)
11721 - BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
11722 - local_irq_restore(flags);
11723 + BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
11724 return addr;
11725 }
11726
11727 diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
11728 index f98d84c..e402a69 100644
11729 --- a/arch/x86/kernel/apic/apic.c
11730 +++ b/arch/x86/kernel/apic/apic.c
11731 @@ -174,7 +174,7 @@ int first_system_vector = 0xfe;
11732 /*
11733 * Debug level, exported for io_apic.c
11734 */
11735 -unsigned int apic_verbosity;
11736 +int apic_verbosity;
11737
11738 int pic_mode;
11739
11740 @@ -1853,7 +1853,7 @@ void smp_error_interrupt(struct pt_regs *regs)
11741 apic_write(APIC_ESR, 0);
11742 v1 = apic_read(APIC_ESR);
11743 ack_APIC_irq();
11744 - atomic_inc(&irq_err_count);
11745 + atomic_inc_unchecked(&irq_err_count);
11746
11747 apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x(%02x)",
11748 smp_processor_id(), v0 , v1);
11749 diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
11750 index 6d939d7..0697fcc 100644
11751 --- a/arch/x86/kernel/apic/io_apic.c
11752 +++ b/arch/x86/kernel/apic/io_apic.c
11753 @@ -1096,7 +1096,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin,
11754 }
11755 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
11756
11757 -void lock_vector_lock(void)
11758 +void lock_vector_lock(void) __acquires(vector_lock)
11759 {
11760 /* Used to the online set of cpus does not change
11761 * during assign_irq_vector.
11762 @@ -1104,7 +1104,7 @@ void lock_vector_lock(void)
11763 raw_spin_lock(&vector_lock);
11764 }
11765
11766 -void unlock_vector_lock(void)
11767 +void unlock_vector_lock(void) __releases(vector_lock)
11768 {
11769 raw_spin_unlock(&vector_lock);
11770 }
11771 @@ -2510,7 +2510,7 @@ static void ack_apic_edge(struct irq_data *data)
11772 ack_APIC_irq();
11773 }
11774
11775 -atomic_t irq_mis_count;
11776 +atomic_unchecked_t irq_mis_count;
11777
11778 static void ack_apic_level(struct irq_data *data)
11779 {
11780 @@ -2576,7 +2576,7 @@ static void ack_apic_level(struct irq_data *data)
11781 * at the cpu.
11782 */
11783 if (!(v & (1 << (i & 0x1f)))) {
11784 - atomic_inc(&irq_mis_count);
11785 + atomic_inc_unchecked(&irq_mis_count);
11786
11787 eoi_ioapic_irq(irq, cfg);
11788 }
11789 diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
11790 index a46bd38..6b906d7 100644
11791 --- a/arch/x86/kernel/apm_32.c
11792 +++ b/arch/x86/kernel/apm_32.c
11793 @@ -411,7 +411,7 @@ static DEFINE_MUTEX(apm_mutex);
11794 * This is for buggy BIOS's that refer to (real mode) segment 0x40
11795 * even though they are called in protected mode.
11796 */
11797 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
11798 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
11799 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
11800
11801 static const char driver_version[] = "1.16ac"; /* no spaces */
11802 @@ -589,7 +589,10 @@ static long __apm_bios_call(void *_call)
11803 BUG_ON(cpu != 0);
11804 gdt = get_cpu_gdt_table(cpu);
11805 save_desc_40 = gdt[0x40 / 8];
11806 +
11807 + pax_open_kernel();
11808 gdt[0x40 / 8] = bad_bios_desc;
11809 + pax_close_kernel();
11810
11811 apm_irq_save(flags);
11812 APM_DO_SAVE_SEGS;
11813 @@ -598,7 +601,11 @@ static long __apm_bios_call(void *_call)
11814 &call->esi);
11815 APM_DO_RESTORE_SEGS;
11816 apm_irq_restore(flags);
11817 +
11818 + pax_open_kernel();
11819 gdt[0x40 / 8] = save_desc_40;
11820 + pax_close_kernel();
11821 +
11822 put_cpu();
11823
11824 return call->eax & 0xff;
11825 @@ -665,7 +672,10 @@ static long __apm_bios_call_simple(void *_call)
11826 BUG_ON(cpu != 0);
11827 gdt = get_cpu_gdt_table(cpu);
11828 save_desc_40 = gdt[0x40 / 8];
11829 +
11830 + pax_open_kernel();
11831 gdt[0x40 / 8] = bad_bios_desc;
11832 + pax_close_kernel();
11833
11834 apm_irq_save(flags);
11835 APM_DO_SAVE_SEGS;
11836 @@ -673,7 +683,11 @@ static long __apm_bios_call_simple(void *_call)
11837 &call->eax);
11838 APM_DO_RESTORE_SEGS;
11839 apm_irq_restore(flags);
11840 +
11841 + pax_open_kernel();
11842 gdt[0x40 / 8] = save_desc_40;
11843 + pax_close_kernel();
11844 +
11845 put_cpu();
11846 return error;
11847 }
11848 @@ -2347,12 +2361,15 @@ static int __init apm_init(void)
11849 * code to that CPU.
11850 */
11851 gdt = get_cpu_gdt_table(0);
11852 +
11853 + pax_open_kernel();
11854 set_desc_base(&gdt[APM_CS >> 3],
11855 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
11856 set_desc_base(&gdt[APM_CS_16 >> 3],
11857 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
11858 set_desc_base(&gdt[APM_DS >> 3],
11859 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
11860 + pax_close_kernel();
11861
11862 proc_create("apm", 0, NULL, &apm_file_ops);
11863
11864 diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
11865 index 4f13faf..87db5d2 100644
11866 --- a/arch/x86/kernel/asm-offsets.c
11867 +++ b/arch/x86/kernel/asm-offsets.c
11868 @@ -33,6 +33,8 @@ void common(void) {
11869 OFFSET(TI_status, thread_info, status);
11870 OFFSET(TI_addr_limit, thread_info, addr_limit);
11871 OFFSET(TI_preempt_count, thread_info, preempt_count);
11872 + OFFSET(TI_lowest_stack, thread_info, lowest_stack);
11873 + DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
11874
11875 BLANK();
11876 OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
11877 @@ -53,8 +55,26 @@ void common(void) {
11878 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
11879 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
11880 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
11881 +
11882 +#ifdef CONFIG_PAX_KERNEXEC
11883 + OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
11884 #endif
11885
11886 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11887 + OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
11888 + OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
11889 +#ifdef CONFIG_X86_64
11890 + OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
11891 +#endif
11892 +#endif
11893 +
11894 +#endif
11895 +
11896 + BLANK();
11897 + DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
11898 + DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
11899 + DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
11900 +
11901 #ifdef CONFIG_XEN
11902 BLANK();
11903 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
11904 diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
11905 index e72a119..6e2955d 100644
11906 --- a/arch/x86/kernel/asm-offsets_64.c
11907 +++ b/arch/x86/kernel/asm-offsets_64.c
11908 @@ -69,6 +69,7 @@ int main(void)
11909 BLANK();
11910 #undef ENTRY
11911
11912 + DEFINE(TSS_size, sizeof(struct tss_struct));
11913 OFFSET(TSS_ist, tss_struct, x86_tss.ist);
11914 BLANK();
11915
11916 diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
11917 index 25f24dc..4094a7f 100644
11918 --- a/arch/x86/kernel/cpu/Makefile
11919 +++ b/arch/x86/kernel/cpu/Makefile
11920 @@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
11921 CFLAGS_REMOVE_perf_event.o = -pg
11922 endif
11923
11924 -# Make sure load_percpu_segment has no stackprotector
11925 -nostackp := $(call cc-option, -fno-stack-protector)
11926 -CFLAGS_common.o := $(nostackp)
11927 -
11928 obj-y := intel_cacheinfo.o scattered.o topology.o
11929 obj-y += proc.o capflags.o powerflags.o common.o
11930 obj-y += vmware.o hypervisor.o sched.o mshyperv.o
11931 diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
11932 index 0bab2b1..d0a1bf8 100644
11933 --- a/arch/x86/kernel/cpu/amd.c
11934 +++ b/arch/x86/kernel/cpu/amd.c
11935 @@ -664,7 +664,7 @@ static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c,
11936 unsigned int size)
11937 {
11938 /* AMD errata T13 (order #21922) */
11939 - if ((c->x86 == 6)) {
11940 + if (c->x86 == 6) {
11941 /* Duron Rev A0 */
11942 if (c->x86_model == 3 && c->x86_mask == 0)
11943 size = 64;
11944 diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
11945 index aa003b1..47ea638 100644
11946 --- a/arch/x86/kernel/cpu/common.c
11947 +++ b/arch/x86/kernel/cpu/common.c
11948 @@ -84,60 +84,6 @@ static const struct cpu_dev __cpuinitconst default_cpu = {
11949
11950 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
11951
11952 -DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
11953 -#ifdef CONFIG_X86_64
11954 - /*
11955 - * We need valid kernel segments for data and code in long mode too
11956 - * IRET will check the segment types kkeil 2000/10/28
11957 - * Also sysret mandates a special GDT layout
11958 - *
11959 - * TLS descriptors are currently at a different place compared to i386.
11960 - * Hopefully nobody expects them at a fixed place (Wine?)
11961 - */
11962 - [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
11963 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
11964 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
11965 - [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
11966 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
11967 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
11968 -#else
11969 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
11970 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
11971 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
11972 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
11973 - /*
11974 - * Segments used for calling PnP BIOS have byte granularity.
11975 - * They code segments and data segments have fixed 64k limits,
11976 - * the transfer segment sizes are set at run time.
11977 - */
11978 - /* 32-bit code */
11979 - [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
11980 - /* 16-bit code */
11981 - [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
11982 - /* 16-bit data */
11983 - [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
11984 - /* 16-bit data */
11985 - [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
11986 - /* 16-bit data */
11987 - [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
11988 - /*
11989 - * The APM segments have byte granularity and their bases
11990 - * are set at run time. All have 64k limits.
11991 - */
11992 - /* 32-bit code */
11993 - [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
11994 - /* 16-bit code */
11995 - [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
11996 - /* data */
11997 - [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
11998 -
11999 - [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
12000 - [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
12001 - GDT_STACK_CANARY_INIT
12002 -#endif
12003 -} };
12004 -EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
12005 -
12006 static int __init x86_xsave_setup(char *s)
12007 {
12008 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
12009 @@ -372,7 +318,7 @@ void switch_to_new_gdt(int cpu)
12010 {
12011 struct desc_ptr gdt_descr;
12012
12013 - gdt_descr.address = (long)get_cpu_gdt_table(cpu);
12014 + gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
12015 gdt_descr.size = GDT_SIZE - 1;
12016 load_gdt(&gdt_descr);
12017 /* Reload the per-cpu base */
12018 @@ -844,6 +790,10 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
12019 /* Filter out anything that depends on CPUID levels we don't have */
12020 filter_cpuid_features(c, true);
12021
12022 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
12023 + setup_clear_cpu_cap(X86_FEATURE_SEP);
12024 +#endif
12025 +
12026 /* If the model name is still unset, do table lookup. */
12027 if (!c->x86_model_id[0]) {
12028 const char *p;
12029 @@ -1024,6 +974,9 @@ static __init int setup_disablecpuid(char *arg)
12030 }
12031 __setup("clearcpuid=", setup_disablecpuid);
12032
12033 +DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
12034 +EXPORT_PER_CPU_SYMBOL(current_tinfo);
12035 +
12036 #ifdef CONFIG_X86_64
12037 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
12038
12039 @@ -1039,7 +992,7 @@ DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
12040 EXPORT_PER_CPU_SYMBOL(current_task);
12041
12042 DEFINE_PER_CPU(unsigned long, kernel_stack) =
12043 - (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
12044 + (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
12045 EXPORT_PER_CPU_SYMBOL(kernel_stack);
12046
12047 DEFINE_PER_CPU(char *, irq_stack_ptr) =
12048 @@ -1104,7 +1057,7 @@ struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
12049 {
12050 memset(regs, 0, sizeof(struct pt_regs));
12051 regs->fs = __KERNEL_PERCPU;
12052 - regs->gs = __KERNEL_STACK_CANARY;
12053 + savesegment(gs, regs->gs);
12054
12055 return regs;
12056 }
12057 @@ -1159,7 +1112,7 @@ void __cpuinit cpu_init(void)
12058 int i;
12059
12060 cpu = stack_smp_processor_id();
12061 - t = &per_cpu(init_tss, cpu);
12062 + t = init_tss + cpu;
12063 oist = &per_cpu(orig_ist, cpu);
12064
12065 #ifdef CONFIG_NUMA
12066 @@ -1185,7 +1138,7 @@ void __cpuinit cpu_init(void)
12067 switch_to_new_gdt(cpu);
12068 loadsegment(fs, 0);
12069
12070 - load_idt((const struct desc_ptr *)&idt_descr);
12071 + load_idt(&idt_descr);
12072
12073 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
12074 syscall_init();
12075 @@ -1194,7 +1147,6 @@ void __cpuinit cpu_init(void)
12076 wrmsrl(MSR_KERNEL_GS_BASE, 0);
12077 barrier();
12078
12079 - x86_configure_nx();
12080 if (cpu != 0)
12081 enable_x2apic();
12082
12083 @@ -1248,7 +1200,7 @@ void __cpuinit cpu_init(void)
12084 {
12085 int cpu = smp_processor_id();
12086 struct task_struct *curr = current;
12087 - struct tss_struct *t = &per_cpu(init_tss, cpu);
12088 + struct tss_struct *t = init_tss + cpu;
12089 struct thread_struct *thread = &curr->thread;
12090
12091 if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
12092 diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
12093 index 5231312..a78a987 100644
12094 --- a/arch/x86/kernel/cpu/intel.c
12095 +++ b/arch/x86/kernel/cpu/intel.c
12096 @@ -174,7 +174,7 @@ static void __cpuinit trap_init_f00f_bug(void)
12097 * Update the IDT descriptor and reload the IDT so that
12098 * it uses the read-only mapped virtual address.
12099 */
12100 - idt_descr.address = fix_to_virt(FIX_F00F_IDT);
12101 + idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
12102 load_idt(&idt_descr);
12103 }
12104 #endif
12105 diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
12106 index 2af127d..8ff7ac0 100644
12107 --- a/arch/x86/kernel/cpu/mcheck/mce.c
12108 +++ b/arch/x86/kernel/cpu/mcheck/mce.c
12109 @@ -42,6 +42,7 @@
12110 #include <asm/processor.h>
12111 #include <asm/mce.h>
12112 #include <asm/msr.h>
12113 +#include <asm/local.h>
12114
12115 #include "mce-internal.h"
12116
12117 @@ -202,7 +203,7 @@ static void print_mce(struct mce *m)
12118 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
12119 m->cs, m->ip);
12120
12121 - if (m->cs == __KERNEL_CS)
12122 + if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
12123 print_symbol("{%s}", m->ip);
12124 pr_cont("\n");
12125 }
12126 @@ -235,10 +236,10 @@ static void print_mce(struct mce *m)
12127
12128 #define PANIC_TIMEOUT 5 /* 5 seconds */
12129
12130 -static atomic_t mce_paniced;
12131 +static atomic_unchecked_t mce_paniced;
12132
12133 static int fake_panic;
12134 -static atomic_t mce_fake_paniced;
12135 +static atomic_unchecked_t mce_fake_paniced;
12136
12137 /* Panic in progress. Enable interrupts and wait for final IPI */
12138 static void wait_for_panic(void)
12139 @@ -262,7 +263,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
12140 /*
12141 * Make sure only one CPU runs in machine check panic
12142 */
12143 - if (atomic_inc_return(&mce_paniced) > 1)
12144 + if (atomic_inc_return_unchecked(&mce_paniced) > 1)
12145 wait_for_panic();
12146 barrier();
12147
12148 @@ -270,7 +271,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
12149 console_verbose();
12150 } else {
12151 /* Don't log too much for fake panic */
12152 - if (atomic_inc_return(&mce_fake_paniced) > 1)
12153 + if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
12154 return;
12155 }
12156 /* First print corrected ones that are still unlogged */
12157 @@ -610,7 +611,7 @@ static int mce_timed_out(u64 *t)
12158 * might have been modified by someone else.
12159 */
12160 rmb();
12161 - if (atomic_read(&mce_paniced))
12162 + if (atomic_read_unchecked(&mce_paniced))
12163 wait_for_panic();
12164 if (!monarch_timeout)
12165 goto out;
12166 @@ -1398,7 +1399,7 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
12167 }
12168
12169 /* Call the installed machine check handler for this CPU setup. */
12170 -void (*machine_check_vector)(struct pt_regs *, long error_code) =
12171 +void (*machine_check_vector)(struct pt_regs *, long error_code) __read_only =
12172 unexpected_machine_check;
12173
12174 /*
12175 @@ -1421,7 +1422,9 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
12176 return;
12177 }
12178
12179 + pax_open_kernel();
12180 machine_check_vector = do_machine_check;
12181 + pax_close_kernel();
12182
12183 __mcheck_cpu_init_generic();
12184 __mcheck_cpu_init_vendor(c);
12185 @@ -1435,7 +1438,7 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
12186 */
12187
12188 static DEFINE_SPINLOCK(mce_chrdev_state_lock);
12189 -static int mce_chrdev_open_count; /* #times opened */
12190 +static local_t mce_chrdev_open_count; /* #times opened */
12191 static int mce_chrdev_open_exclu; /* already open exclusive? */
12192
12193 static int mce_chrdev_open(struct inode *inode, struct file *file)
12194 @@ -1443,7 +1446,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
12195 spin_lock(&mce_chrdev_state_lock);
12196
12197 if (mce_chrdev_open_exclu ||
12198 - (mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
12199 + (local_read(&mce_chrdev_open_count) && (file->f_flags & O_EXCL))) {
12200 spin_unlock(&mce_chrdev_state_lock);
12201
12202 return -EBUSY;
12203 @@ -1451,7 +1454,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
12204
12205 if (file->f_flags & O_EXCL)
12206 mce_chrdev_open_exclu = 1;
12207 - mce_chrdev_open_count++;
12208 + local_inc(&mce_chrdev_open_count);
12209
12210 spin_unlock(&mce_chrdev_state_lock);
12211
12212 @@ -1462,7 +1465,7 @@ static int mce_chrdev_release(struct inode *inode, struct file *file)
12213 {
12214 spin_lock(&mce_chrdev_state_lock);
12215
12216 - mce_chrdev_open_count--;
12217 + local_dec(&mce_chrdev_open_count);
12218 mce_chrdev_open_exclu = 0;
12219
12220 spin_unlock(&mce_chrdev_state_lock);
12221 @@ -2171,7 +2174,7 @@ struct dentry *mce_get_debugfs_dir(void)
12222 static void mce_reset(void)
12223 {
12224 cpu_missing = 0;
12225 - atomic_set(&mce_fake_paniced, 0);
12226 + atomic_set_unchecked(&mce_fake_paniced, 0);
12227 atomic_set(&mce_executing, 0);
12228 atomic_set(&mce_callin, 0);
12229 atomic_set(&global_nwo, 0);
12230 diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
12231 index 5c0e653..0882b0a 100644
12232 --- a/arch/x86/kernel/cpu/mcheck/p5.c
12233 +++ b/arch/x86/kernel/cpu/mcheck/p5.c
12234 @@ -12,6 +12,7 @@
12235 #include <asm/system.h>
12236 #include <asm/mce.h>
12237 #include <asm/msr.h>
12238 +#include <asm/pgtable.h>
12239
12240 /* By default disabled */
12241 int mce_p5_enabled __read_mostly;
12242 @@ -50,7 +51,9 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
12243 if (!cpu_has(c, X86_FEATURE_MCE))
12244 return;
12245
12246 + pax_open_kernel();
12247 machine_check_vector = pentium_machine_check;
12248 + pax_close_kernel();
12249 /* Make sure the vector pointer is visible before we enable MCEs: */
12250 wmb();
12251
12252 diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
12253 index 54060f5..c1a7577 100644
12254 --- a/arch/x86/kernel/cpu/mcheck/winchip.c
12255 +++ b/arch/x86/kernel/cpu/mcheck/winchip.c
12256 @@ -11,6 +11,7 @@
12257 #include <asm/system.h>
12258 #include <asm/mce.h>
12259 #include <asm/msr.h>
12260 +#include <asm/pgtable.h>
12261
12262 /* Machine check handler for WinChip C6: */
12263 static void winchip_machine_check(struct pt_regs *regs, long error_code)
12264 @@ -24,7 +25,9 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c)
12265 {
12266 u32 lo, hi;
12267
12268 + pax_open_kernel();
12269 machine_check_vector = winchip_machine_check;
12270 + pax_close_kernel();
12271 /* Make sure the vector pointer is visible before we enable MCEs: */
12272 wmb();
12273
12274 diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
12275 index 6b96110..0da73eb 100644
12276 --- a/arch/x86/kernel/cpu/mtrr/main.c
12277 +++ b/arch/x86/kernel/cpu/mtrr/main.c
12278 @@ -62,7 +62,7 @@ static DEFINE_MUTEX(mtrr_mutex);
12279 u64 size_or_mask, size_and_mask;
12280 static bool mtrr_aps_delayed_init;
12281
12282 -static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
12283 +static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
12284
12285 const struct mtrr_ops *mtrr_if;
12286
12287 diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
12288 index df5e41f..816c719 100644
12289 --- a/arch/x86/kernel/cpu/mtrr/mtrr.h
12290 +++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
12291 @@ -25,7 +25,7 @@ struct mtrr_ops {
12292 int (*validate_add_page)(unsigned long base, unsigned long size,
12293 unsigned int type);
12294 int (*have_wrcomb)(void);
12295 -};
12296 +} __do_const;
12297
12298 extern int generic_get_free_region(unsigned long base, unsigned long size,
12299 int replace_reg);
12300 diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
12301 index 2bda212..78cc605 100644
12302 --- a/arch/x86/kernel/cpu/perf_event.c
12303 +++ b/arch/x86/kernel/cpu/perf_event.c
12304 @@ -1529,7 +1529,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
12305 break;
12306
12307 perf_callchain_store(entry, frame.return_address);
12308 - fp = frame.next_frame;
12309 + fp = (const void __force_user *)frame.next_frame;
12310 }
12311 }
12312
12313 diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
12314 index 13ad899..f642b9a 100644
12315 --- a/arch/x86/kernel/crash.c
12316 +++ b/arch/x86/kernel/crash.c
12317 @@ -36,10 +36,8 @@ static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
12318 {
12319 #ifdef CONFIG_X86_32
12320 struct pt_regs fixed_regs;
12321 -#endif
12322
12323 -#ifdef CONFIG_X86_32
12324 - if (!user_mode_vm(regs)) {
12325 + if (!user_mode(regs)) {
12326 crash_fixup_ss_esp(&fixed_regs, regs);
12327 regs = &fixed_regs;
12328 }
12329 diff --git a/arch/x86/kernel/doublefault_32.c b/arch/x86/kernel/doublefault_32.c
12330 index 37250fe..bf2ec74 100644
12331 --- a/arch/x86/kernel/doublefault_32.c
12332 +++ b/arch/x86/kernel/doublefault_32.c
12333 @@ -11,7 +11,7 @@
12334
12335 #define DOUBLEFAULT_STACKSIZE (1024)
12336 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
12337 -#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
12338 +#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
12339
12340 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
12341
12342 @@ -21,7 +21,7 @@ static void doublefault_fn(void)
12343 unsigned long gdt, tss;
12344
12345 store_gdt(&gdt_desc);
12346 - gdt = gdt_desc.address;
12347 + gdt = (unsigned long)gdt_desc.address;
12348
12349 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
12350
12351 @@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cacheline_aligned = {
12352 /* 0x2 bit is always set */
12353 .flags = X86_EFLAGS_SF | 0x2,
12354 .sp = STACK_START,
12355 - .es = __USER_DS,
12356 + .es = __KERNEL_DS,
12357 .cs = __KERNEL_CS,
12358 .ss = __KERNEL_DS,
12359 - .ds = __USER_DS,
12360 + .ds = __KERNEL_DS,
12361 .fs = __KERNEL_PERCPU,
12362
12363 .__cr3 = __pa_nodebug(swapper_pg_dir),
12364 diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
12365 index 1aae78f..aab3a3d 100644
12366 --- a/arch/x86/kernel/dumpstack.c
12367 +++ b/arch/x86/kernel/dumpstack.c
12368 @@ -2,6 +2,9 @@
12369 * Copyright (C) 1991, 1992 Linus Torvalds
12370 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
12371 */
12372 +#ifdef CONFIG_GRKERNSEC_HIDESYM
12373 +#define __INCLUDED_BY_HIDESYM 1
12374 +#endif
12375 #include <linux/kallsyms.h>
12376 #include <linux/kprobes.h>
12377 #include <linux/uaccess.h>
12378 @@ -35,9 +38,8 @@ void printk_address(unsigned long address, int reliable)
12379 static void
12380 print_ftrace_graph_addr(unsigned long addr, void *data,
12381 const struct stacktrace_ops *ops,
12382 - struct thread_info *tinfo, int *graph)
12383 + struct task_struct *task, int *graph)
12384 {
12385 - struct task_struct *task = tinfo->task;
12386 unsigned long ret_addr;
12387 int index = task->curr_ret_stack;
12388
12389 @@ -58,7 +60,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
12390 static inline void
12391 print_ftrace_graph_addr(unsigned long addr, void *data,
12392 const struct stacktrace_ops *ops,
12393 - struct thread_info *tinfo, int *graph)
12394 + struct task_struct *task, int *graph)
12395 { }
12396 #endif
12397
12398 @@ -69,10 +71,8 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
12399 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
12400 */
12401
12402 -static inline int valid_stack_ptr(struct thread_info *tinfo,
12403 - void *p, unsigned int size, void *end)
12404 +static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
12405 {
12406 - void *t = tinfo;
12407 if (end) {
12408 if (p < end && p >= (end-THREAD_SIZE))
12409 return 1;
12410 @@ -83,14 +83,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo,
12411 }
12412
12413 unsigned long
12414 -print_context_stack(struct thread_info *tinfo,
12415 +print_context_stack(struct task_struct *task, void *stack_start,
12416 unsigned long *stack, unsigned long bp,
12417 const struct stacktrace_ops *ops, void *data,
12418 unsigned long *end, int *graph)
12419 {
12420 struct stack_frame *frame = (struct stack_frame *)bp;
12421
12422 - while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
12423 + while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
12424 unsigned long addr;
12425
12426 addr = *stack;
12427 @@ -102,7 +102,7 @@ print_context_stack(struct thread_info *tinfo,
12428 } else {
12429 ops->address(data, addr, 0);
12430 }
12431 - print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
12432 + print_ftrace_graph_addr(addr, data, ops, task, graph);
12433 }
12434 stack++;
12435 }
12436 @@ -111,7 +111,7 @@ print_context_stack(struct thread_info *tinfo,
12437 EXPORT_SYMBOL_GPL(print_context_stack);
12438
12439 unsigned long
12440 -print_context_stack_bp(struct thread_info *tinfo,
12441 +print_context_stack_bp(struct task_struct *task, void *stack_start,
12442 unsigned long *stack, unsigned long bp,
12443 const struct stacktrace_ops *ops, void *data,
12444 unsigned long *end, int *graph)
12445 @@ -119,7 +119,7 @@ print_context_stack_bp(struct thread_info *tinfo,
12446 struct stack_frame *frame = (struct stack_frame *)bp;
12447 unsigned long *ret_addr = &frame->return_address;
12448
12449 - while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
12450 + while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) {
12451 unsigned long addr = *ret_addr;
12452
12453 if (!__kernel_text_address(addr))
12454 @@ -128,7 +128,7 @@ print_context_stack_bp(struct thread_info *tinfo,
12455 ops->address(data, addr, 1);
12456 frame = frame->next_frame;
12457 ret_addr = &frame->return_address;
12458 - print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
12459 + print_ftrace_graph_addr(addr, data, ops, task, graph);
12460 }
12461
12462 return (unsigned long)frame;
12463 @@ -186,7 +186,7 @@ void dump_stack(void)
12464
12465 bp = stack_frame(current, NULL);
12466 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
12467 - current->pid, current->comm, print_tainted(),
12468 + task_pid_nr(current), current->comm, print_tainted(),
12469 init_utsname()->release,
12470 (int)strcspn(init_utsname()->version, " "),
12471 init_utsname()->version);
12472 @@ -222,6 +222,8 @@ unsigned __kprobes long oops_begin(void)
12473 }
12474 EXPORT_SYMBOL_GPL(oops_begin);
12475
12476 +extern void gr_handle_kernel_exploit(void);
12477 +
12478 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
12479 {
12480 if (regs && kexec_should_crash(current))
12481 @@ -243,7 +245,10 @@ void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
12482 panic("Fatal exception in interrupt");
12483 if (panic_on_oops)
12484 panic("Fatal exception");
12485 - do_exit(signr);
12486 +
12487 + gr_handle_kernel_exploit();
12488 +
12489 + do_group_exit(signr);
12490 }
12491
12492 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
12493 @@ -269,7 +274,7 @@ int __kprobes __die(const char *str, struct pt_regs *regs, long err)
12494
12495 show_registers(regs);
12496 #ifdef CONFIG_X86_32
12497 - if (user_mode_vm(regs)) {
12498 + if (user_mode(regs)) {
12499 sp = regs->sp;
12500 ss = regs->ss & 0xffff;
12501 } else {
12502 @@ -297,7 +302,7 @@ void die(const char *str, struct pt_regs *regs, long err)
12503 unsigned long flags = oops_begin();
12504 int sig = SIGSEGV;
12505
12506 - if (!user_mode_vm(regs))
12507 + if (!user_mode(regs))
12508 report_bug(regs->ip, regs);
12509
12510 if (__die(str, regs, err))
12511 diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
12512 index c99f9ed..2a15d80 100644
12513 --- a/arch/x86/kernel/dumpstack_32.c
12514 +++ b/arch/x86/kernel/dumpstack_32.c
12515 @@ -38,15 +38,13 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
12516 bp = stack_frame(task, regs);
12517
12518 for (;;) {
12519 - struct thread_info *context;
12520 + void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
12521
12522 - context = (struct thread_info *)
12523 - ((unsigned long)stack & (~(THREAD_SIZE - 1)));
12524 - bp = ops->walk_stack(context, stack, bp, ops, data, NULL, &graph);
12525 + bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
12526
12527 - stack = (unsigned long *)context->previous_esp;
12528 - if (!stack)
12529 + if (stack_start == task_stack_page(task))
12530 break;
12531 + stack = *(unsigned long **)stack_start;
12532 if (ops->stack(data, "IRQ") < 0)
12533 break;
12534 touch_nmi_watchdog();
12535 @@ -96,21 +94,22 @@ void show_registers(struct pt_regs *regs)
12536 * When in-kernel, we also print out the stack and code at the
12537 * time of the fault..
12538 */
12539 - if (!user_mode_vm(regs)) {
12540 + if (!user_mode(regs)) {
12541 unsigned int code_prologue = code_bytes * 43 / 64;
12542 unsigned int code_len = code_bytes;
12543 unsigned char c;
12544 u8 *ip;
12545 + unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(smp_processor_id())[(0xffff & regs->cs) >> 3]);
12546
12547 printk(KERN_EMERG "Stack:\n");
12548 show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
12549
12550 printk(KERN_EMERG "Code: ");
12551
12552 - ip = (u8 *)regs->ip - code_prologue;
12553 + ip = (u8 *)regs->ip - code_prologue + cs_base;
12554 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
12555 /* try starting at IP */
12556 - ip = (u8 *)regs->ip;
12557 + ip = (u8 *)regs->ip + cs_base;
12558 code_len = code_len - code_prologue + 1;
12559 }
12560 for (i = 0; i < code_len; i++, ip++) {
12561 @@ -119,7 +118,7 @@ void show_registers(struct pt_regs *regs)
12562 printk(KERN_CONT " Bad EIP value.");
12563 break;
12564 }
12565 - if (ip == (u8 *)regs->ip)
12566 + if (ip == (u8 *)regs->ip + cs_base)
12567 printk(KERN_CONT "<%02x> ", c);
12568 else
12569 printk(KERN_CONT "%02x ", c);
12570 @@ -132,6 +131,7 @@ int is_valid_bugaddr(unsigned long ip)
12571 {
12572 unsigned short ud2;
12573
12574 + ip = ktla_ktva(ip);
12575 if (ip < PAGE_OFFSET)
12576 return 0;
12577 if (probe_kernel_address((unsigned short *)ip, ud2))
12578 @@ -139,3 +139,15 @@ int is_valid_bugaddr(unsigned long ip)
12579
12580 return ud2 == 0x0b0f;
12581 }
12582 +
12583 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
12584 +void pax_check_alloca(unsigned long size)
12585 +{
12586 + unsigned long sp = (unsigned long)&sp, stack_left;
12587 +
12588 + /* all kernel stacks are of the same size */
12589 + stack_left = sp & (THREAD_SIZE - 1);
12590 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
12591 +}
12592 +EXPORT_SYMBOL(pax_check_alloca);
12593 +#endif
12594 diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
12595 index 6d728d9..279514e 100644
12596 --- a/arch/x86/kernel/dumpstack_64.c
12597 +++ b/arch/x86/kernel/dumpstack_64.c
12598 @@ -119,9 +119,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
12599 unsigned long *irq_stack_end =
12600 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
12601 unsigned used = 0;
12602 - struct thread_info *tinfo;
12603 int graph = 0;
12604 unsigned long dummy;
12605 + void *stack_start;
12606
12607 if (!task)
12608 task = current;
12609 @@ -142,10 +142,10 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
12610 * current stack address. If the stacks consist of nested
12611 * exceptions
12612 */
12613 - tinfo = task_thread_info(task);
12614 for (;;) {
12615 char *id;
12616 unsigned long *estack_end;
12617 +
12618 estack_end = in_exception_stack(cpu, (unsigned long)stack,
12619 &used, &id);
12620
12621 @@ -153,7 +153,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
12622 if (ops->stack(data, id) < 0)
12623 break;
12624
12625 - bp = ops->walk_stack(tinfo, stack, bp, ops,
12626 + bp = ops->walk_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
12627 data, estack_end, &graph);
12628 ops->stack(data, "<EOE>");
12629 /*
12630 @@ -172,7 +172,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
12631 if (in_irq_stack(stack, irq_stack, irq_stack_end)) {
12632 if (ops->stack(data, "IRQ") < 0)
12633 break;
12634 - bp = ops->walk_stack(tinfo, stack, bp,
12635 + bp = ops->walk_stack(task, irq_stack, stack, bp,
12636 ops, data, irq_stack_end, &graph);
12637 /*
12638 * We link to the next stack (which would be
12639 @@ -191,7 +191,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
12640 /*
12641 * This handles the process stack:
12642 */
12643 - bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
12644 + stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
12645 + bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
12646 put_cpu();
12647 }
12648 EXPORT_SYMBOL(dump_trace);
12649 @@ -305,3 +306,50 @@ int is_valid_bugaddr(unsigned long ip)
12650
12651 return ud2 == 0x0b0f;
12652 }
12653 +
12654 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
12655 +void pax_check_alloca(unsigned long size)
12656 +{
12657 + unsigned long sp = (unsigned long)&sp, stack_start, stack_end;
12658 + unsigned cpu, used;
12659 + char *id;
12660 +
12661 + /* check the process stack first */
12662 + stack_start = (unsigned long)task_stack_page(current);
12663 + stack_end = stack_start + THREAD_SIZE;
12664 + if (likely(stack_start <= sp && sp < stack_end)) {
12665 + unsigned long stack_left = sp & (THREAD_SIZE - 1);
12666 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
12667 + return;
12668 + }
12669 +
12670 + cpu = get_cpu();
12671 +
12672 + /* check the irq stacks */
12673 + stack_end = (unsigned long)per_cpu(irq_stack_ptr, cpu);
12674 + stack_start = stack_end - IRQ_STACK_SIZE;
12675 + if (stack_start <= sp && sp < stack_end) {
12676 + unsigned long stack_left = sp & (IRQ_STACK_SIZE - 1);
12677 + put_cpu();
12678 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
12679 + return;
12680 + }
12681 +
12682 + /* check the exception stacks */
12683 + used = 0;
12684 + stack_end = (unsigned long)in_exception_stack(cpu, sp, &used, &id);
12685 + stack_start = stack_end - EXCEPTION_STKSZ;
12686 + if (stack_end && stack_start <= sp && sp < stack_end) {
12687 + unsigned long stack_left = sp & (EXCEPTION_STKSZ - 1);
12688 + put_cpu();
12689 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
12690 + return;
12691 + }
12692 +
12693 + put_cpu();
12694 +
12695 + /* unknown stack */
12696 + BUG();
12697 +}
12698 +EXPORT_SYMBOL(pax_check_alloca);
12699 +#endif
12700 diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
12701 index cd28a35..c72ed9a 100644
12702 --- a/arch/x86/kernel/early_printk.c
12703 +++ b/arch/x86/kernel/early_printk.c
12704 @@ -7,6 +7,7 @@
12705 #include <linux/pci_regs.h>
12706 #include <linux/pci_ids.h>
12707 #include <linux/errno.h>
12708 +#include <linux/sched.h>
12709 #include <asm/io.h>
12710 #include <asm/processor.h>
12711 #include <asm/fcntl.h>
12712 diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
12713 index f3f6f53..0841b66 100644
12714 --- a/arch/x86/kernel/entry_32.S
12715 +++ b/arch/x86/kernel/entry_32.S
12716 @@ -186,13 +186,146 @@
12717 /*CFI_REL_OFFSET gs, PT_GS*/
12718 .endm
12719 .macro SET_KERNEL_GS reg
12720 +
12721 +#ifdef CONFIG_CC_STACKPROTECTOR
12722 movl $(__KERNEL_STACK_CANARY), \reg
12723 +#elif defined(CONFIG_PAX_MEMORY_UDEREF)
12724 + movl $(__USER_DS), \reg
12725 +#else
12726 + xorl \reg, \reg
12727 +#endif
12728 +
12729 movl \reg, %gs
12730 .endm
12731
12732 #endif /* CONFIG_X86_32_LAZY_GS */
12733
12734 -.macro SAVE_ALL
12735 +.macro pax_enter_kernel
12736 +#ifdef CONFIG_PAX_KERNEXEC
12737 + call pax_enter_kernel
12738 +#endif
12739 +.endm
12740 +
12741 +.macro pax_exit_kernel
12742 +#ifdef CONFIG_PAX_KERNEXEC
12743 + call pax_exit_kernel
12744 +#endif
12745 +.endm
12746 +
12747 +#ifdef CONFIG_PAX_KERNEXEC
12748 +ENTRY(pax_enter_kernel)
12749 +#ifdef CONFIG_PARAVIRT
12750 + pushl %eax
12751 + pushl %ecx
12752 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
12753 + mov %eax, %esi
12754 +#else
12755 + mov %cr0, %esi
12756 +#endif
12757 + bts $16, %esi
12758 + jnc 1f
12759 + mov %cs, %esi
12760 + cmp $__KERNEL_CS, %esi
12761 + jz 3f
12762 + ljmp $__KERNEL_CS, $3f
12763 +1: ljmp $__KERNEXEC_KERNEL_CS, $2f
12764 +2:
12765 +#ifdef CONFIG_PARAVIRT
12766 + mov %esi, %eax
12767 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
12768 +#else
12769 + mov %esi, %cr0
12770 +#endif
12771 +3:
12772 +#ifdef CONFIG_PARAVIRT
12773 + popl %ecx
12774 + popl %eax
12775 +#endif
12776 + ret
12777 +ENDPROC(pax_enter_kernel)
12778 +
12779 +ENTRY(pax_exit_kernel)
12780 +#ifdef CONFIG_PARAVIRT
12781 + pushl %eax
12782 + pushl %ecx
12783 +#endif
12784 + mov %cs, %esi
12785 + cmp $__KERNEXEC_KERNEL_CS, %esi
12786 + jnz 2f
12787 +#ifdef CONFIG_PARAVIRT
12788 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
12789 + mov %eax, %esi
12790 +#else
12791 + mov %cr0, %esi
12792 +#endif
12793 + btr $16, %esi
12794 + ljmp $__KERNEL_CS, $1f
12795 +1:
12796 +#ifdef CONFIG_PARAVIRT
12797 + mov %esi, %eax
12798 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
12799 +#else
12800 + mov %esi, %cr0
12801 +#endif
12802 +2:
12803 +#ifdef CONFIG_PARAVIRT
12804 + popl %ecx
12805 + popl %eax
12806 +#endif
12807 + ret
12808 +ENDPROC(pax_exit_kernel)
12809 +#endif
12810 +
12811 +.macro pax_erase_kstack
12812 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
12813 + call pax_erase_kstack
12814 +#endif
12815 +.endm
12816 +
12817 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
12818 +/*
12819 + * ebp: thread_info
12820 + * ecx, edx: can be clobbered
12821 + */
12822 +ENTRY(pax_erase_kstack)
12823 + pushl %edi
12824 + pushl %eax
12825 +
12826 + mov TI_lowest_stack(%ebp), %edi
12827 + mov $-0xBEEF, %eax
12828 + std
12829 +
12830 +1: mov %edi, %ecx
12831 + and $THREAD_SIZE_asm - 1, %ecx
12832 + shr $2, %ecx
12833 + repne scasl
12834 + jecxz 2f
12835 +
12836 + cmp $2*16, %ecx
12837 + jc 2f
12838 +
12839 + mov $2*16, %ecx
12840 + repe scasl
12841 + jecxz 2f
12842 + jne 1b
12843 +
12844 +2: cld
12845 + mov %esp, %ecx
12846 + sub %edi, %ecx
12847 + shr $2, %ecx
12848 + rep stosl
12849 +
12850 + mov TI_task_thread_sp0(%ebp), %edi
12851 + sub $128, %edi
12852 + mov %edi, TI_lowest_stack(%ebp)
12853 +
12854 + popl %eax
12855 + popl %edi
12856 + ret
12857 +ENDPROC(pax_erase_kstack)
12858 +#endif
12859 +
12860 +.macro __SAVE_ALL _DS
12861 cld
12862 PUSH_GS
12863 pushl_cfi %fs
12864 @@ -215,7 +348,7 @@
12865 CFI_REL_OFFSET ecx, 0
12866 pushl_cfi %ebx
12867 CFI_REL_OFFSET ebx, 0
12868 - movl $(__USER_DS), %edx
12869 + movl $\_DS, %edx
12870 movl %edx, %ds
12871 movl %edx, %es
12872 movl $(__KERNEL_PERCPU), %edx
12873 @@ -223,6 +356,15 @@
12874 SET_KERNEL_GS %edx
12875 .endm
12876
12877 +.macro SAVE_ALL
12878 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
12879 + __SAVE_ALL __KERNEL_DS
12880 + pax_enter_kernel
12881 +#else
12882 + __SAVE_ALL __USER_DS
12883 +#endif
12884 +.endm
12885 +
12886 .macro RESTORE_INT_REGS
12887 popl_cfi %ebx
12888 CFI_RESTORE ebx
12889 @@ -308,7 +450,7 @@ ENTRY(ret_from_fork)
12890 popfl_cfi
12891 jmp syscall_exit
12892 CFI_ENDPROC
12893 -END(ret_from_fork)
12894 +ENDPROC(ret_from_fork)
12895
12896 /*
12897 * Interrupt exit functions should be protected against kprobes
12898 @@ -333,7 +475,15 @@ check_userspace:
12899 movb PT_CS(%esp), %al
12900 andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
12901 cmpl $USER_RPL, %eax
12902 +
12903 +#ifdef CONFIG_PAX_KERNEXEC
12904 + jae resume_userspace
12905 +
12906 + PAX_EXIT_KERNEL
12907 + jmp resume_kernel
12908 +#else
12909 jb resume_kernel # not returning to v8086 or userspace
12910 +#endif
12911
12912 ENTRY(resume_userspace)
12913 LOCKDEP_SYS_EXIT
12914 @@ -345,8 +495,8 @@ ENTRY(resume_userspace)
12915 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
12916 # int/exception return?
12917 jne work_pending
12918 - jmp restore_all
12919 -END(ret_from_exception)
12920 + jmp restore_all_pax
12921 +ENDPROC(ret_from_exception)
12922
12923 #ifdef CONFIG_PREEMPT
12924 ENTRY(resume_kernel)
12925 @@ -361,7 +511,7 @@ need_resched:
12926 jz restore_all
12927 call preempt_schedule_irq
12928 jmp need_resched
12929 -END(resume_kernel)
12930 +ENDPROC(resume_kernel)
12931 #endif
12932 CFI_ENDPROC
12933 /*
12934 @@ -395,23 +545,34 @@ sysenter_past_esp:
12935 /*CFI_REL_OFFSET cs, 0*/
12936 /*
12937 * Push current_thread_info()->sysenter_return to the stack.
12938 - * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
12939 - * pushed above; +8 corresponds to copy_thread's esp0 setting.
12940 */
12941 - pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
12942 + pushl_cfi $0
12943 CFI_REL_OFFSET eip, 0
12944
12945 pushl_cfi %eax
12946 SAVE_ALL
12947 + GET_THREAD_INFO(%ebp)
12948 + movl TI_sysenter_return(%ebp),%ebp
12949 + movl %ebp,PT_EIP(%esp)
12950 ENABLE_INTERRUPTS(CLBR_NONE)
12951
12952 /*
12953 * Load the potential sixth argument from user stack.
12954 * Careful about security.
12955 */
12956 + movl PT_OLDESP(%esp),%ebp
12957 +
12958 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12959 + mov PT_OLDSS(%esp),%ds
12960 +1: movl %ds:(%ebp),%ebp
12961 + push %ss
12962 + pop %ds
12963 +#else
12964 cmpl $__PAGE_OFFSET-3,%ebp
12965 jae syscall_fault
12966 1: movl (%ebp),%ebp
12967 +#endif
12968 +
12969 movl %ebp,PT_EBP(%esp)
12970 .section __ex_table,"a"
12971 .align 4
12972 @@ -434,12 +595,24 @@ sysenter_do_call:
12973 testl $_TIF_ALLWORK_MASK, %ecx
12974 jne sysexit_audit
12975 sysenter_exit:
12976 +
12977 +#ifdef CONFIG_PAX_RANDKSTACK
12978 + pushl_cfi %eax
12979 + movl %esp, %eax
12980 + call pax_randomize_kstack
12981 + popl_cfi %eax
12982 +#endif
12983 +
12984 + pax_erase_kstack
12985 +
12986 /* if something modifies registers it must also disable sysexit */
12987 movl PT_EIP(%esp), %edx
12988 movl PT_OLDESP(%esp), %ecx
12989 xorl %ebp,%ebp
12990 TRACE_IRQS_ON
12991 1: mov PT_FS(%esp), %fs
12992 +2: mov PT_DS(%esp), %ds
12993 +3: mov PT_ES(%esp), %es
12994 PTGS_TO_GS
12995 ENABLE_INTERRUPTS_SYSEXIT
12996
12997 @@ -456,6 +629,9 @@ sysenter_audit:
12998 movl %eax,%edx /* 2nd arg: syscall number */
12999 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
13000 call audit_syscall_entry
13001 +
13002 + pax_erase_kstack
13003 +
13004 pushl_cfi %ebx
13005 movl PT_EAX(%esp),%eax /* reload syscall number */
13006 jmp sysenter_do_call
13007 @@ -482,11 +658,17 @@ sysexit_audit:
13008
13009 CFI_ENDPROC
13010 .pushsection .fixup,"ax"
13011 -2: movl $0,PT_FS(%esp)
13012 +4: movl $0,PT_FS(%esp)
13013 + jmp 1b
13014 +5: movl $0,PT_DS(%esp)
13015 + jmp 1b
13016 +6: movl $0,PT_ES(%esp)
13017 jmp 1b
13018 .section __ex_table,"a"
13019 .align 4
13020 - .long 1b,2b
13021 + .long 1b,4b
13022 + .long 2b,5b
13023 + .long 3b,6b
13024 .popsection
13025 PTGS_TO_GS_EX
13026 ENDPROC(ia32_sysenter_target)
13027 @@ -519,6 +701,15 @@ syscall_exit:
13028 testl $_TIF_ALLWORK_MASK, %ecx # current->work
13029 jne syscall_exit_work
13030
13031 +restore_all_pax:
13032 +
13033 +#ifdef CONFIG_PAX_RANDKSTACK
13034 + movl %esp, %eax
13035 + call pax_randomize_kstack
13036 +#endif
13037 +
13038 + pax_erase_kstack
13039 +
13040 restore_all:
13041 TRACE_IRQS_IRET
13042 restore_all_notrace:
13043 @@ -578,14 +769,34 @@ ldt_ss:
13044 * compensating for the offset by changing to the ESPFIX segment with
13045 * a base address that matches for the difference.
13046 */
13047 -#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
13048 +#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
13049 mov %esp, %edx /* load kernel esp */
13050 mov PT_OLDESP(%esp), %eax /* load userspace esp */
13051 mov %dx, %ax /* eax: new kernel esp */
13052 sub %eax, %edx /* offset (low word is 0) */
13053 +#ifdef CONFIG_SMP
13054 + movl PER_CPU_VAR(cpu_number), %ebx
13055 + shll $PAGE_SHIFT_asm, %ebx
13056 + addl $cpu_gdt_table, %ebx
13057 +#else
13058 + movl $cpu_gdt_table, %ebx
13059 +#endif
13060 shr $16, %edx
13061 - mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
13062 - mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
13063 +
13064 +#ifdef CONFIG_PAX_KERNEXEC
13065 + mov %cr0, %esi
13066 + btr $16, %esi
13067 + mov %esi, %cr0
13068 +#endif
13069 +
13070 + mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
13071 + mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
13072 +
13073 +#ifdef CONFIG_PAX_KERNEXEC
13074 + bts $16, %esi
13075 + mov %esi, %cr0
13076 +#endif
13077 +
13078 pushl_cfi $__ESPFIX_SS
13079 pushl_cfi %eax /* new kernel esp */
13080 /* Disable interrupts, but do not irqtrace this section: we
13081 @@ -614,34 +825,28 @@ work_resched:
13082 movl TI_flags(%ebp), %ecx
13083 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
13084 # than syscall tracing?
13085 - jz restore_all
13086 + jz restore_all_pax
13087 testb $_TIF_NEED_RESCHED, %cl
13088 jnz work_resched
13089
13090 work_notifysig: # deal with pending signals and
13091 # notify-resume requests
13092 + movl %esp, %eax
13093 #ifdef CONFIG_VM86
13094 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
13095 - movl %esp, %eax
13096 - jne work_notifysig_v86 # returning to kernel-space or
13097 + jz 1f # returning to kernel-space or
13098 # vm86-space
13099 - xorl %edx, %edx
13100 - call do_notify_resume
13101 - jmp resume_userspace_sig
13102
13103 - ALIGN
13104 -work_notifysig_v86:
13105 pushl_cfi %ecx # save ti_flags for do_notify_resume
13106 call save_v86_state # %eax contains pt_regs pointer
13107 popl_cfi %ecx
13108 movl %eax, %esp
13109 -#else
13110 - movl %esp, %eax
13111 +1:
13112 #endif
13113 xorl %edx, %edx
13114 call do_notify_resume
13115 jmp resume_userspace_sig
13116 -END(work_pending)
13117 +ENDPROC(work_pending)
13118
13119 # perform syscall exit tracing
13120 ALIGN
13121 @@ -649,11 +854,14 @@ syscall_trace_entry:
13122 movl $-ENOSYS,PT_EAX(%esp)
13123 movl %esp, %eax
13124 call syscall_trace_enter
13125 +
13126 + pax_erase_kstack
13127 +
13128 /* What it returned is what we'll actually use. */
13129 cmpl $(nr_syscalls), %eax
13130 jnae syscall_call
13131 jmp syscall_exit
13132 -END(syscall_trace_entry)
13133 +ENDPROC(syscall_trace_entry)
13134
13135 # perform syscall exit tracing
13136 ALIGN
13137 @@ -666,20 +874,24 @@ syscall_exit_work:
13138 movl %esp, %eax
13139 call syscall_trace_leave
13140 jmp resume_userspace
13141 -END(syscall_exit_work)
13142 +ENDPROC(syscall_exit_work)
13143 CFI_ENDPROC
13144
13145 RING0_INT_FRAME # can't unwind into user space anyway
13146 syscall_fault:
13147 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13148 + push %ss
13149 + pop %ds
13150 +#endif
13151 GET_THREAD_INFO(%ebp)
13152 movl $-EFAULT,PT_EAX(%esp)
13153 jmp resume_userspace
13154 -END(syscall_fault)
13155 +ENDPROC(syscall_fault)
13156
13157 syscall_badsys:
13158 movl $-ENOSYS,PT_EAX(%esp)
13159 jmp resume_userspace
13160 -END(syscall_badsys)
13161 +ENDPROC(syscall_badsys)
13162 CFI_ENDPROC
13163 /*
13164 * End of kprobes section
13165 @@ -753,6 +965,36 @@ ptregs_clone:
13166 CFI_ENDPROC
13167 ENDPROC(ptregs_clone)
13168
13169 + ALIGN;
13170 +ENTRY(kernel_execve)
13171 + CFI_STARTPROC
13172 + pushl_cfi %ebp
13173 + sub $PT_OLDSS+4,%esp
13174 + pushl_cfi %edi
13175 + pushl_cfi %ecx
13176 + pushl_cfi %eax
13177 + lea 3*4(%esp),%edi
13178 + mov $PT_OLDSS/4+1,%ecx
13179 + xorl %eax,%eax
13180 + rep stosl
13181 + popl_cfi %eax
13182 + popl_cfi %ecx
13183 + popl_cfi %edi
13184 + movl $X86_EFLAGS_IF,PT_EFLAGS(%esp)
13185 + pushl_cfi %esp
13186 + call sys_execve
13187 + add $4,%esp
13188 + CFI_ADJUST_CFA_OFFSET -4
13189 + GET_THREAD_INFO(%ebp)
13190 + test %eax,%eax
13191 + jz syscall_exit
13192 + add $PT_OLDSS+4,%esp
13193 + CFI_ADJUST_CFA_OFFSET -PT_OLDSS-4
13194 + popl_cfi %ebp
13195 + ret
13196 + CFI_ENDPROC
13197 +ENDPROC(kernel_execve)
13198 +
13199 .macro FIXUP_ESPFIX_STACK
13200 /*
13201 * Switch back for ESPFIX stack to the normal zerobased stack
13202 @@ -762,8 +1004,15 @@ ENDPROC(ptregs_clone)
13203 * normal stack and adjusts ESP with the matching offset.
13204 */
13205 /* fixup the stack */
13206 - mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
13207 - mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
13208 +#ifdef CONFIG_SMP
13209 + movl PER_CPU_VAR(cpu_number), %ebx
13210 + shll $PAGE_SHIFT_asm, %ebx
13211 + addl $cpu_gdt_table, %ebx
13212 +#else
13213 + movl $cpu_gdt_table, %ebx
13214 +#endif
13215 + mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
13216 + mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
13217 shl $16, %eax
13218 addl %esp, %eax /* the adjusted stack pointer */
13219 pushl_cfi $__KERNEL_DS
13220 @@ -816,7 +1065,7 @@ vector=vector+1
13221 .endr
13222 2: jmp common_interrupt
13223 .endr
13224 -END(irq_entries_start)
13225 +ENDPROC(irq_entries_start)
13226
13227 .previous
13228 END(interrupt)
13229 @@ -864,7 +1113,7 @@ ENTRY(coprocessor_error)
13230 pushl_cfi $do_coprocessor_error
13231 jmp error_code
13232 CFI_ENDPROC
13233 -END(coprocessor_error)
13234 +ENDPROC(coprocessor_error)
13235
13236 ENTRY(simd_coprocessor_error)
13237 RING0_INT_FRAME
13238 @@ -885,7 +1134,7 @@ ENTRY(simd_coprocessor_error)
13239 #endif
13240 jmp error_code
13241 CFI_ENDPROC
13242 -END(simd_coprocessor_error)
13243 +ENDPROC(simd_coprocessor_error)
13244
13245 ENTRY(device_not_available)
13246 RING0_INT_FRAME
13247 @@ -893,7 +1142,7 @@ ENTRY(device_not_available)
13248 pushl_cfi $do_device_not_available
13249 jmp error_code
13250 CFI_ENDPROC
13251 -END(device_not_available)
13252 +ENDPROC(device_not_available)
13253
13254 #ifdef CONFIG_PARAVIRT
13255 ENTRY(native_iret)
13256 @@ -902,12 +1151,12 @@ ENTRY(native_iret)
13257 .align 4
13258 .long native_iret, iret_exc
13259 .previous
13260 -END(native_iret)
13261 +ENDPROC(native_iret)
13262
13263 ENTRY(native_irq_enable_sysexit)
13264 sti
13265 sysexit
13266 -END(native_irq_enable_sysexit)
13267 +ENDPROC(native_irq_enable_sysexit)
13268 #endif
13269
13270 ENTRY(overflow)
13271 @@ -916,7 +1165,7 @@ ENTRY(overflow)
13272 pushl_cfi $do_overflow
13273 jmp error_code
13274 CFI_ENDPROC
13275 -END(overflow)
13276 +ENDPROC(overflow)
13277
13278 ENTRY(bounds)
13279 RING0_INT_FRAME
13280 @@ -924,7 +1173,7 @@ ENTRY(bounds)
13281 pushl_cfi $do_bounds
13282 jmp error_code
13283 CFI_ENDPROC
13284 -END(bounds)
13285 +ENDPROC(bounds)
13286
13287 ENTRY(invalid_op)
13288 RING0_INT_FRAME
13289 @@ -932,7 +1181,7 @@ ENTRY(invalid_op)
13290 pushl_cfi $do_invalid_op
13291 jmp error_code
13292 CFI_ENDPROC
13293 -END(invalid_op)
13294 +ENDPROC(invalid_op)
13295
13296 ENTRY(coprocessor_segment_overrun)
13297 RING0_INT_FRAME
13298 @@ -940,35 +1189,35 @@ ENTRY(coprocessor_segment_overrun)
13299 pushl_cfi $do_coprocessor_segment_overrun
13300 jmp error_code
13301 CFI_ENDPROC
13302 -END(coprocessor_segment_overrun)
13303 +ENDPROC(coprocessor_segment_overrun)
13304
13305 ENTRY(invalid_TSS)
13306 RING0_EC_FRAME
13307 pushl_cfi $do_invalid_TSS
13308 jmp error_code
13309 CFI_ENDPROC
13310 -END(invalid_TSS)
13311 +ENDPROC(invalid_TSS)
13312
13313 ENTRY(segment_not_present)
13314 RING0_EC_FRAME
13315 pushl_cfi $do_segment_not_present
13316 jmp error_code
13317 CFI_ENDPROC
13318 -END(segment_not_present)
13319 +ENDPROC(segment_not_present)
13320
13321 ENTRY(stack_segment)
13322 RING0_EC_FRAME
13323 pushl_cfi $do_stack_segment
13324 jmp error_code
13325 CFI_ENDPROC
13326 -END(stack_segment)
13327 +ENDPROC(stack_segment)
13328
13329 ENTRY(alignment_check)
13330 RING0_EC_FRAME
13331 pushl_cfi $do_alignment_check
13332 jmp error_code
13333 CFI_ENDPROC
13334 -END(alignment_check)
13335 +ENDPROC(alignment_check)
13336
13337 ENTRY(divide_error)
13338 RING0_INT_FRAME
13339 @@ -976,7 +1225,7 @@ ENTRY(divide_error)
13340 pushl_cfi $do_divide_error
13341 jmp error_code
13342 CFI_ENDPROC
13343 -END(divide_error)
13344 +ENDPROC(divide_error)
13345
13346 #ifdef CONFIG_X86_MCE
13347 ENTRY(machine_check)
13348 @@ -985,7 +1234,7 @@ ENTRY(machine_check)
13349 pushl_cfi machine_check_vector
13350 jmp error_code
13351 CFI_ENDPROC
13352 -END(machine_check)
13353 +ENDPROC(machine_check)
13354 #endif
13355
13356 ENTRY(spurious_interrupt_bug)
13357 @@ -994,7 +1243,7 @@ ENTRY(spurious_interrupt_bug)
13358 pushl_cfi $do_spurious_interrupt_bug
13359 jmp error_code
13360 CFI_ENDPROC
13361 -END(spurious_interrupt_bug)
13362 +ENDPROC(spurious_interrupt_bug)
13363 /*
13364 * End of kprobes section
13365 */
13366 @@ -1109,7 +1358,7 @@ BUILD_INTERRUPT3(xen_hvm_callback_vector, XEN_HVM_EVTCHN_CALLBACK,
13367
13368 ENTRY(mcount)
13369 ret
13370 -END(mcount)
13371 +ENDPROC(mcount)
13372
13373 ENTRY(ftrace_caller)
13374 cmpl $0, function_trace_stop
13375 @@ -1138,7 +1387,7 @@ ftrace_graph_call:
13376 .globl ftrace_stub
13377 ftrace_stub:
13378 ret
13379 -END(ftrace_caller)
13380 +ENDPROC(ftrace_caller)
13381
13382 #else /* ! CONFIG_DYNAMIC_FTRACE */
13383
13384 @@ -1174,7 +1423,7 @@ trace:
13385 popl %ecx
13386 popl %eax
13387 jmp ftrace_stub
13388 -END(mcount)
13389 +ENDPROC(mcount)
13390 #endif /* CONFIG_DYNAMIC_FTRACE */
13391 #endif /* CONFIG_FUNCTION_TRACER */
13392
13393 @@ -1195,7 +1444,7 @@ ENTRY(ftrace_graph_caller)
13394 popl %ecx
13395 popl %eax
13396 ret
13397 -END(ftrace_graph_caller)
13398 +ENDPROC(ftrace_graph_caller)
13399
13400 .globl return_to_handler
13401 return_to_handler:
13402 @@ -1209,7 +1458,6 @@ return_to_handler:
13403 jmp *%ecx
13404 #endif
13405
13406 -.section .rodata,"a"
13407 #include "syscall_table_32.S"
13408
13409 syscall_table_size=(.-sys_call_table)
13410 @@ -1255,15 +1503,18 @@ error_code:
13411 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
13412 REG_TO_PTGS %ecx
13413 SET_KERNEL_GS %ecx
13414 - movl $(__USER_DS), %ecx
13415 + movl $(__KERNEL_DS), %ecx
13416 movl %ecx, %ds
13417 movl %ecx, %es
13418 +
13419 + pax_enter_kernel
13420 +
13421 TRACE_IRQS_OFF
13422 movl %esp,%eax # pt_regs pointer
13423 call *%edi
13424 jmp ret_from_exception
13425 CFI_ENDPROC
13426 -END(page_fault)
13427 +ENDPROC(page_fault)
13428
13429 /*
13430 * Debug traps and NMI can happen at the one SYSENTER instruction
13431 @@ -1305,7 +1556,7 @@ debug_stack_correct:
13432 call do_debug
13433 jmp ret_from_exception
13434 CFI_ENDPROC
13435 -END(debug)
13436 +ENDPROC(debug)
13437
13438 /*
13439 * NMI is doubly nasty. It can happen _while_ we're handling
13440 @@ -1342,6 +1593,9 @@ nmi_stack_correct:
13441 xorl %edx,%edx # zero error code
13442 movl %esp,%eax # pt_regs pointer
13443 call do_nmi
13444 +
13445 + pax_exit_kernel
13446 +
13447 jmp restore_all_notrace
13448 CFI_ENDPROC
13449
13450 @@ -1378,12 +1632,15 @@ nmi_espfix_stack:
13451 FIXUP_ESPFIX_STACK # %eax == %esp
13452 xorl %edx,%edx # zero error code
13453 call do_nmi
13454 +
13455 + pax_exit_kernel
13456 +
13457 RESTORE_REGS
13458 lss 12+4(%esp), %esp # back to espfix stack
13459 CFI_ADJUST_CFA_OFFSET -24
13460 jmp irq_return
13461 CFI_ENDPROC
13462 -END(nmi)
13463 +ENDPROC(nmi)
13464
13465 ENTRY(int3)
13466 RING0_INT_FRAME
13467 @@ -1395,14 +1652,14 @@ ENTRY(int3)
13468 call do_int3
13469 jmp ret_from_exception
13470 CFI_ENDPROC
13471 -END(int3)
13472 +ENDPROC(int3)
13473
13474 ENTRY(general_protection)
13475 RING0_EC_FRAME
13476 pushl_cfi $do_general_protection
13477 jmp error_code
13478 CFI_ENDPROC
13479 -END(general_protection)
13480 +ENDPROC(general_protection)
13481
13482 #ifdef CONFIG_KVM_GUEST
13483 ENTRY(async_page_fault)
13484 @@ -1410,7 +1667,7 @@ ENTRY(async_page_fault)
13485 pushl_cfi $do_async_page_fault
13486 jmp error_code
13487 CFI_ENDPROC
13488 -END(async_page_fault)
13489 +ENDPROC(async_page_fault)
13490 #endif
13491
13492 /*
13493 diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
13494 index faf8d5e..4f16a68 100644
13495 --- a/arch/x86/kernel/entry_64.S
13496 +++ b/arch/x86/kernel/entry_64.S
13497 @@ -55,6 +55,8 @@
13498 #include <asm/paravirt.h>
13499 #include <asm/ftrace.h>
13500 #include <asm/percpu.h>
13501 +#include <asm/pgtable.h>
13502 +#include <asm/alternative-asm.h>
13503
13504 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
13505 #include <linux/elf-em.h>
13506 @@ -68,8 +70,9 @@
13507 #ifdef CONFIG_FUNCTION_TRACER
13508 #ifdef CONFIG_DYNAMIC_FTRACE
13509 ENTRY(mcount)
13510 + pax_force_retaddr
13511 retq
13512 -END(mcount)
13513 +ENDPROC(mcount)
13514
13515 ENTRY(ftrace_caller)
13516 cmpl $0, function_trace_stop
13517 @@ -92,8 +95,9 @@ GLOBAL(ftrace_graph_call)
13518 #endif
13519
13520 GLOBAL(ftrace_stub)
13521 + pax_force_retaddr
13522 retq
13523 -END(ftrace_caller)
13524 +ENDPROC(ftrace_caller)
13525
13526 #else /* ! CONFIG_DYNAMIC_FTRACE */
13527 ENTRY(mcount)
13528 @@ -112,6 +116,7 @@ ENTRY(mcount)
13529 #endif
13530
13531 GLOBAL(ftrace_stub)
13532 + pax_force_retaddr
13533 retq
13534
13535 trace:
13536 @@ -121,12 +126,13 @@ trace:
13537 movq 8(%rbp), %rsi
13538 subq $MCOUNT_INSN_SIZE, %rdi
13539
13540 + pax_force_fptr ftrace_trace_function
13541 call *ftrace_trace_function
13542
13543 MCOUNT_RESTORE_FRAME
13544
13545 jmp ftrace_stub
13546 -END(mcount)
13547 +ENDPROC(mcount)
13548 #endif /* CONFIG_DYNAMIC_FTRACE */
13549 #endif /* CONFIG_FUNCTION_TRACER */
13550
13551 @@ -146,8 +152,9 @@ ENTRY(ftrace_graph_caller)
13552
13553 MCOUNT_RESTORE_FRAME
13554
13555 + pax_force_retaddr
13556 retq
13557 -END(ftrace_graph_caller)
13558 +ENDPROC(ftrace_graph_caller)
13559
13560 GLOBAL(return_to_handler)
13561 subq $24, %rsp
13562 @@ -163,6 +170,7 @@ GLOBAL(return_to_handler)
13563 movq 8(%rsp), %rdx
13564 movq (%rsp), %rax
13565 addq $24, %rsp
13566 + pax_force_fptr %rdi
13567 jmp *%rdi
13568 #endif
13569
13570 @@ -178,6 +186,282 @@ ENTRY(native_usergs_sysret64)
13571 ENDPROC(native_usergs_sysret64)
13572 #endif /* CONFIG_PARAVIRT */
13573
13574 + .macro ljmpq sel, off
13575 +#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
13576 + .byte 0x48; ljmp *1234f(%rip)
13577 + .pushsection .rodata
13578 + .align 16
13579 + 1234: .quad \off; .word \sel
13580 + .popsection
13581 +#else
13582 + pushq $\sel
13583 + pushq $\off
13584 + lretq
13585 +#endif
13586 + .endm
13587 +
13588 + .macro pax_enter_kernel
13589 + pax_set_fptr_mask
13590 +#ifdef CONFIG_PAX_KERNEXEC
13591 + call pax_enter_kernel
13592 +#endif
13593 + .endm
13594 +
13595 + .macro pax_exit_kernel
13596 +#ifdef CONFIG_PAX_KERNEXEC
13597 + call pax_exit_kernel
13598 +#endif
13599 + .endm
13600 +
13601 +#ifdef CONFIG_PAX_KERNEXEC
13602 +ENTRY(pax_enter_kernel)
13603 + pushq %rdi
13604 +
13605 +#ifdef CONFIG_PARAVIRT
13606 + PV_SAVE_REGS(CLBR_RDI)
13607 +#endif
13608 +
13609 + GET_CR0_INTO_RDI
13610 + bts $16,%rdi
13611 + jnc 3f
13612 + mov %cs,%edi
13613 + cmp $__KERNEL_CS,%edi
13614 + jnz 2f
13615 +1:
13616 +
13617 +#ifdef CONFIG_PARAVIRT
13618 + PV_RESTORE_REGS(CLBR_RDI)
13619 +#endif
13620 +
13621 + popq %rdi
13622 + pax_force_retaddr
13623 + retq
13624 +
13625 +2: ljmpq __KERNEL_CS,1f
13626 +3: ljmpq __KERNEXEC_KERNEL_CS,4f
13627 +4: SET_RDI_INTO_CR0
13628 + jmp 1b
13629 +ENDPROC(pax_enter_kernel)
13630 +
13631 +ENTRY(pax_exit_kernel)
13632 + pushq %rdi
13633 +
13634 +#ifdef CONFIG_PARAVIRT
13635 + PV_SAVE_REGS(CLBR_RDI)
13636 +#endif
13637 +
13638 + mov %cs,%rdi
13639 + cmp $__KERNEXEC_KERNEL_CS,%edi
13640 + jz 2f
13641 +1:
13642 +
13643 +#ifdef CONFIG_PARAVIRT
13644 + PV_RESTORE_REGS(CLBR_RDI);
13645 +#endif
13646 +
13647 + popq %rdi
13648 + pax_force_retaddr
13649 + retq
13650 +
13651 +2: GET_CR0_INTO_RDI
13652 + btr $16,%rdi
13653 + ljmpq __KERNEL_CS,3f
13654 +3: SET_RDI_INTO_CR0
13655 + jmp 1b
13656 +#ifdef CONFIG_PARAVIRT
13657 + PV_RESTORE_REGS(CLBR_RDI);
13658 +#endif
13659 +
13660 + popq %rdi
13661 + pax_force_retaddr
13662 + retq
13663 +ENDPROC(pax_exit_kernel)
13664 +#endif
13665 +
13666 + .macro pax_enter_kernel_user
13667 + pax_set_fptr_mask
13668 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13669 + call pax_enter_kernel_user
13670 +#endif
13671 + .endm
13672 +
13673 + .macro pax_exit_kernel_user
13674 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13675 + call pax_exit_kernel_user
13676 +#endif
13677 +#ifdef CONFIG_PAX_RANDKSTACK
13678 + pushq %rax
13679 + call pax_randomize_kstack
13680 + popq %rax
13681 +#endif
13682 + .endm
13683 +
13684 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13685 +ENTRY(pax_enter_kernel_user)
13686 + pushq %rdi
13687 + pushq %rbx
13688 +
13689 +#ifdef CONFIG_PARAVIRT
13690 + PV_SAVE_REGS(CLBR_RDI)
13691 +#endif
13692 +
13693 + GET_CR3_INTO_RDI
13694 + mov %rdi,%rbx
13695 + add $__START_KERNEL_map,%rbx
13696 + sub phys_base(%rip),%rbx
13697 +
13698 +#ifdef CONFIG_PARAVIRT
13699 + pushq %rdi
13700 + cmpl $0, pv_info+PARAVIRT_enabled
13701 + jz 1f
13702 + i = 0
13703 + .rept USER_PGD_PTRS
13704 + mov i*8(%rbx),%rsi
13705 + mov $0,%sil
13706 + lea i*8(%rbx),%rdi
13707 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
13708 + i = i + 1
13709 + .endr
13710 + jmp 2f
13711 +1:
13712 +#endif
13713 +
13714 + i = 0
13715 + .rept USER_PGD_PTRS
13716 + movb $0,i*8(%rbx)
13717 + i = i + 1
13718 + .endr
13719 +
13720 +#ifdef CONFIG_PARAVIRT
13721 +2: popq %rdi
13722 +#endif
13723 + SET_RDI_INTO_CR3
13724 +
13725 +#ifdef CONFIG_PAX_KERNEXEC
13726 + GET_CR0_INTO_RDI
13727 + bts $16,%rdi
13728 + SET_RDI_INTO_CR0
13729 +#endif
13730 +
13731 +#ifdef CONFIG_PARAVIRT
13732 + PV_RESTORE_REGS(CLBR_RDI)
13733 +#endif
13734 +
13735 + popq %rbx
13736 + popq %rdi
13737 + pax_force_retaddr
13738 + retq
13739 +ENDPROC(pax_enter_kernel_user)
13740 +
13741 +ENTRY(pax_exit_kernel_user)
13742 + push %rdi
13743 +
13744 +#ifdef CONFIG_PARAVIRT
13745 + pushq %rbx
13746 + PV_SAVE_REGS(CLBR_RDI)
13747 +#endif
13748 +
13749 +#ifdef CONFIG_PAX_KERNEXEC
13750 + GET_CR0_INTO_RDI
13751 + btr $16,%rdi
13752 + SET_RDI_INTO_CR0
13753 +#endif
13754 +
13755 + GET_CR3_INTO_RDI
13756 + add $__START_KERNEL_map,%rdi
13757 + sub phys_base(%rip),%rdi
13758 +
13759 +#ifdef CONFIG_PARAVIRT
13760 + cmpl $0, pv_info+PARAVIRT_enabled
13761 + jz 1f
13762 + mov %rdi,%rbx
13763 + i = 0
13764 + .rept USER_PGD_PTRS
13765 + mov i*8(%rbx),%rsi
13766 + mov $0x67,%sil
13767 + lea i*8(%rbx),%rdi
13768 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
13769 + i = i + 1
13770 + .endr
13771 + jmp 2f
13772 +1:
13773 +#endif
13774 +
13775 + i = 0
13776 + .rept USER_PGD_PTRS
13777 + movb $0x67,i*8(%rdi)
13778 + i = i + 1
13779 + .endr
13780 +
13781 +#ifdef CONFIG_PARAVIRT
13782 +2: PV_RESTORE_REGS(CLBR_RDI)
13783 + popq %rbx
13784 +#endif
13785 +
13786 + popq %rdi
13787 + pax_force_retaddr
13788 + retq
13789 +ENDPROC(pax_exit_kernel_user)
13790 +#endif
13791 +
13792 +.macro pax_erase_kstack
13793 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
13794 + call pax_erase_kstack
13795 +#endif
13796 +.endm
13797 +
13798 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
13799 +/*
13800 + * r11: thread_info
13801 + * rcx, rdx: can be clobbered
13802 + */
13803 +ENTRY(pax_erase_kstack)
13804 + pushq %rdi
13805 + pushq %rax
13806 + pushq %r11
13807 +
13808 + GET_THREAD_INFO(%r11)
13809 + mov TI_lowest_stack(%r11), %rdi
13810 + mov $-0xBEEF, %rax
13811 + std
13812 +
13813 +1: mov %edi, %ecx
13814 + and $THREAD_SIZE_asm - 1, %ecx
13815 + shr $3, %ecx
13816 + repne scasq
13817 + jecxz 2f
13818 +
13819 + cmp $2*8, %ecx
13820 + jc 2f
13821 +
13822 + mov $2*8, %ecx
13823 + repe scasq
13824 + jecxz 2f
13825 + jne 1b
13826 +
13827 +2: cld
13828 + mov %esp, %ecx
13829 + sub %edi, %ecx
13830 +
13831 + cmp $THREAD_SIZE_asm, %rcx
13832 + jb 3f
13833 + ud2
13834 +3:
13835 +
13836 + shr $3, %ecx
13837 + rep stosq
13838 +
13839 + mov TI_task_thread_sp0(%r11), %rdi
13840 + sub $256, %rdi
13841 + mov %rdi, TI_lowest_stack(%r11)
13842 +
13843 + popq %r11
13844 + popq %rax
13845 + popq %rdi
13846 + pax_force_retaddr
13847 + ret
13848 +ENDPROC(pax_erase_kstack)
13849 +#endif
13850
13851 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
13852 #ifdef CONFIG_TRACE_IRQFLAGS
13853 @@ -231,8 +515,8 @@ ENDPROC(native_usergs_sysret64)
13854 .endm
13855
13856 .macro UNFAKE_STACK_FRAME
13857 - addq $8*6, %rsp
13858 - CFI_ADJUST_CFA_OFFSET -(6*8)
13859 + addq $8*6 + ARG_SKIP, %rsp
13860 + CFI_ADJUST_CFA_OFFSET -(6*8 + ARG_SKIP)
13861 .endm
13862
13863 /*
13864 @@ -319,7 +603,7 @@ ENDPROC(native_usergs_sysret64)
13865 movq %rsp, %rsi
13866
13867 leaq -RBP(%rsp),%rdi /* arg1 for handler */
13868 - testl $3, CS(%rdi)
13869 + testb $3, CS(%rdi)
13870 je 1f
13871 SWAPGS
13872 /*
13873 @@ -355,9 +639,10 @@ ENTRY(save_rest)
13874 movq_cfi r15, R15+16
13875 movq %r11, 8(%rsp) /* return address */
13876 FIXUP_TOP_OF_STACK %r11, 16
13877 + pax_force_retaddr
13878 ret
13879 CFI_ENDPROC
13880 -END(save_rest)
13881 +ENDPROC(save_rest)
13882
13883 /* save complete stack frame */
13884 .pushsection .kprobes.text, "ax"
13885 @@ -386,9 +671,10 @@ ENTRY(save_paranoid)
13886 js 1f /* negative -> in kernel */
13887 SWAPGS
13888 xorl %ebx,%ebx
13889 -1: ret
13890 +1: pax_force_retaddr_bts
13891 + ret
13892 CFI_ENDPROC
13893 -END(save_paranoid)
13894 +ENDPROC(save_paranoid)
13895 .popsection
13896
13897 /*
13898 @@ -410,7 +696,7 @@ ENTRY(ret_from_fork)
13899
13900 RESTORE_REST
13901
13902 - testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
13903 + testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
13904 je int_ret_from_sys_call
13905
13906 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
13907 @@ -420,7 +706,7 @@ ENTRY(ret_from_fork)
13908 jmp ret_from_sys_call # go to the SYSRET fastpath
13909
13910 CFI_ENDPROC
13911 -END(ret_from_fork)
13912 +ENDPROC(ret_from_fork)
13913
13914 /*
13915 * System call entry. Up to 6 arguments in registers are supported.
13916 @@ -456,7 +742,7 @@ END(ret_from_fork)
13917 ENTRY(system_call)
13918 CFI_STARTPROC simple
13919 CFI_SIGNAL_FRAME
13920 - CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
13921 + CFI_DEF_CFA rsp,0
13922 CFI_REGISTER rip,rcx
13923 /*CFI_REGISTER rflags,r11*/
13924 SWAPGS_UNSAFE_STACK
13925 @@ -469,12 +755,13 @@ ENTRY(system_call_after_swapgs)
13926
13927 movq %rsp,PER_CPU_VAR(old_rsp)
13928 movq PER_CPU_VAR(kernel_stack),%rsp
13929 + SAVE_ARGS 8*6,0
13930 + pax_enter_kernel_user
13931 /*
13932 * No need to follow this irqs off/on section - it's straight
13933 * and short:
13934 */
13935 ENABLE_INTERRUPTS(CLBR_NONE)
13936 - SAVE_ARGS 8,0
13937 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
13938 movq %rcx,RIP-ARGOFFSET(%rsp)
13939 CFI_REL_OFFSET rip,RIP-ARGOFFSET
13940 @@ -484,7 +771,7 @@ ENTRY(system_call_after_swapgs)
13941 system_call_fastpath:
13942 cmpq $__NR_syscall_max,%rax
13943 ja badsys
13944 - movq %r10,%rcx
13945 + movq R10-ARGOFFSET(%rsp),%rcx
13946 call *sys_call_table(,%rax,8) # XXX: rip relative
13947 movq %rax,RAX-ARGOFFSET(%rsp)
13948 /*
13949 @@ -503,6 +790,8 @@ sysret_check:
13950 andl %edi,%edx
13951 jnz sysret_careful
13952 CFI_REMEMBER_STATE
13953 + pax_exit_kernel_user
13954 + pax_erase_kstack
13955 /*
13956 * sysretq will re-enable interrupts:
13957 */
13958 @@ -554,14 +843,18 @@ badsys:
13959 * jump back to the normal fast path.
13960 */
13961 auditsys:
13962 - movq %r10,%r9 /* 6th arg: 4th syscall arg */
13963 + movq R10-ARGOFFSET(%rsp),%r9 /* 6th arg: 4th syscall arg */
13964 movq %rdx,%r8 /* 5th arg: 3rd syscall arg */
13965 movq %rsi,%rcx /* 4th arg: 2nd syscall arg */
13966 movq %rdi,%rdx /* 3rd arg: 1st syscall arg */
13967 movq %rax,%rsi /* 2nd arg: syscall number */
13968 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
13969 call audit_syscall_entry
13970 +
13971 + pax_erase_kstack
13972 +
13973 LOAD_ARGS 0 /* reload call-clobbered registers */
13974 + pax_set_fptr_mask
13975 jmp system_call_fastpath
13976
13977 /*
13978 @@ -591,16 +884,20 @@ tracesys:
13979 FIXUP_TOP_OF_STACK %rdi
13980 movq %rsp,%rdi
13981 call syscall_trace_enter
13982 +
13983 + pax_erase_kstack
13984 +
13985 /*
13986 * Reload arg registers from stack in case ptrace changed them.
13987 * We don't reload %rax because syscall_trace_enter() returned
13988 * the value it wants us to use in the table lookup.
13989 */
13990 LOAD_ARGS ARGOFFSET, 1
13991 + pax_set_fptr_mask
13992 RESTORE_REST
13993 cmpq $__NR_syscall_max,%rax
13994 ja int_ret_from_sys_call /* RAX(%rsp) set to -ENOSYS above */
13995 - movq %r10,%rcx /* fixup for C */
13996 + movq R10-ARGOFFSET(%rsp),%rcx /* fixup for C */
13997 call *sys_call_table(,%rax,8)
13998 movq %rax,RAX-ARGOFFSET(%rsp)
13999 /* Use IRET because user could have changed frame */
14000 @@ -612,7 +909,7 @@ tracesys:
14001 GLOBAL(int_ret_from_sys_call)
14002 DISABLE_INTERRUPTS(CLBR_NONE)
14003 TRACE_IRQS_OFF
14004 - testl $3,CS-ARGOFFSET(%rsp)
14005 + testb $3,CS-ARGOFFSET(%rsp)
14006 je retint_restore_args
14007 movl $_TIF_ALLWORK_MASK,%edi
14008 /* edi: mask to check */
14009 @@ -623,6 +920,7 @@ GLOBAL(int_with_check)
14010 andl %edi,%edx
14011 jnz int_careful
14012 andl $~TS_COMPAT,TI_status(%rcx)
14013 + pax_erase_kstack
14014 jmp retint_swapgs
14015
14016 /* Either reschedule or signal or syscall exit tracking needed. */
14017 @@ -669,7 +967,7 @@ int_restore_rest:
14018 TRACE_IRQS_OFF
14019 jmp int_with_check
14020 CFI_ENDPROC
14021 -END(system_call)
14022 +ENDPROC(system_call)
14023
14024 /*
14025 * Certain special system calls that need to save a complete full stack frame.
14026 @@ -685,7 +983,7 @@ ENTRY(\label)
14027 call \func
14028 jmp ptregscall_common
14029 CFI_ENDPROC
14030 -END(\label)
14031 +ENDPROC(\label)
14032 .endm
14033
14034 PTREGSCALL stub_clone, sys_clone, %r8
14035 @@ -703,9 +1001,10 @@ ENTRY(ptregscall_common)
14036 movq_cfi_restore R12+8, r12
14037 movq_cfi_restore RBP+8, rbp
14038 movq_cfi_restore RBX+8, rbx
14039 + pax_force_retaddr
14040 ret $REST_SKIP /* pop extended registers */
14041 CFI_ENDPROC
14042 -END(ptregscall_common)
14043 +ENDPROC(ptregscall_common)
14044
14045 ENTRY(stub_execve)
14046 CFI_STARTPROC
14047 @@ -720,7 +1019,7 @@ ENTRY(stub_execve)
14048 RESTORE_REST
14049 jmp int_ret_from_sys_call
14050 CFI_ENDPROC
14051 -END(stub_execve)
14052 +ENDPROC(stub_execve)
14053
14054 /*
14055 * sigreturn is special because it needs to restore all registers on return.
14056 @@ -738,7 +1037,7 @@ ENTRY(stub_rt_sigreturn)
14057 RESTORE_REST
14058 jmp int_ret_from_sys_call
14059 CFI_ENDPROC
14060 -END(stub_rt_sigreturn)
14061 +ENDPROC(stub_rt_sigreturn)
14062
14063 /*
14064 * Build the entry stubs and pointer table with some assembler magic.
14065 @@ -773,7 +1072,7 @@ vector=vector+1
14066 2: jmp common_interrupt
14067 .endr
14068 CFI_ENDPROC
14069 -END(irq_entries_start)
14070 +ENDPROC(irq_entries_start)
14071
14072 .previous
14073 END(interrupt)
14074 @@ -793,6 +1092,16 @@ END(interrupt)
14075 subq $ORIG_RAX-RBP, %rsp
14076 CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
14077 SAVE_ARGS_IRQ
14078 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14079 + testb $3, CS(%rdi)
14080 + jnz 1f
14081 + pax_enter_kernel
14082 + jmp 2f
14083 +1: pax_enter_kernel_user
14084 +2:
14085 +#else
14086 + pax_enter_kernel
14087 +#endif
14088 call \func
14089 .endm
14090
14091 @@ -824,7 +1133,7 @@ ret_from_intr:
14092
14093 exit_intr:
14094 GET_THREAD_INFO(%rcx)
14095 - testl $3,CS-ARGOFFSET(%rsp)
14096 + testb $3,CS-ARGOFFSET(%rsp)
14097 je retint_kernel
14098
14099 /* Interrupt came from user space */
14100 @@ -846,12 +1155,15 @@ retint_swapgs: /* return to user-space */
14101 * The iretq could re-enable interrupts:
14102 */
14103 DISABLE_INTERRUPTS(CLBR_ANY)
14104 + pax_exit_kernel_user
14105 TRACE_IRQS_IRETQ
14106 SWAPGS
14107 jmp restore_args
14108
14109 retint_restore_args: /* return to kernel space */
14110 DISABLE_INTERRUPTS(CLBR_ANY)
14111 + pax_exit_kernel
14112 + pax_force_retaddr RIP-ARGOFFSET
14113 /*
14114 * The iretq could re-enable interrupts:
14115 */
14116 @@ -940,7 +1252,7 @@ ENTRY(retint_kernel)
14117 #endif
14118
14119 CFI_ENDPROC
14120 -END(common_interrupt)
14121 +ENDPROC(common_interrupt)
14122 /*
14123 * End of kprobes section
14124 */
14125 @@ -956,7 +1268,7 @@ ENTRY(\sym)
14126 interrupt \do_sym
14127 jmp ret_from_intr
14128 CFI_ENDPROC
14129 -END(\sym)
14130 +ENDPROC(\sym)
14131 .endm
14132
14133 #ifdef CONFIG_SMP
14134 @@ -1021,12 +1333,22 @@ ENTRY(\sym)
14135 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
14136 call error_entry
14137 DEFAULT_FRAME 0
14138 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14139 + testb $3, CS(%rsp)
14140 + jnz 1f
14141 + pax_enter_kernel
14142 + jmp 2f
14143 +1: pax_enter_kernel_user
14144 +2:
14145 +#else
14146 + pax_enter_kernel
14147 +#endif
14148 movq %rsp,%rdi /* pt_regs pointer */
14149 xorl %esi,%esi /* no error code */
14150 call \do_sym
14151 jmp error_exit /* %ebx: no swapgs flag */
14152 CFI_ENDPROC
14153 -END(\sym)
14154 +ENDPROC(\sym)
14155 .endm
14156
14157 .macro paranoidzeroentry sym do_sym
14158 @@ -1038,15 +1360,25 @@ ENTRY(\sym)
14159 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
14160 call save_paranoid
14161 TRACE_IRQS_OFF
14162 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14163 + testb $3, CS(%rsp)
14164 + jnz 1f
14165 + pax_enter_kernel
14166 + jmp 2f
14167 +1: pax_enter_kernel_user
14168 +2:
14169 +#else
14170 + pax_enter_kernel
14171 +#endif
14172 movq %rsp,%rdi /* pt_regs pointer */
14173 xorl %esi,%esi /* no error code */
14174 call \do_sym
14175 jmp paranoid_exit /* %ebx: no swapgs flag */
14176 CFI_ENDPROC
14177 -END(\sym)
14178 +ENDPROC(\sym)
14179 .endm
14180
14181 -#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
14182 +#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r12)
14183 .macro paranoidzeroentry_ist sym do_sym ist
14184 ENTRY(\sym)
14185 INTR_FRAME
14186 @@ -1056,14 +1388,30 @@ ENTRY(\sym)
14187 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
14188 call save_paranoid
14189 TRACE_IRQS_OFF
14190 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14191 + testb $3, CS(%rsp)
14192 + jnz 1f
14193 + pax_enter_kernel
14194 + jmp 2f
14195 +1: pax_enter_kernel_user
14196 +2:
14197 +#else
14198 + pax_enter_kernel
14199 +#endif
14200 movq %rsp,%rdi /* pt_regs pointer */
14201 xorl %esi,%esi /* no error code */
14202 +#ifdef CONFIG_SMP
14203 + imul $TSS_size, PER_CPU_VAR(cpu_number), %r12d
14204 + lea init_tss(%r12), %r12
14205 +#else
14206 + lea init_tss(%rip), %r12
14207 +#endif
14208 subq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
14209 call \do_sym
14210 addq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
14211 jmp paranoid_exit /* %ebx: no swapgs flag */
14212 CFI_ENDPROC
14213 -END(\sym)
14214 +ENDPROC(\sym)
14215 .endm
14216
14217 .macro errorentry sym do_sym
14218 @@ -1074,13 +1422,23 @@ ENTRY(\sym)
14219 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
14220 call error_entry
14221 DEFAULT_FRAME 0
14222 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14223 + testb $3, CS(%rsp)
14224 + jnz 1f
14225 + pax_enter_kernel
14226 + jmp 2f
14227 +1: pax_enter_kernel_user
14228 +2:
14229 +#else
14230 + pax_enter_kernel
14231 +#endif
14232 movq %rsp,%rdi /* pt_regs pointer */
14233 movq ORIG_RAX(%rsp),%rsi /* get error code */
14234 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
14235 call \do_sym
14236 jmp error_exit /* %ebx: no swapgs flag */
14237 CFI_ENDPROC
14238 -END(\sym)
14239 +ENDPROC(\sym)
14240 .endm
14241
14242 /* error code is on the stack already */
14243 @@ -1093,13 +1451,23 @@ ENTRY(\sym)
14244 call save_paranoid
14245 DEFAULT_FRAME 0
14246 TRACE_IRQS_OFF
14247 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14248 + testb $3, CS(%rsp)
14249 + jnz 1f
14250 + pax_enter_kernel
14251 + jmp 2f
14252 +1: pax_enter_kernel_user
14253 +2:
14254 +#else
14255 + pax_enter_kernel
14256 +#endif
14257 movq %rsp,%rdi /* pt_regs pointer */
14258 movq ORIG_RAX(%rsp),%rsi /* get error code */
14259 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
14260 call \do_sym
14261 jmp paranoid_exit /* %ebx: no swapgs flag */
14262 CFI_ENDPROC
14263 -END(\sym)
14264 +ENDPROC(\sym)
14265 .endm
14266
14267 zeroentry divide_error do_divide_error
14268 @@ -1129,9 +1497,10 @@ gs_change:
14269 2: mfence /* workaround */
14270 SWAPGS
14271 popfq_cfi
14272 + pax_force_retaddr
14273 ret
14274 CFI_ENDPROC
14275 -END(native_load_gs_index)
14276 +ENDPROC(native_load_gs_index)
14277
14278 .section __ex_table,"a"
14279 .align 8
14280 @@ -1153,13 +1522,14 @@ ENTRY(kernel_thread_helper)
14281 * Here we are in the child and the registers are set as they were
14282 * at kernel_thread() invocation in the parent.
14283 */
14284 + pax_force_fptr %rsi
14285 call *%rsi
14286 # exit
14287 mov %eax, %edi
14288 call do_exit
14289 ud2 # padding for call trace
14290 CFI_ENDPROC
14291 -END(kernel_thread_helper)
14292 +ENDPROC(kernel_thread_helper)
14293
14294 /*
14295 * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
14296 @@ -1186,11 +1556,11 @@ ENTRY(kernel_execve)
14297 RESTORE_REST
14298 testq %rax,%rax
14299 je int_ret_from_sys_call
14300 - RESTORE_ARGS
14301 UNFAKE_STACK_FRAME
14302 + pax_force_retaddr
14303 ret
14304 CFI_ENDPROC
14305 -END(kernel_execve)
14306 +ENDPROC(kernel_execve)
14307
14308 /* Call softirq on interrupt stack. Interrupts are off. */
14309 ENTRY(call_softirq)
14310 @@ -1208,9 +1578,10 @@ ENTRY(call_softirq)
14311 CFI_DEF_CFA_REGISTER rsp
14312 CFI_ADJUST_CFA_OFFSET -8
14313 decl PER_CPU_VAR(irq_count)
14314 + pax_force_retaddr
14315 ret
14316 CFI_ENDPROC
14317 -END(call_softirq)
14318 +ENDPROC(call_softirq)
14319
14320 #ifdef CONFIG_XEN
14321 zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
14322 @@ -1248,7 +1619,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
14323 decl PER_CPU_VAR(irq_count)
14324 jmp error_exit
14325 CFI_ENDPROC
14326 -END(xen_do_hypervisor_callback)
14327 +ENDPROC(xen_do_hypervisor_callback)
14328
14329 /*
14330 * Hypervisor uses this for application faults while it executes.
14331 @@ -1307,7 +1678,7 @@ ENTRY(xen_failsafe_callback)
14332 SAVE_ALL
14333 jmp error_exit
14334 CFI_ENDPROC
14335 -END(xen_failsafe_callback)
14336 +ENDPROC(xen_failsafe_callback)
14337
14338 apicinterrupt XEN_HVM_EVTCHN_CALLBACK \
14339 xen_hvm_callback_vector xen_evtchn_do_upcall
14340 @@ -1356,16 +1727,31 @@ ENTRY(paranoid_exit)
14341 TRACE_IRQS_OFF
14342 testl %ebx,%ebx /* swapgs needed? */
14343 jnz paranoid_restore
14344 - testl $3,CS(%rsp)
14345 + testb $3,CS(%rsp)
14346 jnz paranoid_userspace
14347 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14348 + pax_exit_kernel
14349 + TRACE_IRQS_IRETQ 0
14350 + SWAPGS_UNSAFE_STACK
14351 + RESTORE_ALL 8
14352 + pax_force_retaddr_bts
14353 + jmp irq_return
14354 +#endif
14355 paranoid_swapgs:
14356 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14357 + pax_exit_kernel_user
14358 +#else
14359 + pax_exit_kernel
14360 +#endif
14361 TRACE_IRQS_IRETQ 0
14362 SWAPGS_UNSAFE_STACK
14363 RESTORE_ALL 8
14364 jmp irq_return
14365 paranoid_restore:
14366 + pax_exit_kernel
14367 TRACE_IRQS_IRETQ 0
14368 RESTORE_ALL 8
14369 + pax_force_retaddr_bts
14370 jmp irq_return
14371 paranoid_userspace:
14372 GET_THREAD_INFO(%rcx)
14373 @@ -1394,7 +1780,7 @@ paranoid_schedule:
14374 TRACE_IRQS_OFF
14375 jmp paranoid_userspace
14376 CFI_ENDPROC
14377 -END(paranoid_exit)
14378 +ENDPROC(paranoid_exit)
14379
14380 /*
14381 * Exception entry point. This expects an error code/orig_rax on the stack.
14382 @@ -1421,12 +1807,13 @@ ENTRY(error_entry)
14383 movq_cfi r14, R14+8
14384 movq_cfi r15, R15+8
14385 xorl %ebx,%ebx
14386 - testl $3,CS+8(%rsp)
14387 + testb $3,CS+8(%rsp)
14388 je error_kernelspace
14389 error_swapgs:
14390 SWAPGS
14391 error_sti:
14392 TRACE_IRQS_OFF
14393 + pax_force_retaddr_bts
14394 ret
14395
14396 /*
14397 @@ -1453,7 +1840,7 @@ bstep_iret:
14398 movq %rcx,RIP+8(%rsp)
14399 jmp error_swapgs
14400 CFI_ENDPROC
14401 -END(error_entry)
14402 +ENDPROC(error_entry)
14403
14404
14405 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
14406 @@ -1473,7 +1860,7 @@ ENTRY(error_exit)
14407 jnz retint_careful
14408 jmp retint_swapgs
14409 CFI_ENDPROC
14410 -END(error_exit)
14411 +ENDPROC(error_exit)
14412
14413
14414 /* runs on exception stack */
14415 @@ -1485,6 +1872,16 @@ ENTRY(nmi)
14416 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
14417 call save_paranoid
14418 DEFAULT_FRAME 0
14419 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14420 + testb $3, CS(%rsp)
14421 + jnz 1f
14422 + pax_enter_kernel
14423 + jmp 2f
14424 +1: pax_enter_kernel_user
14425 +2:
14426 +#else
14427 + pax_enter_kernel
14428 +#endif
14429 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
14430 movq %rsp,%rdi
14431 movq $-1,%rsi
14432 @@ -1495,12 +1892,28 @@ ENTRY(nmi)
14433 DISABLE_INTERRUPTS(CLBR_NONE)
14434 testl %ebx,%ebx /* swapgs needed? */
14435 jnz nmi_restore
14436 - testl $3,CS(%rsp)
14437 + testb $3,CS(%rsp)
14438 jnz nmi_userspace
14439 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14440 + pax_exit_kernel
14441 + SWAPGS_UNSAFE_STACK
14442 + RESTORE_ALL 8
14443 + pax_force_retaddr_bts
14444 + jmp irq_return
14445 +#endif
14446 nmi_swapgs:
14447 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14448 + pax_exit_kernel_user
14449 +#else
14450 + pax_exit_kernel
14451 +#endif
14452 SWAPGS_UNSAFE_STACK
14453 + RESTORE_ALL 8
14454 + jmp irq_return
14455 nmi_restore:
14456 + pax_exit_kernel
14457 RESTORE_ALL 8
14458 + pax_force_retaddr_bts
14459 jmp irq_return
14460 nmi_userspace:
14461 GET_THREAD_INFO(%rcx)
14462 @@ -1529,14 +1942,14 @@ nmi_schedule:
14463 jmp paranoid_exit
14464 CFI_ENDPROC
14465 #endif
14466 -END(nmi)
14467 +ENDPROC(nmi)
14468
14469 ENTRY(ignore_sysret)
14470 CFI_STARTPROC
14471 mov $-ENOSYS,%eax
14472 sysret
14473 CFI_ENDPROC
14474 -END(ignore_sysret)
14475 +ENDPROC(ignore_sysret)
14476
14477 /*
14478 * End of kprobes section
14479 diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
14480 index c9a281f..ce2f317 100644
14481 --- a/arch/x86/kernel/ftrace.c
14482 +++ b/arch/x86/kernel/ftrace.c
14483 @@ -126,7 +126,7 @@ static void *mod_code_ip; /* holds the IP to write to */
14484 static const void *mod_code_newcode; /* holds the text to write to the IP */
14485
14486 static unsigned nmi_wait_count;
14487 -static atomic_t nmi_update_count = ATOMIC_INIT(0);
14488 +static atomic_unchecked_t nmi_update_count = ATOMIC_INIT(0);
14489
14490 int ftrace_arch_read_dyn_info(char *buf, int size)
14491 {
14492 @@ -134,7 +134,7 @@ int ftrace_arch_read_dyn_info(char *buf, int size)
14493
14494 r = snprintf(buf, size, "%u %u",
14495 nmi_wait_count,
14496 - atomic_read(&nmi_update_count));
14497 + atomic_read_unchecked(&nmi_update_count));
14498 return r;
14499 }
14500
14501 @@ -177,8 +177,10 @@ void ftrace_nmi_enter(void)
14502
14503 if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
14504 smp_rmb();
14505 + pax_open_kernel();
14506 ftrace_mod_code();
14507 - atomic_inc(&nmi_update_count);
14508 + pax_close_kernel();
14509 + atomic_inc_unchecked(&nmi_update_count);
14510 }
14511 /* Must have previous changes seen before executions */
14512 smp_mb();
14513 @@ -271,6 +273,8 @@ ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
14514 {
14515 unsigned char replaced[MCOUNT_INSN_SIZE];
14516
14517 + ip = ktla_ktva(ip);
14518 +
14519 /*
14520 * Note: Due to modules and __init, code can
14521 * disappear and change, we need to protect against faulting
14522 @@ -327,7 +331,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
14523 unsigned char old[MCOUNT_INSN_SIZE], *new;
14524 int ret;
14525
14526 - memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
14527 + memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
14528 new = ftrace_call_replace(ip, (unsigned long)func);
14529 ret = ftrace_modify_code(ip, old, new);
14530
14531 @@ -353,6 +357,8 @@ static int ftrace_mod_jmp(unsigned long ip,
14532 {
14533 unsigned char code[MCOUNT_INSN_SIZE];
14534
14535 + ip = ktla_ktva(ip);
14536 +
14537 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
14538 return -EFAULT;
14539
14540 diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
14541 index 3bb0850..55a56f4 100644
14542 --- a/arch/x86/kernel/head32.c
14543 +++ b/arch/x86/kernel/head32.c
14544 @@ -19,6 +19,7 @@
14545 #include <asm/io_apic.h>
14546 #include <asm/bios_ebda.h>
14547 #include <asm/tlbflush.h>
14548 +#include <asm/boot.h>
14549
14550 static void __init i386_default_early_setup(void)
14551 {
14552 @@ -33,7 +34,7 @@ void __init i386_start_kernel(void)
14553 {
14554 memblock_init();
14555
14556 - memblock_x86_reserve_range(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
14557 + memblock_x86_reserve_range(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop), "TEXT DATA BSS");
14558
14559 #ifdef CONFIG_BLK_DEV_INITRD
14560 /* Reserve INITRD */
14561 diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
14562 index ce0be7c..c41476e 100644
14563 --- a/arch/x86/kernel/head_32.S
14564 +++ b/arch/x86/kernel/head_32.S
14565 @@ -25,6 +25,12 @@
14566 /* Physical address */
14567 #define pa(X) ((X) - __PAGE_OFFSET)
14568
14569 +#ifdef CONFIG_PAX_KERNEXEC
14570 +#define ta(X) (X)
14571 +#else
14572 +#define ta(X) ((X) - __PAGE_OFFSET)
14573 +#endif
14574 +
14575 /*
14576 * References to members of the new_cpu_data structure.
14577 */
14578 @@ -54,11 +60,7 @@
14579 * and small than max_low_pfn, otherwise will waste some page table entries
14580 */
14581
14582 -#if PTRS_PER_PMD > 1
14583 -#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
14584 -#else
14585 -#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
14586 -#endif
14587 +#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
14588
14589 /* Number of possible pages in the lowmem region */
14590 LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
14591 @@ -77,6 +79,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE
14592 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
14593
14594 /*
14595 + * Real beginning of normal "text" segment
14596 + */
14597 +ENTRY(stext)
14598 +ENTRY(_stext)
14599 +
14600 +/*
14601 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
14602 * %esi points to the real-mode code as a 32-bit pointer.
14603 * CS and DS must be 4 GB flat segments, but we don't depend on
14604 @@ -84,6 +92,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
14605 * can.
14606 */
14607 __HEAD
14608 +
14609 +#ifdef CONFIG_PAX_KERNEXEC
14610 + jmp startup_32
14611 +/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
14612 +.fill PAGE_SIZE-5,1,0xcc
14613 +#endif
14614 +
14615 ENTRY(startup_32)
14616 movl pa(stack_start),%ecx
14617
14618 @@ -105,6 +120,57 @@ ENTRY(startup_32)
14619 2:
14620 leal -__PAGE_OFFSET(%ecx),%esp
14621
14622 +#ifdef CONFIG_SMP
14623 + movl $pa(cpu_gdt_table),%edi
14624 + movl $__per_cpu_load,%eax
14625 + movw %ax,__KERNEL_PERCPU + 2(%edi)
14626 + rorl $16,%eax
14627 + movb %al,__KERNEL_PERCPU + 4(%edi)
14628 + movb %ah,__KERNEL_PERCPU + 7(%edi)
14629 + movl $__per_cpu_end - 1,%eax
14630 + subl $__per_cpu_start,%eax
14631 + movw %ax,__KERNEL_PERCPU + 0(%edi)
14632 +#endif
14633 +
14634 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14635 + movl $NR_CPUS,%ecx
14636 + movl $pa(cpu_gdt_table),%edi
14637 +1:
14638 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
14639 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
14640 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
14641 + addl $PAGE_SIZE_asm,%edi
14642 + loop 1b
14643 +#endif
14644 +
14645 +#ifdef CONFIG_PAX_KERNEXEC
14646 + movl $pa(boot_gdt),%edi
14647 + movl $__LOAD_PHYSICAL_ADDR,%eax
14648 + movw %ax,__BOOT_CS + 2(%edi)
14649 + rorl $16,%eax
14650 + movb %al,__BOOT_CS + 4(%edi)
14651 + movb %ah,__BOOT_CS + 7(%edi)
14652 + rorl $16,%eax
14653 +
14654 + ljmp $(__BOOT_CS),$1f
14655 +1:
14656 +
14657 + movl $NR_CPUS,%ecx
14658 + movl $pa(cpu_gdt_table),%edi
14659 + addl $__PAGE_OFFSET,%eax
14660 +1:
14661 + movw %ax,__KERNEL_CS + 2(%edi)
14662 + movw %ax,__KERNEXEC_KERNEL_CS + 2(%edi)
14663 + rorl $16,%eax
14664 + movb %al,__KERNEL_CS + 4(%edi)
14665 + movb %al,__KERNEXEC_KERNEL_CS + 4(%edi)
14666 + movb %ah,__KERNEL_CS + 7(%edi)
14667 + movb %ah,__KERNEXEC_KERNEL_CS + 7(%edi)
14668 + rorl $16,%eax
14669 + addl $PAGE_SIZE_asm,%edi
14670 + loop 1b
14671 +#endif
14672 +
14673 /*
14674 * Clear BSS first so that there are no surprises...
14675 */
14676 @@ -195,8 +261,11 @@ ENTRY(startup_32)
14677 movl %eax, pa(max_pfn_mapped)
14678
14679 /* Do early initialization of the fixmap area */
14680 - movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
14681 - movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
14682 +#ifdef CONFIG_COMPAT_VDSO
14683 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8)
14684 +#else
14685 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8)
14686 +#endif
14687 #else /* Not PAE */
14688
14689 page_pde_offset = (__PAGE_OFFSET >> 20);
14690 @@ -226,8 +295,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
14691 movl %eax, pa(max_pfn_mapped)
14692
14693 /* Do early initialization of the fixmap area */
14694 - movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
14695 - movl %eax,pa(initial_page_table+0xffc)
14696 +#ifdef CONFIG_COMPAT_VDSO
14697 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc)
14698 +#else
14699 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc)
14700 +#endif
14701 #endif
14702
14703 #ifdef CONFIG_PARAVIRT
14704 @@ -241,9 +313,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
14705 cmpl $num_subarch_entries, %eax
14706 jae bad_subarch
14707
14708 - movl pa(subarch_entries)(,%eax,4), %eax
14709 - subl $__PAGE_OFFSET, %eax
14710 - jmp *%eax
14711 + jmp *pa(subarch_entries)(,%eax,4)
14712
14713 bad_subarch:
14714 WEAK(lguest_entry)
14715 @@ -255,10 +325,10 @@ WEAK(xen_entry)
14716 __INITDATA
14717
14718 subarch_entries:
14719 - .long default_entry /* normal x86/PC */
14720 - .long lguest_entry /* lguest hypervisor */
14721 - .long xen_entry /* Xen hypervisor */
14722 - .long default_entry /* Moorestown MID */
14723 + .long ta(default_entry) /* normal x86/PC */
14724 + .long ta(lguest_entry) /* lguest hypervisor */
14725 + .long ta(xen_entry) /* Xen hypervisor */
14726 + .long ta(default_entry) /* Moorestown MID */
14727 num_subarch_entries = (. - subarch_entries) / 4
14728 .previous
14729 #else
14730 @@ -312,6 +382,7 @@ default_entry:
14731 orl %edx,%eax
14732 movl %eax,%cr4
14733
14734 +#ifdef CONFIG_X86_PAE
14735 testb $X86_CR4_PAE, %al # check if PAE is enabled
14736 jz 6f
14737
14738 @@ -340,6 +411,9 @@ default_entry:
14739 /* Make changes effective */
14740 wrmsr
14741
14742 + btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
14743 +#endif
14744 +
14745 6:
14746
14747 /*
14748 @@ -443,7 +517,7 @@ is386: movl $2,%ecx # set MP
14749 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
14750 movl %eax,%ss # after changing gdt.
14751
14752 - movl $(__USER_DS),%eax # DS/ES contains default USER segment
14753 +# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
14754 movl %eax,%ds
14755 movl %eax,%es
14756
14757 @@ -457,15 +531,22 @@ is386: movl $2,%ecx # set MP
14758 */
14759 cmpb $0,ready
14760 jne 1f
14761 - movl $gdt_page,%eax
14762 + movl $cpu_gdt_table,%eax
14763 movl $stack_canary,%ecx
14764 +#ifdef CONFIG_SMP
14765 + addl $__per_cpu_load,%ecx
14766 +#endif
14767 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
14768 shrl $16, %ecx
14769 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
14770 movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
14771 1:
14772 -#endif
14773 movl $(__KERNEL_STACK_CANARY),%eax
14774 +#elif defined(CONFIG_PAX_MEMORY_UDEREF)
14775 + movl $(__USER_DS),%eax
14776 +#else
14777 + xorl %eax,%eax
14778 +#endif
14779 movl %eax,%gs
14780
14781 xorl %eax,%eax # Clear LDT
14782 @@ -558,22 +639,22 @@ early_page_fault:
14783 jmp early_fault
14784
14785 early_fault:
14786 - cld
14787 #ifdef CONFIG_PRINTK
14788 + cmpl $1,%ss:early_recursion_flag
14789 + je hlt_loop
14790 + incl %ss:early_recursion_flag
14791 + cld
14792 pusha
14793 movl $(__KERNEL_DS),%eax
14794 movl %eax,%ds
14795 movl %eax,%es
14796 - cmpl $2,early_recursion_flag
14797 - je hlt_loop
14798 - incl early_recursion_flag
14799 movl %cr2,%eax
14800 pushl %eax
14801 pushl %edx /* trapno */
14802 pushl $fault_msg
14803 call printk
14804 +; call dump_stack
14805 #endif
14806 - call dump_stack
14807 hlt_loop:
14808 hlt
14809 jmp hlt_loop
14810 @@ -581,8 +662,11 @@ hlt_loop:
14811 /* This is the default interrupt "handler" :-) */
14812 ALIGN
14813 ignore_int:
14814 - cld
14815 #ifdef CONFIG_PRINTK
14816 + cmpl $2,%ss:early_recursion_flag
14817 + je hlt_loop
14818 + incl %ss:early_recursion_flag
14819 + cld
14820 pushl %eax
14821 pushl %ecx
14822 pushl %edx
14823 @@ -591,9 +675,6 @@ ignore_int:
14824 movl $(__KERNEL_DS),%eax
14825 movl %eax,%ds
14826 movl %eax,%es
14827 - cmpl $2,early_recursion_flag
14828 - je hlt_loop
14829 - incl early_recursion_flag
14830 pushl 16(%esp)
14831 pushl 24(%esp)
14832 pushl 32(%esp)
14833 @@ -622,29 +703,43 @@ ENTRY(initial_code)
14834 /*
14835 * BSS section
14836 */
14837 -__PAGE_ALIGNED_BSS
14838 - .align PAGE_SIZE
14839 #ifdef CONFIG_X86_PAE
14840 +.section .initial_pg_pmd,"a",@progbits
14841 initial_pg_pmd:
14842 .fill 1024*KPMDS,4,0
14843 #else
14844 +.section .initial_page_table,"a",@progbits
14845 ENTRY(initial_page_table)
14846 .fill 1024,4,0
14847 #endif
14848 +.section .initial_pg_fixmap,"a",@progbits
14849 initial_pg_fixmap:
14850 .fill 1024,4,0
14851 +.section .empty_zero_page,"a",@progbits
14852 ENTRY(empty_zero_page)
14853 .fill 4096,1,0
14854 +.section .swapper_pg_dir,"a",@progbits
14855 ENTRY(swapper_pg_dir)
14856 +#ifdef CONFIG_X86_PAE
14857 + .fill 4,8,0
14858 +#else
14859 .fill 1024,4,0
14860 +#endif
14861 +
14862 +/*
14863 + * The IDT has to be page-aligned to simplify the Pentium
14864 + * F0 0F bug workaround.. We have a special link segment
14865 + * for this.
14866 + */
14867 +.section .idt,"a",@progbits
14868 +ENTRY(idt_table)
14869 + .fill 256,8,0
14870
14871 /*
14872 * This starts the data section.
14873 */
14874 #ifdef CONFIG_X86_PAE
14875 -__PAGE_ALIGNED_DATA
14876 - /* Page-aligned for the benefit of paravirt? */
14877 - .align PAGE_SIZE
14878 +.section .initial_page_table,"a",@progbits
14879 ENTRY(initial_page_table)
14880 .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
14881 # if KPMDS == 3
14882 @@ -663,18 +758,27 @@ ENTRY(initial_page_table)
14883 # error "Kernel PMDs should be 1, 2 or 3"
14884 # endif
14885 .align PAGE_SIZE /* needs to be page-sized too */
14886 +
14887 +#ifdef CONFIG_PAX_PER_CPU_PGD
14888 +ENTRY(cpu_pgd)
14889 + .rept NR_CPUS
14890 + .fill 4,8,0
14891 + .endr
14892 +#endif
14893 +
14894 #endif
14895
14896 .data
14897 .balign 4
14898 ENTRY(stack_start)
14899 - .long init_thread_union+THREAD_SIZE
14900 + .long init_thread_union+THREAD_SIZE-8
14901
14902 +ready: .byte 0
14903 +
14904 +.section .rodata,"a",@progbits
14905 early_recursion_flag:
14906 .long 0
14907
14908 -ready: .byte 0
14909 -
14910 int_msg:
14911 .asciz "Unknown interrupt or fault at: %p %p %p\n"
14912
14913 @@ -707,7 +811,7 @@ fault_msg:
14914 .word 0 # 32 bit align gdt_desc.address
14915 boot_gdt_descr:
14916 .word __BOOT_DS+7
14917 - .long boot_gdt - __PAGE_OFFSET
14918 + .long pa(boot_gdt)
14919
14920 .word 0 # 32-bit align idt_desc.address
14921 idt_descr:
14922 @@ -718,7 +822,7 @@ idt_descr:
14923 .word 0 # 32 bit align gdt_desc.address
14924 ENTRY(early_gdt_descr)
14925 .word GDT_ENTRIES*8-1
14926 - .long gdt_page /* Overwritten for secondary CPUs */
14927 + .long cpu_gdt_table /* Overwritten for secondary CPUs */
14928
14929 /*
14930 * The boot_gdt must mirror the equivalent in setup.S and is
14931 @@ -727,5 +831,65 @@ ENTRY(early_gdt_descr)
14932 .align L1_CACHE_BYTES
14933 ENTRY(boot_gdt)
14934 .fill GDT_ENTRY_BOOT_CS,8,0
14935 - .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
14936 - .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
14937 + .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
14938 + .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
14939 +
14940 + .align PAGE_SIZE_asm
14941 +ENTRY(cpu_gdt_table)
14942 + .rept NR_CPUS
14943 + .quad 0x0000000000000000 /* NULL descriptor */
14944 + .quad 0x0000000000000000 /* 0x0b reserved */
14945 + .quad 0x0000000000000000 /* 0x13 reserved */
14946 + .quad 0x0000000000000000 /* 0x1b reserved */
14947 +
14948 +#ifdef CONFIG_PAX_KERNEXEC
14949 + .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
14950 +#else
14951 + .quad 0x0000000000000000 /* 0x20 unused */
14952 +#endif
14953 +
14954 + .quad 0x0000000000000000 /* 0x28 unused */
14955 + .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
14956 + .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
14957 + .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
14958 + .quad 0x0000000000000000 /* 0x4b reserved */
14959 + .quad 0x0000000000000000 /* 0x53 reserved */
14960 + .quad 0x0000000000000000 /* 0x5b reserved */
14961 +
14962 + .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
14963 + .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
14964 + .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
14965 + .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
14966 +
14967 + .quad 0x0000000000000000 /* 0x80 TSS descriptor */
14968 + .quad 0x0000000000000000 /* 0x88 LDT descriptor */
14969 +
14970 + /*
14971 + * Segments used for calling PnP BIOS have byte granularity.
14972 + * The code segments and data segments have fixed 64k limits,
14973 + * the transfer segment sizes are set at run time.
14974 + */
14975 + .quad 0x00409b000000ffff /* 0x90 32-bit code */
14976 + .quad 0x00009b000000ffff /* 0x98 16-bit code */
14977 + .quad 0x000093000000ffff /* 0xa0 16-bit data */
14978 + .quad 0x0000930000000000 /* 0xa8 16-bit data */
14979 + .quad 0x0000930000000000 /* 0xb0 16-bit data */
14980 +
14981 + /*
14982 + * The APM segments have byte granularity and their bases
14983 + * are set at run time. All have 64k limits.
14984 + */
14985 + .quad 0x00409b000000ffff /* 0xb8 APM CS code */
14986 + .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
14987 + .quad 0x004093000000ffff /* 0xc8 APM DS data */
14988 +
14989 + .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
14990 + .quad 0x0040930000000000 /* 0xd8 - PERCPU */
14991 + .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
14992 + .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
14993 + .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
14994 + .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
14995 +
14996 + /* Be sure this is zeroed to avoid false validations in Xen */
14997 + .fill PAGE_SIZE_asm - GDT_SIZE,1,0
14998 + .endr
14999 diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
15000 index e11e394..9aebc5d 100644
15001 --- a/arch/x86/kernel/head_64.S
15002 +++ b/arch/x86/kernel/head_64.S
15003 @@ -19,6 +19,8 @@
15004 #include <asm/cache.h>
15005 #include <asm/processor-flags.h>
15006 #include <asm/percpu.h>
15007 +#include <asm/cpufeature.h>
15008 +#include <asm/alternative-asm.h>
15009
15010 #ifdef CONFIG_PARAVIRT
15011 #include <asm/asm-offsets.h>
15012 @@ -38,6 +40,12 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
15013 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
15014 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
15015 L3_START_KERNEL = pud_index(__START_KERNEL_map)
15016 +L4_VMALLOC_START = pgd_index(VMALLOC_START)
15017 +L3_VMALLOC_START = pud_index(VMALLOC_START)
15018 +L4_VMALLOC_END = pgd_index(VMALLOC_END)
15019 +L3_VMALLOC_END = pud_index(VMALLOC_END)
15020 +L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
15021 +L3_VMEMMAP_START = pud_index(VMEMMAP_START)
15022
15023 .text
15024 __HEAD
15025 @@ -85,35 +93,23 @@ startup_64:
15026 */
15027 addq %rbp, init_level4_pgt + 0(%rip)
15028 addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
15029 + addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
15030 + addq %rbp, init_level4_pgt + (L4_VMALLOC_END*8)(%rip)
15031 + addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
15032 addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
15033
15034 addq %rbp, level3_ident_pgt + 0(%rip)
15035 +#ifndef CONFIG_XEN
15036 + addq %rbp, level3_ident_pgt + 8(%rip)
15037 +#endif
15038
15039 - addq %rbp, level3_kernel_pgt + (510*8)(%rip)
15040 - addq %rbp, level3_kernel_pgt + (511*8)(%rip)
15041 + addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
15042 +
15043 + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
15044 + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
15045
15046 addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
15047 -
15048 - /* Add an Identity mapping if I am above 1G */
15049 - leaq _text(%rip), %rdi
15050 - andq $PMD_PAGE_MASK, %rdi
15051 -
15052 - movq %rdi, %rax
15053 - shrq $PUD_SHIFT, %rax
15054 - andq $(PTRS_PER_PUD - 1), %rax
15055 - jz ident_complete
15056 -
15057 - leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
15058 - leaq level3_ident_pgt(%rip), %rbx
15059 - movq %rdx, 0(%rbx, %rax, 8)
15060 -
15061 - movq %rdi, %rax
15062 - shrq $PMD_SHIFT, %rax
15063 - andq $(PTRS_PER_PMD - 1), %rax
15064 - leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
15065 - leaq level2_spare_pgt(%rip), %rbx
15066 - movq %rdx, 0(%rbx, %rax, 8)
15067 -ident_complete:
15068 + addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
15069
15070 /*
15071 * Fixup the kernel text+data virtual addresses. Note that
15072 @@ -160,8 +156,8 @@ ENTRY(secondary_startup_64)
15073 * after the boot processor executes this code.
15074 */
15075
15076 - /* Enable PAE mode and PGE */
15077 - movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
15078 + /* Enable PAE mode and PSE/PGE */
15079 + movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
15080 movq %rax, %cr4
15081
15082 /* Setup early boot stage 4 level pagetables. */
15083 @@ -183,9 +179,17 @@ ENTRY(secondary_startup_64)
15084 movl $MSR_EFER, %ecx
15085 rdmsr
15086 btsl $_EFER_SCE, %eax /* Enable System Call */
15087 - btl $20,%edi /* No Execute supported? */
15088 + btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
15089 jnc 1f
15090 btsl $_EFER_NX, %eax
15091 + leaq init_level4_pgt(%rip), %rdi
15092 +#ifndef CONFIG_EFI
15093 + btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
15094 +#endif
15095 + btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
15096 + btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_END(%rdi)
15097 + btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
15098 + btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip)
15099 1: wrmsr /* Make changes effective */
15100
15101 /* Setup cr0 */
15102 @@ -247,6 +251,7 @@ ENTRY(secondary_startup_64)
15103 * jump. In addition we need to ensure %cs is set so we make this
15104 * a far return.
15105 */
15106 + pax_set_fptr_mask
15107 movq initial_code(%rip),%rax
15108 pushq $0 # fake return address to stop unwinder
15109 pushq $__KERNEL_CS # set correct cs
15110 @@ -269,7 +274,7 @@ ENTRY(secondary_startup_64)
15111 bad_address:
15112 jmp bad_address
15113
15114 - .section ".init.text","ax"
15115 + __INIT
15116 #ifdef CONFIG_EARLY_PRINTK
15117 .globl early_idt_handlers
15118 early_idt_handlers:
15119 @@ -314,18 +319,23 @@ ENTRY(early_idt_handler)
15120 #endif /* EARLY_PRINTK */
15121 1: hlt
15122 jmp 1b
15123 + .previous
15124
15125 #ifdef CONFIG_EARLY_PRINTK
15126 + __INITDATA
15127 early_recursion_flag:
15128 .long 0
15129 + .previous
15130
15131 + .section .rodata,"a",@progbits
15132 early_idt_msg:
15133 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
15134 early_idt_ripmsg:
15135 .asciz "RIP %s\n"
15136 + .previous
15137 #endif /* CONFIG_EARLY_PRINTK */
15138 - .previous
15139
15140 + .section .rodata,"a",@progbits
15141 #define NEXT_PAGE(name) \
15142 .balign PAGE_SIZE; \
15143 ENTRY(name)
15144 @@ -338,7 +348,6 @@ ENTRY(name)
15145 i = i + 1 ; \
15146 .endr
15147
15148 - .data
15149 /*
15150 * This default setting generates an ident mapping at address 0x100000
15151 * and a mapping for the kernel that precisely maps virtual address
15152 @@ -349,13 +358,41 @@ NEXT_PAGE(init_level4_pgt)
15153 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
15154 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
15155 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
15156 + .org init_level4_pgt + L4_VMALLOC_START*8, 0
15157 + .quad level3_vmalloc_start_pgt - __START_KERNEL_map + _KERNPG_TABLE
15158 + .org init_level4_pgt + L4_VMALLOC_END*8, 0
15159 + .quad level3_vmalloc_end_pgt - __START_KERNEL_map + _KERNPG_TABLE
15160 + .org init_level4_pgt + L4_VMEMMAP_START*8, 0
15161 + .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
15162 .org init_level4_pgt + L4_START_KERNEL*8, 0
15163 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
15164 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
15165
15166 +#ifdef CONFIG_PAX_PER_CPU_PGD
15167 +NEXT_PAGE(cpu_pgd)
15168 + .rept NR_CPUS
15169 + .fill 512,8,0
15170 + .endr
15171 +#endif
15172 +
15173 NEXT_PAGE(level3_ident_pgt)
15174 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
15175 +#ifdef CONFIG_XEN
15176 .fill 511,8,0
15177 +#else
15178 + .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
15179 + .fill 510,8,0
15180 +#endif
15181 +
15182 +NEXT_PAGE(level3_vmalloc_start_pgt)
15183 + .fill 512,8,0
15184 +
15185 +NEXT_PAGE(level3_vmalloc_end_pgt)
15186 + .fill 512,8,0
15187 +
15188 +NEXT_PAGE(level3_vmemmap_pgt)
15189 + .fill L3_VMEMMAP_START,8,0
15190 + .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
15191
15192 NEXT_PAGE(level3_kernel_pgt)
15193 .fill L3_START_KERNEL,8,0
15194 @@ -363,20 +400,23 @@ NEXT_PAGE(level3_kernel_pgt)
15195 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
15196 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
15197
15198 +NEXT_PAGE(level2_vmemmap_pgt)
15199 + .fill 512,8,0
15200 +
15201 NEXT_PAGE(level2_fixmap_pgt)
15202 - .fill 506,8,0
15203 - .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
15204 - /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
15205 - .fill 5,8,0
15206 + .fill 507,8,0
15207 + .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
15208 + /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
15209 + .fill 4,8,0
15210
15211 -NEXT_PAGE(level1_fixmap_pgt)
15212 +NEXT_PAGE(level1_vsyscall_pgt)
15213 .fill 512,8,0
15214
15215 -NEXT_PAGE(level2_ident_pgt)
15216 - /* Since I easily can, map the first 1G.
15217 + /* Since I easily can, map the first 2G.
15218 * Don't set NX because code runs from these pages.
15219 */
15220 - PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
15221 +NEXT_PAGE(level2_ident_pgt)
15222 + PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
15223
15224 NEXT_PAGE(level2_kernel_pgt)
15225 /*
15226 @@ -389,33 +429,55 @@ NEXT_PAGE(level2_kernel_pgt)
15227 * If you want to increase this then increase MODULES_VADDR
15228 * too.)
15229 */
15230 - PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
15231 - KERNEL_IMAGE_SIZE/PMD_SIZE)
15232 -
15233 -NEXT_PAGE(level2_spare_pgt)
15234 - .fill 512, 8, 0
15235 + PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
15236
15237 #undef PMDS
15238 #undef NEXT_PAGE
15239
15240 - .data
15241 + .align PAGE_SIZE
15242 +ENTRY(cpu_gdt_table)
15243 + .rept NR_CPUS
15244 + .quad 0x0000000000000000 /* NULL descriptor */
15245 + .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
15246 + .quad 0x00af9b000000ffff /* __KERNEL_CS */
15247 + .quad 0x00cf93000000ffff /* __KERNEL_DS */
15248 + .quad 0x00cffb000000ffff /* __USER32_CS */
15249 + .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
15250 + .quad 0x00affb000000ffff /* __USER_CS */
15251 +
15252 +#ifdef CONFIG_PAX_KERNEXEC
15253 + .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
15254 +#else
15255 + .quad 0x0 /* unused */
15256 +#endif
15257 +
15258 + .quad 0,0 /* TSS */
15259 + .quad 0,0 /* LDT */
15260 + .quad 0,0,0 /* three TLS descriptors */
15261 + .quad 0x0000f40000000000 /* node/CPU stored in limit */
15262 + /* asm/segment.h:GDT_ENTRIES must match this */
15263 +
15264 + /* zero the remaining page */
15265 + .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
15266 + .endr
15267 +
15268 .align 16
15269 .globl early_gdt_descr
15270 early_gdt_descr:
15271 .word GDT_ENTRIES*8-1
15272 early_gdt_descr_base:
15273 - .quad INIT_PER_CPU_VAR(gdt_page)
15274 + .quad cpu_gdt_table
15275
15276 ENTRY(phys_base)
15277 /* This must match the first entry in level2_kernel_pgt */
15278 .quad 0x0000000000000000
15279
15280 #include "../../x86/xen/xen-head.S"
15281 -
15282 - .section .bss, "aw", @nobits
15283 +
15284 + .section .rodata,"a",@progbits
15285 .align L1_CACHE_BYTES
15286 ENTRY(idt_table)
15287 - .skip IDT_ENTRIES * 16
15288 + .fill 512,8,0
15289
15290 __PAGE_ALIGNED_BSS
15291 .align PAGE_SIZE
15292 diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
15293 index 9c3bd4a..e1d9b35 100644
15294 --- a/arch/x86/kernel/i386_ksyms_32.c
15295 +++ b/arch/x86/kernel/i386_ksyms_32.c
15296 @@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
15297 EXPORT_SYMBOL(cmpxchg8b_emu);
15298 #endif
15299
15300 +EXPORT_SYMBOL_GPL(cpu_gdt_table);
15301 +
15302 /* Networking helper routines. */
15303 EXPORT_SYMBOL(csum_partial_copy_generic);
15304 +EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
15305 +EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
15306
15307 EXPORT_SYMBOL(__get_user_1);
15308 EXPORT_SYMBOL(__get_user_2);
15309 @@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
15310
15311 EXPORT_SYMBOL(csum_partial);
15312 EXPORT_SYMBOL(empty_zero_page);
15313 +
15314 +#ifdef CONFIG_PAX_KERNEXEC
15315 +EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
15316 +#endif
15317 diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
15318 index 6104852..6114160 100644
15319 --- a/arch/x86/kernel/i8259.c
15320 +++ b/arch/x86/kernel/i8259.c
15321 @@ -210,7 +210,7 @@ spurious_8259A_irq:
15322 "spurious 8259A interrupt: IRQ%d.\n", irq);
15323 spurious_irq_mask |= irqmask;
15324 }
15325 - atomic_inc(&irq_err_count);
15326 + atomic_inc_unchecked(&irq_err_count);
15327 /*
15328 * Theoretically we do not have to handle this IRQ,
15329 * but in Linux this does not cause problems and is
15330 diff --git a/arch/x86/kernel/init_task.c b/arch/x86/kernel/init_task.c
15331 index 43e9ccf..44ccf6f 100644
15332 --- a/arch/x86/kernel/init_task.c
15333 +++ b/arch/x86/kernel/init_task.c
15334 @@ -20,8 +20,7 @@ static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
15335 * way process stacks are handled. This is done by having a special
15336 * "init_task" linker map entry..
15337 */
15338 -union thread_union init_thread_union __init_task_data =
15339 - { INIT_THREAD_INFO(init_task) };
15340 +union thread_union init_thread_union __init_task_data;
15341
15342 /*
15343 * Initial task structure.
15344 @@ -38,5 +37,5 @@ EXPORT_SYMBOL(init_task);
15345 * section. Since TSS's are completely CPU-local, we want them
15346 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
15347 */
15348 -DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
15349 -
15350 +struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
15351 +EXPORT_SYMBOL(init_tss);
15352 diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
15353 index 8c96897..be66bfa 100644
15354 --- a/arch/x86/kernel/ioport.c
15355 +++ b/arch/x86/kernel/ioport.c
15356 @@ -6,6 +6,7 @@
15357 #include <linux/sched.h>
15358 #include <linux/kernel.h>
15359 #include <linux/capability.h>
15360 +#include <linux/security.h>
15361 #include <linux/errno.h>
15362 #include <linux/types.h>
15363 #include <linux/ioport.h>
15364 @@ -28,6 +29,12 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
15365
15366 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
15367 return -EINVAL;
15368 +#ifdef CONFIG_GRKERNSEC_IO
15369 + if (turn_on && grsec_disable_privio) {
15370 + gr_handle_ioperm();
15371 + return -EPERM;
15372 + }
15373 +#endif
15374 if (turn_on && !capable(CAP_SYS_RAWIO))
15375 return -EPERM;
15376
15377 @@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
15378 * because the ->io_bitmap_max value must match the bitmap
15379 * contents:
15380 */
15381 - tss = &per_cpu(init_tss, get_cpu());
15382 + tss = init_tss + get_cpu();
15383
15384 if (turn_on)
15385 bitmap_clear(t->io_bitmap_ptr, from, num);
15386 @@ -102,6 +109,12 @@ long sys_iopl(unsigned int level, struct pt_regs *regs)
15387 return -EINVAL;
15388 /* Trying to gain more privileges? */
15389 if (level > old) {
15390 +#ifdef CONFIG_GRKERNSEC_IO
15391 + if (grsec_disable_privio) {
15392 + gr_handle_iopl();
15393 + return -EPERM;
15394 + }
15395 +#endif
15396 if (!capable(CAP_SYS_RAWIO))
15397 return -EPERM;
15398 }
15399 diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
15400 index 429e0c9..17b3ece 100644
15401 --- a/arch/x86/kernel/irq.c
15402 +++ b/arch/x86/kernel/irq.c
15403 @@ -18,7 +18,7 @@
15404 #include <asm/mce.h>
15405 #include <asm/hw_irq.h>
15406
15407 -atomic_t irq_err_count;
15408 +atomic_unchecked_t irq_err_count;
15409
15410 /* Function pointer for generic interrupt vector handling */
15411 void (*x86_platform_ipi_callback)(void) = NULL;
15412 @@ -117,9 +117,9 @@ int arch_show_interrupts(struct seq_file *p, int prec)
15413 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
15414 seq_printf(p, " Machine check polls\n");
15415 #endif
15416 - seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
15417 + seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
15418 #if defined(CONFIG_X86_IO_APIC)
15419 - seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
15420 + seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
15421 #endif
15422 return 0;
15423 }
15424 @@ -159,10 +159,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
15425
15426 u64 arch_irq_stat(void)
15427 {
15428 - u64 sum = atomic_read(&irq_err_count);
15429 + u64 sum = atomic_read_unchecked(&irq_err_count);
15430
15431 #ifdef CONFIG_X86_IO_APIC
15432 - sum += atomic_read(&irq_mis_count);
15433 + sum += atomic_read_unchecked(&irq_mis_count);
15434 #endif
15435 return sum;
15436 }
15437 diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
15438 index 7209070..cbcd71a 100644
15439 --- a/arch/x86/kernel/irq_32.c
15440 +++ b/arch/x86/kernel/irq_32.c
15441 @@ -36,7 +36,7 @@ static int check_stack_overflow(void)
15442 __asm__ __volatile__("andl %%esp,%0" :
15443 "=r" (sp) : "0" (THREAD_SIZE - 1));
15444
15445 - return sp < (sizeof(struct thread_info) + STACK_WARN);
15446 + return sp < STACK_WARN;
15447 }
15448
15449 static void print_stack_overflow(void)
15450 @@ -54,8 +54,8 @@ static inline void print_stack_overflow(void) { }
15451 * per-CPU IRQ handling contexts (thread information and stack)
15452 */
15453 union irq_ctx {
15454 - struct thread_info tinfo;
15455 - u32 stack[THREAD_SIZE/sizeof(u32)];
15456 + unsigned long previous_esp;
15457 + u32 stack[THREAD_SIZE/sizeof(u32)];
15458 } __attribute__((aligned(THREAD_SIZE)));
15459
15460 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
15461 @@ -75,10 +75,9 @@ static void call_on_stack(void *func, void *stack)
15462 static inline int
15463 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
15464 {
15465 - union irq_ctx *curctx, *irqctx;
15466 + union irq_ctx *irqctx;
15467 u32 *isp, arg1, arg2;
15468
15469 - curctx = (union irq_ctx *) current_thread_info();
15470 irqctx = __this_cpu_read(hardirq_ctx);
15471
15472 /*
15473 @@ -87,21 +86,16 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
15474 * handler) we can't do that and just have to keep using the
15475 * current stack (which is the irq stack already after all)
15476 */
15477 - if (unlikely(curctx == irqctx))
15478 + if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
15479 return 0;
15480
15481 /* build the stack frame on the IRQ stack */
15482 - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
15483 - irqctx->tinfo.task = curctx->tinfo.task;
15484 - irqctx->tinfo.previous_esp = current_stack_pointer;
15485 + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
15486 + irqctx->previous_esp = current_stack_pointer;
15487
15488 - /*
15489 - * Copy the softirq bits in preempt_count so that the
15490 - * softirq checks work in the hardirq context.
15491 - */
15492 - irqctx->tinfo.preempt_count =
15493 - (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
15494 - (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
15495 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15496 + __set_fs(MAKE_MM_SEG(0));
15497 +#endif
15498
15499 if (unlikely(overflow))
15500 call_on_stack(print_stack_overflow, isp);
15501 @@ -113,6 +107,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
15502 : "0" (irq), "1" (desc), "2" (isp),
15503 "D" (desc->handle_irq)
15504 : "memory", "cc", "ecx");
15505 +
15506 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15507 + __set_fs(current_thread_info()->addr_limit);
15508 +#endif
15509 +
15510 return 1;
15511 }
15512
15513 @@ -121,29 +120,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
15514 */
15515 void __cpuinit irq_ctx_init(int cpu)
15516 {
15517 - union irq_ctx *irqctx;
15518 -
15519 if (per_cpu(hardirq_ctx, cpu))
15520 return;
15521
15522 - irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
15523 - THREAD_FLAGS,
15524 - THREAD_ORDER));
15525 - memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
15526 - irqctx->tinfo.cpu = cpu;
15527 - irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
15528 - irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
15529 -
15530 - per_cpu(hardirq_ctx, cpu) = irqctx;
15531 -
15532 - irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
15533 - THREAD_FLAGS,
15534 - THREAD_ORDER));
15535 - memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
15536 - irqctx->tinfo.cpu = cpu;
15537 - irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
15538 -
15539 - per_cpu(softirq_ctx, cpu) = irqctx;
15540 + per_cpu(hardirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
15541 + per_cpu(softirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
15542
15543 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
15544 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
15545 @@ -152,7 +133,6 @@ void __cpuinit irq_ctx_init(int cpu)
15546 asmlinkage void do_softirq(void)
15547 {
15548 unsigned long flags;
15549 - struct thread_info *curctx;
15550 union irq_ctx *irqctx;
15551 u32 *isp;
15552
15553 @@ -162,15 +142,22 @@ asmlinkage void do_softirq(void)
15554 local_irq_save(flags);
15555
15556 if (local_softirq_pending()) {
15557 - curctx = current_thread_info();
15558 irqctx = __this_cpu_read(softirq_ctx);
15559 - irqctx->tinfo.task = curctx->task;
15560 - irqctx->tinfo.previous_esp = current_stack_pointer;
15561 + irqctx->previous_esp = current_stack_pointer;
15562
15563 /* build the stack frame on the softirq stack */
15564 - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
15565 + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
15566 +
15567 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15568 + __set_fs(MAKE_MM_SEG(0));
15569 +#endif
15570
15571 call_on_stack(__do_softirq, isp);
15572 +
15573 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15574 + __set_fs(current_thread_info()->addr_limit);
15575 +#endif
15576 +
15577 /*
15578 * Shouldn't happen, we returned above if in_interrupt():
15579 */
15580 diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
15581 index 69bca46..0bac999 100644
15582 --- a/arch/x86/kernel/irq_64.c
15583 +++ b/arch/x86/kernel/irq_64.c
15584 @@ -38,7 +38,7 @@ static inline void stack_overflow_check(struct pt_regs *regs)
15585 #ifdef CONFIG_DEBUG_STACKOVERFLOW
15586 u64 curbase = (u64)task_stack_page(current);
15587
15588 - if (user_mode_vm(regs))
15589 + if (user_mode(regs))
15590 return;
15591
15592 WARN_ONCE(regs->sp >= curbase &&
15593 diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
15594 index faba577..93b9e71 100644
15595 --- a/arch/x86/kernel/kgdb.c
15596 +++ b/arch/x86/kernel/kgdb.c
15597 @@ -124,11 +124,11 @@ char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
15598 #ifdef CONFIG_X86_32
15599 switch (regno) {
15600 case GDB_SS:
15601 - if (!user_mode_vm(regs))
15602 + if (!user_mode(regs))
15603 *(unsigned long *)mem = __KERNEL_DS;
15604 break;
15605 case GDB_SP:
15606 - if (!user_mode_vm(regs))
15607 + if (!user_mode(regs))
15608 *(unsigned long *)mem = kernel_stack_pointer(regs);
15609 break;
15610 case GDB_GS:
15611 @@ -473,12 +473,12 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
15612 case 'k':
15613 /* clear the trace bit */
15614 linux_regs->flags &= ~X86_EFLAGS_TF;
15615 - atomic_set(&kgdb_cpu_doing_single_step, -1);
15616 + atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
15617
15618 /* set the trace bit if we're stepping */
15619 if (remcomInBuffer[0] == 's') {
15620 linux_regs->flags |= X86_EFLAGS_TF;
15621 - atomic_set(&kgdb_cpu_doing_single_step,
15622 + atomic_set_unchecked(&kgdb_cpu_doing_single_step,
15623 raw_smp_processor_id());
15624 }
15625
15626 @@ -543,7 +543,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
15627
15628 switch (cmd) {
15629 case DIE_DEBUG:
15630 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
15631 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
15632 if (user_mode(regs))
15633 return single_step_cont(regs, args);
15634 break;
15635 diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
15636 index 7da647d..5d3c4c1 100644
15637 --- a/arch/x86/kernel/kprobes.c
15638 +++ b/arch/x86/kernel/kprobes.c
15639 @@ -118,8 +118,11 @@ static void __kprobes __synthesize_relative_insn(void *from, void *to, u8 op)
15640 } __attribute__((packed)) *insn;
15641
15642 insn = (struct __arch_relative_insn *)from;
15643 +
15644 + pax_open_kernel();
15645 insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
15646 insn->op = op;
15647 + pax_close_kernel();
15648 }
15649
15650 /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
15651 @@ -156,7 +159,7 @@ static int __kprobes can_boost(kprobe_opcode_t *opcodes)
15652 kprobe_opcode_t opcode;
15653 kprobe_opcode_t *orig_opcodes = opcodes;
15654
15655 - if (search_exception_tables((unsigned long)opcodes))
15656 + if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
15657 return 0; /* Page fault may occur on this address. */
15658
15659 retry:
15660 @@ -317,7 +320,9 @@ static int __kprobes __copy_instruction(u8 *dest, u8 *src, int recover)
15661 }
15662 }
15663 insn_get_length(&insn);
15664 + pax_open_kernel();
15665 memcpy(dest, insn.kaddr, insn.length);
15666 + pax_close_kernel();
15667
15668 #ifdef CONFIG_X86_64
15669 if (insn_rip_relative(&insn)) {
15670 @@ -341,7 +346,9 @@ static int __kprobes __copy_instruction(u8 *dest, u8 *src, int recover)
15671 (u8 *) dest;
15672 BUG_ON((s64) (s32) newdisp != newdisp); /* Sanity check. */
15673 disp = (u8 *) dest + insn_offset_displacement(&insn);
15674 + pax_open_kernel();
15675 *(s32 *) disp = (s32) newdisp;
15676 + pax_close_kernel();
15677 }
15678 #endif
15679 return insn.length;
15680 @@ -355,12 +362,12 @@ static void __kprobes arch_copy_kprobe(struct kprobe *p)
15681 */
15682 __copy_instruction(p->ainsn.insn, p->addr, 0);
15683
15684 - if (can_boost(p->addr))
15685 + if (can_boost(ktla_ktva(p->addr)))
15686 p->ainsn.boostable = 0;
15687 else
15688 p->ainsn.boostable = -1;
15689
15690 - p->opcode = *p->addr;
15691 + p->opcode = *(ktla_ktva(p->addr));
15692 }
15693
15694 int __kprobes arch_prepare_kprobe(struct kprobe *p)
15695 @@ -477,7 +484,7 @@ static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs,
15696 * nor set current_kprobe, because it doesn't use single
15697 * stepping.
15698 */
15699 - regs->ip = (unsigned long)p->ainsn.insn;
15700 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
15701 preempt_enable_no_resched();
15702 return;
15703 }
15704 @@ -496,7 +503,7 @@ static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs,
15705 if (p->opcode == BREAKPOINT_INSTRUCTION)
15706 regs->ip = (unsigned long)p->addr;
15707 else
15708 - regs->ip = (unsigned long)p->ainsn.insn;
15709 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
15710 }
15711
15712 /*
15713 @@ -575,7 +582,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
15714 setup_singlestep(p, regs, kcb, 0);
15715 return 1;
15716 }
15717 - } else if (*addr != BREAKPOINT_INSTRUCTION) {
15718 + } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
15719 /*
15720 * The breakpoint instruction was removed right
15721 * after we hit it. Another cpu has removed
15722 @@ -683,6 +690,9 @@ static void __used __kprobes kretprobe_trampoline_holder(void)
15723 " movq %rax, 152(%rsp)\n"
15724 RESTORE_REGS_STRING
15725 " popfq\n"
15726 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
15727 + " btsq $63,(%rsp)\n"
15728 +#endif
15729 #else
15730 " pushf\n"
15731 SAVE_REGS_STRING
15732 @@ -820,7 +830,7 @@ static void __kprobes resume_execution(struct kprobe *p,
15733 struct pt_regs *regs, struct kprobe_ctlblk *kcb)
15734 {
15735 unsigned long *tos = stack_addr(regs);
15736 - unsigned long copy_ip = (unsigned long)p->ainsn.insn;
15737 + unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
15738 unsigned long orig_ip = (unsigned long)p->addr;
15739 kprobe_opcode_t *insn = p->ainsn.insn;
15740
15741 @@ -1002,7 +1012,7 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
15742 struct die_args *args = data;
15743 int ret = NOTIFY_DONE;
15744
15745 - if (args->regs && user_mode_vm(args->regs))
15746 + if (args->regs && user_mode(args->regs))
15747 return ret;
15748
15749 switch (val) {
15750 @@ -1384,7 +1394,7 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
15751 * Verify if the address gap is in 2GB range, because this uses
15752 * a relative jump.
15753 */
15754 - rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
15755 + rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE;
15756 if (abs(rel) > 0x7fffffff)
15757 return -ERANGE;
15758
15759 @@ -1405,11 +1415,11 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
15760 synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
15761
15762 /* Set probe function call */
15763 - synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
15764 + synthesize_relcall(buf + TMPL_CALL_IDX, ktla_ktva(optimized_callback));
15765
15766 /* Set returning jmp instruction at the tail of out-of-line buffer */
15767 synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
15768 - (u8 *)op->kp.addr + op->optinsn.size);
15769 + (u8 *)ktla_ktva(op->kp.addr) + op->optinsn.size);
15770
15771 flush_icache_range((unsigned long) buf,
15772 (unsigned long) buf + TMPL_END_IDX +
15773 @@ -1431,7 +1441,7 @@ static void __kprobes setup_optimize_kprobe(struct text_poke_param *tprm,
15774 ((long)op->kp.addr + RELATIVEJUMP_SIZE));
15775
15776 /* Backup instructions which will be replaced by jump address */
15777 - memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
15778 + memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE,
15779 RELATIVE_ADDR_SIZE);
15780
15781 insn_buf[0] = RELATIVEJUMP_OPCODE;
15782 diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
15783 index a9c2116..a52d4fc 100644
15784 --- a/arch/x86/kernel/kvm.c
15785 +++ b/arch/x86/kernel/kvm.c
15786 @@ -437,6 +437,7 @@ static void __init paravirt_ops_setup(void)
15787 pv_mmu_ops.set_pud = kvm_set_pud;
15788 #if PAGETABLE_LEVELS == 4
15789 pv_mmu_ops.set_pgd = kvm_set_pgd;
15790 + pv_mmu_ops.set_pgd_batched = kvm_set_pgd;
15791 #endif
15792 #endif
15793 pv_mmu_ops.flush_tlb_user = kvm_flush_tlb;
15794 diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
15795 index ea69726..604d066 100644
15796 --- a/arch/x86/kernel/ldt.c
15797 +++ b/arch/x86/kernel/ldt.c
15798 @@ -67,13 +67,13 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
15799 if (reload) {
15800 #ifdef CONFIG_SMP
15801 preempt_disable();
15802 - load_LDT(pc);
15803 + load_LDT_nolock(pc);
15804 if (!cpumask_equal(mm_cpumask(current->mm),
15805 cpumask_of(smp_processor_id())))
15806 smp_call_function(flush_ldt, current->mm, 1);
15807 preempt_enable();
15808 #else
15809 - load_LDT(pc);
15810 + load_LDT_nolock(pc);
15811 #endif
15812 }
15813 if (oldsize) {
15814 @@ -95,7 +95,7 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
15815 return err;
15816
15817 for (i = 0; i < old->size; i++)
15818 - write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
15819 + write_ldt_entry(new->ldt, i, old->ldt + i);
15820 return 0;
15821 }
15822
15823 @@ -116,6 +116,24 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
15824 retval = copy_ldt(&mm->context, &old_mm->context);
15825 mutex_unlock(&old_mm->context.lock);
15826 }
15827 +
15828 + if (tsk == current) {
15829 + mm->context.vdso = 0;
15830 +
15831 +#ifdef CONFIG_X86_32
15832 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
15833 + mm->context.user_cs_base = 0UL;
15834 + mm->context.user_cs_limit = ~0UL;
15835 +
15836 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
15837 + cpus_clear(mm->context.cpu_user_cs_mask);
15838 +#endif
15839 +
15840 +#endif
15841 +#endif
15842 +
15843 + }
15844 +
15845 return retval;
15846 }
15847
15848 @@ -230,6 +248,13 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
15849 }
15850 }
15851
15852 +#ifdef CONFIG_PAX_SEGMEXEC
15853 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
15854 + error = -EINVAL;
15855 + goto out_unlock;
15856 + }
15857 +#endif
15858 +
15859 fill_ldt(&ldt, &ldt_info);
15860 if (oldmode)
15861 ldt.avl = 0;
15862 diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
15863 index a3fa43b..8966f4c 100644
15864 --- a/arch/x86/kernel/machine_kexec_32.c
15865 +++ b/arch/x86/kernel/machine_kexec_32.c
15866 @@ -27,7 +27,7 @@
15867 #include <asm/cacheflush.h>
15868 #include <asm/debugreg.h>
15869
15870 -static void set_idt(void *newidt, __u16 limit)
15871 +static void set_idt(struct desc_struct *newidt, __u16 limit)
15872 {
15873 struct desc_ptr curidt;
15874
15875 @@ -39,7 +39,7 @@ static void set_idt(void *newidt, __u16 limit)
15876 }
15877
15878
15879 -static void set_gdt(void *newgdt, __u16 limit)
15880 +static void set_gdt(struct desc_struct *newgdt, __u16 limit)
15881 {
15882 struct desc_ptr curgdt;
15883
15884 @@ -217,7 +217,7 @@ void machine_kexec(struct kimage *image)
15885 }
15886
15887 control_page = page_address(image->control_code_page);
15888 - memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
15889 + memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
15890
15891 relocate_kernel_ptr = control_page;
15892 page_list[PA_CONTROL_PAGE] = __pa(control_page);
15893 diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c
15894 index 3ca42d0..7cff8cc 100644
15895 --- a/arch/x86/kernel/microcode_intel.c
15896 +++ b/arch/x86/kernel/microcode_intel.c
15897 @@ -436,13 +436,13 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device)
15898
15899 static int get_ucode_user(void *to, const void *from, size_t n)
15900 {
15901 - return copy_from_user(to, from, n);
15902 + return copy_from_user(to, (const void __force_user *)from, n);
15903 }
15904
15905 static enum ucode_state
15906 request_microcode_user(int cpu, const void __user *buf, size_t size)
15907 {
15908 - return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
15909 + return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
15910 }
15911
15912 static void microcode_fini_cpu(int cpu)
15913 diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
15914 index 925179f..267ac7a 100644
15915 --- a/arch/x86/kernel/module.c
15916 +++ b/arch/x86/kernel/module.c
15917 @@ -36,15 +36,60 @@
15918 #define DEBUGP(fmt...)
15919 #endif
15920
15921 -void *module_alloc(unsigned long size)
15922 +static inline void *__module_alloc(unsigned long size, pgprot_t prot)
15923 {
15924 - if (PAGE_ALIGN(size) > MODULES_LEN)
15925 + if (size == 0 || PAGE_ALIGN(size) > MODULES_LEN)
15926 return NULL;
15927 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
15928 - GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
15929 + GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot,
15930 -1, __builtin_return_address(0));
15931 }
15932
15933 +void *module_alloc(unsigned long size)
15934 +{
15935 +
15936 +#ifdef CONFIG_PAX_KERNEXEC
15937 + return __module_alloc(size, PAGE_KERNEL);
15938 +#else
15939 + return __module_alloc(size, PAGE_KERNEL_EXEC);
15940 +#endif
15941 +
15942 +}
15943 +
15944 +#ifdef CONFIG_PAX_KERNEXEC
15945 +#ifdef CONFIG_X86_32
15946 +void *module_alloc_exec(unsigned long size)
15947 +{
15948 + struct vm_struct *area;
15949 +
15950 + if (size == 0)
15951 + return NULL;
15952 +
15953 + area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
15954 + return area ? area->addr : NULL;
15955 +}
15956 +EXPORT_SYMBOL(module_alloc_exec);
15957 +
15958 +void module_free_exec(struct module *mod, void *module_region)
15959 +{
15960 + vunmap(module_region);
15961 +}
15962 +EXPORT_SYMBOL(module_free_exec);
15963 +#else
15964 +void module_free_exec(struct module *mod, void *module_region)
15965 +{
15966 + module_free(mod, module_region);
15967 +}
15968 +EXPORT_SYMBOL(module_free_exec);
15969 +
15970 +void *module_alloc_exec(unsigned long size)
15971 +{
15972 + return __module_alloc(size, PAGE_KERNEL_RX);
15973 +}
15974 +EXPORT_SYMBOL(module_alloc_exec);
15975 +#endif
15976 +#endif
15977 +
15978 #ifdef CONFIG_X86_32
15979 int apply_relocate(Elf32_Shdr *sechdrs,
15980 const char *strtab,
15981 @@ -55,14 +100,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
15982 unsigned int i;
15983 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
15984 Elf32_Sym *sym;
15985 - uint32_t *location;
15986 + uint32_t *plocation, location;
15987
15988 DEBUGP("Applying relocate section %u to %u\n", relsec,
15989 sechdrs[relsec].sh_info);
15990 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
15991 /* This is where to make the change */
15992 - location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
15993 - + rel[i].r_offset;
15994 + plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
15995 + location = (uint32_t)plocation;
15996 + if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
15997 + plocation = ktla_ktva((void *)plocation);
15998 /* This is the symbol it is referring to. Note that all
15999 undefined symbols have been resolved. */
16000 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
16001 @@ -71,11 +118,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
16002 switch (ELF32_R_TYPE(rel[i].r_info)) {
16003 case R_386_32:
16004 /* We add the value into the location given */
16005 - *location += sym->st_value;
16006 + pax_open_kernel();
16007 + *plocation += sym->st_value;
16008 + pax_close_kernel();
16009 break;
16010 case R_386_PC32:
16011 /* Add the value, subtract its postition */
16012 - *location += sym->st_value - (uint32_t)location;
16013 + pax_open_kernel();
16014 + *plocation += sym->st_value - location;
16015 + pax_close_kernel();
16016 break;
16017 default:
16018 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
16019 @@ -120,21 +171,30 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
16020 case R_X86_64_NONE:
16021 break;
16022 case R_X86_64_64:
16023 + pax_open_kernel();
16024 *(u64 *)loc = val;
16025 + pax_close_kernel();
16026 break;
16027 case R_X86_64_32:
16028 + pax_open_kernel();
16029 *(u32 *)loc = val;
16030 + pax_close_kernel();
16031 if (val != *(u32 *)loc)
16032 goto overflow;
16033 break;
16034 case R_X86_64_32S:
16035 + pax_open_kernel();
16036 *(s32 *)loc = val;
16037 + pax_close_kernel();
16038 if ((s64)val != *(s32 *)loc)
16039 goto overflow;
16040 break;
16041 case R_X86_64_PC32:
16042 val -= (u64)loc;
16043 + pax_open_kernel();
16044 *(u32 *)loc = val;
16045 + pax_close_kernel();
16046 +
16047 #if 0
16048 if ((s64)val != *(s32 *)loc)
16049 goto overflow;
16050 diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
16051 index e88f37b..1353db6 100644
16052 --- a/arch/x86/kernel/nmi.c
16053 +++ b/arch/x86/kernel/nmi.c
16054 @@ -408,6 +408,17 @@ static notrace __kprobes void default_do_nmi(struct pt_regs *regs)
16055 dotraplinkage notrace __kprobes void
16056 do_nmi(struct pt_regs *regs, long error_code)
16057 {
16058 +
16059 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16060 + if (!user_mode(regs)) {
16061 + unsigned long cs = regs->cs & 0xFFFF;
16062 + unsigned long ip = ktva_ktla(regs->ip);
16063 +
16064 + if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
16065 + regs->ip = ip;
16066 + }
16067 +#endif
16068 +
16069 nmi_enter();
16070
16071 inc_irq_stat(__nmi_count);
16072 diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
16073 index 676b8c7..870ba04 100644
16074 --- a/arch/x86/kernel/paravirt-spinlocks.c
16075 +++ b/arch/x86/kernel/paravirt-spinlocks.c
16076 @@ -13,7 +13,7 @@ default_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
16077 arch_spin_lock(lock);
16078 }
16079
16080 -struct pv_lock_ops pv_lock_ops = {
16081 +struct pv_lock_ops pv_lock_ops __read_only = {
16082 #ifdef CONFIG_SMP
16083 .spin_is_locked = __ticket_spin_is_locked,
16084 .spin_is_contended = __ticket_spin_is_contended,
16085 diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
16086 index d90272e..6bb013b 100644
16087 --- a/arch/x86/kernel/paravirt.c
16088 +++ b/arch/x86/kernel/paravirt.c
16089 @@ -53,6 +53,9 @@ u64 _paravirt_ident_64(u64 x)
16090 {
16091 return x;
16092 }
16093 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
16094 +PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
16095 +#endif
16096
16097 void __init default_banner(void)
16098 {
16099 @@ -145,15 +148,19 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
16100 if (opfunc == NULL)
16101 /* If there's no function, patch it with a ud2a (BUG) */
16102 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
16103 - else if (opfunc == _paravirt_nop)
16104 + else if (opfunc == (void *)_paravirt_nop)
16105 /* If the operation is a nop, then nop the callsite */
16106 ret = paravirt_patch_nop();
16107
16108 /* identity functions just return their single argument */
16109 - else if (opfunc == _paravirt_ident_32)
16110 + else if (opfunc == (void *)_paravirt_ident_32)
16111 ret = paravirt_patch_ident_32(insnbuf, len);
16112 - else if (opfunc == _paravirt_ident_64)
16113 + else if (opfunc == (void *)_paravirt_ident_64)
16114 ret = paravirt_patch_ident_64(insnbuf, len);
16115 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
16116 + else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
16117 + ret = paravirt_patch_ident_64(insnbuf, len);
16118 +#endif
16119
16120 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
16121 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
16122 @@ -178,7 +185,7 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
16123 if (insn_len > len || start == NULL)
16124 insn_len = len;
16125 else
16126 - memcpy(insnbuf, start, insn_len);
16127 + memcpy(insnbuf, ktla_ktva(start), insn_len);
16128
16129 return insn_len;
16130 }
16131 @@ -302,7 +309,7 @@ void arch_flush_lazy_mmu_mode(void)
16132 preempt_enable();
16133 }
16134
16135 -struct pv_info pv_info = {
16136 +struct pv_info pv_info __read_only = {
16137 .name = "bare hardware",
16138 .paravirt_enabled = 0,
16139 .kernel_rpl = 0,
16140 @@ -313,16 +320,16 @@ struct pv_info pv_info = {
16141 #endif
16142 };
16143
16144 -struct pv_init_ops pv_init_ops = {
16145 +struct pv_init_ops pv_init_ops __read_only = {
16146 .patch = native_patch,
16147 };
16148
16149 -struct pv_time_ops pv_time_ops = {
16150 +struct pv_time_ops pv_time_ops __read_only = {
16151 .sched_clock = native_sched_clock,
16152 .steal_clock = native_steal_clock,
16153 };
16154
16155 -struct pv_irq_ops pv_irq_ops = {
16156 +struct pv_irq_ops pv_irq_ops __read_only = {
16157 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
16158 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
16159 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
16160 @@ -334,7 +341,7 @@ struct pv_irq_ops pv_irq_ops = {
16161 #endif
16162 };
16163
16164 -struct pv_cpu_ops pv_cpu_ops = {
16165 +struct pv_cpu_ops pv_cpu_ops __read_only = {
16166 .cpuid = native_cpuid,
16167 .get_debugreg = native_get_debugreg,
16168 .set_debugreg = native_set_debugreg,
16169 @@ -395,21 +402,26 @@ struct pv_cpu_ops pv_cpu_ops = {
16170 .end_context_switch = paravirt_nop,
16171 };
16172
16173 -struct pv_apic_ops pv_apic_ops = {
16174 +struct pv_apic_ops pv_apic_ops __read_only = {
16175 #ifdef CONFIG_X86_LOCAL_APIC
16176 .startup_ipi_hook = paravirt_nop,
16177 #endif
16178 };
16179
16180 -#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
16181 +#ifdef CONFIG_X86_32
16182 +#ifdef CONFIG_X86_PAE
16183 +/* 64-bit pagetable entries */
16184 +#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
16185 +#else
16186 /* 32-bit pagetable entries */
16187 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
16188 +#endif
16189 #else
16190 /* 64-bit pagetable entries */
16191 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
16192 #endif
16193
16194 -struct pv_mmu_ops pv_mmu_ops = {
16195 +struct pv_mmu_ops pv_mmu_ops __read_only = {
16196
16197 .read_cr2 = native_read_cr2,
16198 .write_cr2 = native_write_cr2,
16199 @@ -459,6 +471,7 @@ struct pv_mmu_ops pv_mmu_ops = {
16200 .make_pud = PTE_IDENT,
16201
16202 .set_pgd = native_set_pgd,
16203 + .set_pgd_batched = native_set_pgd_batched,
16204 #endif
16205 #endif /* PAGETABLE_LEVELS >= 3 */
16206
16207 @@ -478,6 +491,12 @@ struct pv_mmu_ops pv_mmu_ops = {
16208 },
16209
16210 .set_fixmap = native_set_fixmap,
16211 +
16212 +#ifdef CONFIG_PAX_KERNEXEC
16213 + .pax_open_kernel = native_pax_open_kernel,
16214 + .pax_close_kernel = native_pax_close_kernel,
16215 +#endif
16216 +
16217 };
16218
16219 EXPORT_SYMBOL_GPL(pv_time_ops);
16220 diff --git a/arch/x86/kernel/pci-iommu_table.c b/arch/x86/kernel/pci-iommu_table.c
16221 index 35ccf75..7a15747 100644
16222 --- a/arch/x86/kernel/pci-iommu_table.c
16223 +++ b/arch/x86/kernel/pci-iommu_table.c
16224 @@ -2,7 +2,7 @@
16225 #include <asm/iommu_table.h>
16226 #include <linux/string.h>
16227 #include <linux/kallsyms.h>
16228 -
16229 +#include <linux/sched.h>
16230
16231 #define DEBUG 1
16232
16233 diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
16234 index ee5d4fb..426649b 100644
16235 --- a/arch/x86/kernel/process.c
16236 +++ b/arch/x86/kernel/process.c
16237 @@ -48,16 +48,33 @@ void free_thread_xstate(struct task_struct *tsk)
16238
16239 void free_thread_info(struct thread_info *ti)
16240 {
16241 - free_thread_xstate(ti->task);
16242 free_pages((unsigned long)ti, THREAD_ORDER);
16243 }
16244
16245 +static struct kmem_cache *task_struct_cachep;
16246 +
16247 void arch_task_cache_init(void)
16248 {
16249 - task_xstate_cachep =
16250 - kmem_cache_create("task_xstate", xstate_size,
16251 + /* create a slab on which task_structs can be allocated */
16252 + task_struct_cachep =
16253 + kmem_cache_create("task_struct", sizeof(struct task_struct),
16254 + ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
16255 +
16256 + task_xstate_cachep =
16257 + kmem_cache_create("task_xstate", xstate_size,
16258 __alignof__(union thread_xstate),
16259 - SLAB_PANIC | SLAB_NOTRACK, NULL);
16260 + SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
16261 +}
16262 +
16263 +struct task_struct *alloc_task_struct_node(int node)
16264 +{
16265 + return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node);
16266 +}
16267 +
16268 +void free_task_struct(struct task_struct *task)
16269 +{
16270 + free_thread_xstate(task);
16271 + kmem_cache_free(task_struct_cachep, task);
16272 }
16273
16274 /*
16275 @@ -70,7 +87,7 @@ void exit_thread(void)
16276 unsigned long *bp = t->io_bitmap_ptr;
16277
16278 if (bp) {
16279 - struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
16280 + struct tss_struct *tss = init_tss + get_cpu();
16281
16282 t->io_bitmap_ptr = NULL;
16283 clear_thread_flag(TIF_IO_BITMAP);
16284 @@ -106,7 +123,7 @@ void show_regs_common(void)
16285
16286 printk(KERN_CONT "\n");
16287 printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s",
16288 - current->pid, current->comm, print_tainted(),
16289 + task_pid_nr(current), current->comm, print_tainted(),
16290 init_utsname()->release,
16291 (int)strcspn(init_utsname()->version, " "),
16292 init_utsname()->version);
16293 @@ -120,6 +137,9 @@ void flush_thread(void)
16294 {
16295 struct task_struct *tsk = current;
16296
16297 +#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
16298 + loadsegment(gs, 0);
16299 +#endif
16300 flush_ptrace_hw_breakpoint(tsk);
16301 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
16302 /*
16303 @@ -282,10 +302,10 @@ int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
16304 regs.di = (unsigned long) arg;
16305
16306 #ifdef CONFIG_X86_32
16307 - regs.ds = __USER_DS;
16308 - regs.es = __USER_DS;
16309 + regs.ds = __KERNEL_DS;
16310 + regs.es = __KERNEL_DS;
16311 regs.fs = __KERNEL_PERCPU;
16312 - regs.gs = __KERNEL_STACK_CANARY;
16313 + savesegment(gs, regs.gs);
16314 #else
16315 regs.ss = __KERNEL_DS;
16316 #endif
16317 @@ -411,7 +431,7 @@ bool set_pm_idle_to_default(void)
16318
16319 return ret;
16320 }
16321 -void stop_this_cpu(void *dummy)
16322 +__noreturn void stop_this_cpu(void *dummy)
16323 {
16324 local_irq_disable();
16325 /*
16326 @@ -653,16 +673,37 @@ static int __init idle_setup(char *str)
16327 }
16328 early_param("idle", idle_setup);
16329
16330 -unsigned long arch_align_stack(unsigned long sp)
16331 +#ifdef CONFIG_PAX_RANDKSTACK
16332 +void pax_randomize_kstack(struct pt_regs *regs)
16333 {
16334 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
16335 - sp -= get_random_int() % 8192;
16336 - return sp & ~0xf;
16337 -}
16338 + struct thread_struct *thread = &current->thread;
16339 + unsigned long time;
16340
16341 -unsigned long arch_randomize_brk(struct mm_struct *mm)
16342 -{
16343 - unsigned long range_end = mm->brk + 0x02000000;
16344 - return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
16345 -}
16346 + if (!randomize_va_space)
16347 + return;
16348 +
16349 + if (v8086_mode(regs))
16350 + return;
16351
16352 + rdtscl(time);
16353 +
16354 + /* P4 seems to return a 0 LSB, ignore it */
16355 +#ifdef CONFIG_MPENTIUM4
16356 + time &= 0x3EUL;
16357 + time <<= 2;
16358 +#elif defined(CONFIG_X86_64)
16359 + time &= 0xFUL;
16360 + time <<= 4;
16361 +#else
16362 + time &= 0x1FUL;
16363 + time <<= 3;
16364 +#endif
16365 +
16366 + thread->sp0 ^= time;
16367 + load_sp0(init_tss + smp_processor_id(), thread);
16368 +
16369 +#ifdef CONFIG_X86_64
16370 + percpu_write(kernel_stack, thread->sp0);
16371 +#endif
16372 +}
16373 +#endif
16374 diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
16375 index 795b79f..063767a 100644
16376 --- a/arch/x86/kernel/process_32.c
16377 +++ b/arch/x86/kernel/process_32.c
16378 @@ -67,6 +67,7 @@ asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
16379 unsigned long thread_saved_pc(struct task_struct *tsk)
16380 {
16381 return ((unsigned long *)tsk->thread.sp)[3];
16382 +//XXX return tsk->thread.eip;
16383 }
16384
16385 #ifndef CONFIG_SMP
16386 @@ -130,15 +131,14 @@ void __show_regs(struct pt_regs *regs, int all)
16387 unsigned long sp;
16388 unsigned short ss, gs;
16389
16390 - if (user_mode_vm(regs)) {
16391 + if (user_mode(regs)) {
16392 sp = regs->sp;
16393 ss = regs->ss & 0xffff;
16394 - gs = get_user_gs(regs);
16395 } else {
16396 sp = kernel_stack_pointer(regs);
16397 savesegment(ss, ss);
16398 - savesegment(gs, gs);
16399 }
16400 + gs = get_user_gs(regs);
16401
16402 show_regs_common();
16403
16404 @@ -200,13 +200,14 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
16405 struct task_struct *tsk;
16406 int err;
16407
16408 - childregs = task_pt_regs(p);
16409 + childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
16410 *childregs = *regs;
16411 childregs->ax = 0;
16412 childregs->sp = sp;
16413
16414 p->thread.sp = (unsigned long) childregs;
16415 p->thread.sp0 = (unsigned long) (childregs+1);
16416 + p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
16417
16418 p->thread.ip = (unsigned long) ret_from_fork;
16419
16420 @@ -296,7 +297,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
16421 struct thread_struct *prev = &prev_p->thread,
16422 *next = &next_p->thread;
16423 int cpu = smp_processor_id();
16424 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
16425 + struct tss_struct *tss = init_tss + cpu;
16426 bool preload_fpu;
16427
16428 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
16429 @@ -331,6 +332,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
16430 */
16431 lazy_save_gs(prev->gs);
16432
16433 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16434 + __set_fs(task_thread_info(next_p)->addr_limit);
16435 +#endif
16436 +
16437 /*
16438 * Load the per-thread Thread-Local Storage descriptor.
16439 */
16440 @@ -366,6 +371,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
16441 */
16442 arch_end_context_switch(next_p);
16443
16444 + percpu_write(current_task, next_p);
16445 + percpu_write(current_tinfo, &next_p->tinfo);
16446 +
16447 if (preload_fpu)
16448 __math_state_restore();
16449
16450 @@ -375,8 +383,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
16451 if (prev->gs | next->gs)
16452 lazy_load_gs(next->gs);
16453
16454 - percpu_write(current_task, next_p);
16455 -
16456 return prev_p;
16457 }
16458
16459 @@ -406,4 +412,3 @@ unsigned long get_wchan(struct task_struct *p)
16460 } while (count++ < 16);
16461 return 0;
16462 }
16463 -
16464 diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
16465 index 3bd7e6e..90b2bcf 100644
16466 --- a/arch/x86/kernel/process_64.c
16467 +++ b/arch/x86/kernel/process_64.c
16468 @@ -89,7 +89,7 @@ static void __exit_idle(void)
16469 void exit_idle(void)
16470 {
16471 /* idle loop has pid 0 */
16472 - if (current->pid)
16473 + if (task_pid_nr(current))
16474 return;
16475 __exit_idle();
16476 }
16477 @@ -264,8 +264,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
16478 struct pt_regs *childregs;
16479 struct task_struct *me = current;
16480
16481 - childregs = ((struct pt_regs *)
16482 - (THREAD_SIZE + task_stack_page(p))) - 1;
16483 + childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 16;
16484 *childregs = *regs;
16485
16486 childregs->ax = 0;
16487 @@ -277,6 +276,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
16488 p->thread.sp = (unsigned long) childregs;
16489 p->thread.sp0 = (unsigned long) (childregs+1);
16490 p->thread.usersp = me->thread.usersp;
16491 + p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
16492
16493 set_tsk_thread_flag(p, TIF_FORK);
16494
16495 @@ -379,7 +379,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
16496 struct thread_struct *prev = &prev_p->thread;
16497 struct thread_struct *next = &next_p->thread;
16498 int cpu = smp_processor_id();
16499 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
16500 + struct tss_struct *tss = init_tss + cpu;
16501 unsigned fsindex, gsindex;
16502 bool preload_fpu;
16503
16504 @@ -475,10 +475,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
16505 prev->usersp = percpu_read(old_rsp);
16506 percpu_write(old_rsp, next->usersp);
16507 percpu_write(current_task, next_p);
16508 + percpu_write(current_tinfo, &next_p->tinfo);
16509
16510 - percpu_write(kernel_stack,
16511 - (unsigned long)task_stack_page(next_p) +
16512 - THREAD_SIZE - KERNEL_STACK_OFFSET);
16513 + percpu_write(kernel_stack, next->sp0);
16514
16515 /*
16516 * Now maybe reload the debug registers and handle I/O bitmaps
16517 @@ -540,12 +539,11 @@ unsigned long get_wchan(struct task_struct *p)
16518 if (!p || p == current || p->state == TASK_RUNNING)
16519 return 0;
16520 stack = (unsigned long)task_stack_page(p);
16521 - if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
16522 + if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
16523 return 0;
16524 fp = *(u64 *)(p->thread.sp);
16525 do {
16526 - if (fp < (unsigned long)stack ||
16527 - fp >= (unsigned long)stack+THREAD_SIZE)
16528 + if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
16529 return 0;
16530 ip = *(u64 *)(fp+8);
16531 if (!in_sched_functions(ip))
16532 diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
16533 index 8252879..d3219e0 100644
16534 --- a/arch/x86/kernel/ptrace.c
16535 +++ b/arch/x86/kernel/ptrace.c
16536 @@ -822,7 +822,7 @@ long arch_ptrace(struct task_struct *child, long request,
16537 unsigned long addr, unsigned long data)
16538 {
16539 int ret;
16540 - unsigned long __user *datap = (unsigned long __user *)data;
16541 + unsigned long __user *datap = (__force unsigned long __user *)data;
16542
16543 switch (request) {
16544 /* read the word at location addr in the USER area. */
16545 @@ -907,14 +907,14 @@ long arch_ptrace(struct task_struct *child, long request,
16546 if ((int) addr < 0)
16547 return -EIO;
16548 ret = do_get_thread_area(child, addr,
16549 - (struct user_desc __user *)data);
16550 + (__force struct user_desc __user *) data);
16551 break;
16552
16553 case PTRACE_SET_THREAD_AREA:
16554 if ((int) addr < 0)
16555 return -EIO;
16556 ret = do_set_thread_area(child, addr,
16557 - (struct user_desc __user *)data, 0);
16558 + (__force struct user_desc __user *) data, 0);
16559 break;
16560 #endif
16561
16562 @@ -1331,7 +1331,7 @@ static void fill_sigtrap_info(struct task_struct *tsk,
16563 memset(info, 0, sizeof(*info));
16564 info->si_signo = SIGTRAP;
16565 info->si_code = si_code;
16566 - info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
16567 + info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
16568 }
16569
16570 void user_single_step_siginfo(struct task_struct *tsk,
16571 diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
16572 index 42eb330..139955c 100644
16573 --- a/arch/x86/kernel/pvclock.c
16574 +++ b/arch/x86/kernel/pvclock.c
16575 @@ -81,11 +81,11 @@ unsigned long pvclock_tsc_khz(struct pvclock_vcpu_time_info *src)
16576 return pv_tsc_khz;
16577 }
16578
16579 -static atomic64_t last_value = ATOMIC64_INIT(0);
16580 +static atomic64_unchecked_t last_value = ATOMIC64_INIT(0);
16581
16582 void pvclock_resume(void)
16583 {
16584 - atomic64_set(&last_value, 0);
16585 + atomic64_set_unchecked(&last_value, 0);
16586 }
16587
16588 cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
16589 @@ -121,11 +121,11 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
16590 * updating at the same time, and one of them could be slightly behind,
16591 * making the assumption that last_value always go forward fail to hold.
16592 */
16593 - last = atomic64_read(&last_value);
16594 + last = atomic64_read_unchecked(&last_value);
16595 do {
16596 if (ret < last)
16597 return last;
16598 - last = atomic64_cmpxchg(&last_value, last, ret);
16599 + last = atomic64_cmpxchg_unchecked(&last_value, last, ret);
16600 } while (unlikely(last != ret));
16601
16602 return ret;
16603 diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
16604 index 37a458b..e63d183 100644
16605 --- a/arch/x86/kernel/reboot.c
16606 +++ b/arch/x86/kernel/reboot.c
16607 @@ -35,7 +35,7 @@ void (*pm_power_off)(void);
16608 EXPORT_SYMBOL(pm_power_off);
16609
16610 static const struct desc_ptr no_idt = {};
16611 -static int reboot_mode;
16612 +static unsigned short reboot_mode;
16613 enum reboot_type reboot_type = BOOT_ACPI;
16614 int reboot_force;
16615
16616 @@ -324,13 +324,17 @@ core_initcall(reboot_init);
16617 extern const unsigned char machine_real_restart_asm[];
16618 extern const u64 machine_real_restart_gdt[3];
16619
16620 -void machine_real_restart(unsigned int type)
16621 +__noreturn void machine_real_restart(unsigned int type)
16622 {
16623 void *restart_va;
16624 unsigned long restart_pa;
16625 - void (*restart_lowmem)(unsigned int);
16626 + void (* __noreturn restart_lowmem)(unsigned int);
16627 u64 *lowmem_gdt;
16628
16629 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
16630 + struct desc_struct *gdt;
16631 +#endif
16632 +
16633 local_irq_disable();
16634
16635 /* Write zero to CMOS register number 0x0f, which the BIOS POST
16636 @@ -356,14 +360,14 @@ void machine_real_restart(unsigned int type)
16637 boot)". This seems like a fairly standard thing that gets set by
16638 REBOOT.COM programs, and the previous reset routine did this
16639 too. */
16640 - *((unsigned short *)0x472) = reboot_mode;
16641 + *(unsigned short *)(__va(0x472)) = reboot_mode;
16642
16643 /* Patch the GDT in the low memory trampoline */
16644 lowmem_gdt = TRAMPOLINE_SYM(machine_real_restart_gdt);
16645
16646 restart_va = TRAMPOLINE_SYM(machine_real_restart_asm);
16647 restart_pa = virt_to_phys(restart_va);
16648 - restart_lowmem = (void (*)(unsigned int))restart_pa;
16649 + restart_lowmem = (void *)restart_pa;
16650
16651 /* GDT[0]: GDT self-pointer */
16652 lowmem_gdt[0] =
16653 @@ -374,7 +378,33 @@ void machine_real_restart(unsigned int type)
16654 GDT_ENTRY(0x009b, restart_pa, 0xffff);
16655
16656 /* Jump to the identity-mapped low memory code */
16657 +
16658 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
16659 + gdt = get_cpu_gdt_table(smp_processor_id());
16660 + pax_open_kernel();
16661 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16662 + gdt[GDT_ENTRY_KERNEL_DS].type = 3;
16663 + gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
16664 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
16665 +#endif
16666 +#ifdef CONFIG_PAX_KERNEXEC
16667 + gdt[GDT_ENTRY_KERNEL_CS].base0 = 0;
16668 + gdt[GDT_ENTRY_KERNEL_CS].base1 = 0;
16669 + gdt[GDT_ENTRY_KERNEL_CS].base2 = 0;
16670 + gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff;
16671 + gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf;
16672 + gdt[GDT_ENTRY_KERNEL_CS].g = 1;
16673 +#endif
16674 + pax_close_kernel();
16675 +#endif
16676 +
16677 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16678 + asm volatile("push %0; push %1; lret\n" : : "i" (__KERNEL_CS), "rm" (restart_lowmem), "a" (type));
16679 + unreachable();
16680 +#else
16681 restart_lowmem(type);
16682 +#endif
16683 +
16684 }
16685 #ifdef CONFIG_APM_MODULE
16686 EXPORT_SYMBOL(machine_real_restart);
16687 @@ -540,7 +570,7 @@ void __attribute__((weak)) mach_reboot_fixups(void)
16688 * try to force a triple fault and then cycle between hitting the keyboard
16689 * controller and doing that
16690 */
16691 -static void native_machine_emergency_restart(void)
16692 +__noreturn static void native_machine_emergency_restart(void)
16693 {
16694 int i;
16695 int attempt = 0;
16696 @@ -664,13 +694,13 @@ void native_machine_shutdown(void)
16697 #endif
16698 }
16699
16700 -static void __machine_emergency_restart(int emergency)
16701 +static __noreturn void __machine_emergency_restart(int emergency)
16702 {
16703 reboot_emergency = emergency;
16704 machine_ops.emergency_restart();
16705 }
16706
16707 -static void native_machine_restart(char *__unused)
16708 +static __noreturn void native_machine_restart(char *__unused)
16709 {
16710 printk("machine restart\n");
16711
16712 @@ -679,7 +709,7 @@ static void native_machine_restart(char *__unused)
16713 __machine_emergency_restart(0);
16714 }
16715
16716 -static void native_machine_halt(void)
16717 +static __noreturn void native_machine_halt(void)
16718 {
16719 /* stop other cpus and apics */
16720 machine_shutdown();
16721 @@ -690,7 +720,7 @@ static void native_machine_halt(void)
16722 stop_this_cpu(NULL);
16723 }
16724
16725 -static void native_machine_power_off(void)
16726 +__noreturn static void native_machine_power_off(void)
16727 {
16728 if (pm_power_off) {
16729 if (!reboot_force)
16730 @@ -699,6 +729,7 @@ static void native_machine_power_off(void)
16731 }
16732 /* a fallback in case there is no PM info available */
16733 tboot_shutdown(TB_SHUTDOWN_HALT);
16734 + unreachable();
16735 }
16736
16737 struct machine_ops machine_ops = {
16738 diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
16739 index 7a6f3b3..bed145d7 100644
16740 --- a/arch/x86/kernel/relocate_kernel_64.S
16741 +++ b/arch/x86/kernel/relocate_kernel_64.S
16742 @@ -11,6 +11,7 @@
16743 #include <asm/kexec.h>
16744 #include <asm/processor-flags.h>
16745 #include <asm/pgtable_types.h>
16746 +#include <asm/alternative-asm.h>
16747
16748 /*
16749 * Must be relocatable PIC code callable as a C function
16750 @@ -160,13 +161,14 @@ identity_mapped:
16751 xorq %rbp, %rbp
16752 xorq %r8, %r8
16753 xorq %r9, %r9
16754 - xorq %r10, %r9
16755 + xorq %r10, %r10
16756 xorq %r11, %r11
16757 xorq %r12, %r12
16758 xorq %r13, %r13
16759 xorq %r14, %r14
16760 xorq %r15, %r15
16761
16762 + pax_force_retaddr 0, 1
16763 ret
16764
16765 1:
16766 diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
16767 index cf0ef98..e3f780b 100644
16768 --- a/arch/x86/kernel/setup.c
16769 +++ b/arch/x86/kernel/setup.c
16770 @@ -447,7 +447,7 @@ static void __init parse_setup_data(void)
16771
16772 switch (data->type) {
16773 case SETUP_E820_EXT:
16774 - parse_e820_ext(data);
16775 + parse_e820_ext((struct setup_data __force_kernel *)data);
16776 break;
16777 case SETUP_DTB:
16778 add_dtb(pa_data);
16779 @@ -650,7 +650,7 @@ static void __init trim_bios_range(void)
16780 * area (640->1Mb) as ram even though it is not.
16781 * take them out.
16782 */
16783 - e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
16784 + e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
16785 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
16786 }
16787
16788 @@ -773,14 +773,14 @@ void __init setup_arch(char **cmdline_p)
16789
16790 if (!boot_params.hdr.root_flags)
16791 root_mountflags &= ~MS_RDONLY;
16792 - init_mm.start_code = (unsigned long) _text;
16793 - init_mm.end_code = (unsigned long) _etext;
16794 + init_mm.start_code = ktla_ktva((unsigned long) _text);
16795 + init_mm.end_code = ktla_ktva((unsigned long) _etext);
16796 init_mm.end_data = (unsigned long) _edata;
16797 init_mm.brk = _brk_end;
16798
16799 - code_resource.start = virt_to_phys(_text);
16800 - code_resource.end = virt_to_phys(_etext)-1;
16801 - data_resource.start = virt_to_phys(_etext);
16802 + code_resource.start = virt_to_phys(ktla_ktva(_text));
16803 + code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
16804 + data_resource.start = virt_to_phys(_sdata);
16805 data_resource.end = virt_to_phys(_edata)-1;
16806 bss_resource.start = virt_to_phys(&__bss_start);
16807 bss_resource.end = virt_to_phys(&__bss_stop)-1;
16808 diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
16809 index 71f4727..16dc9f7 100644
16810 --- a/arch/x86/kernel/setup_percpu.c
16811 +++ b/arch/x86/kernel/setup_percpu.c
16812 @@ -21,19 +21,17 @@
16813 #include <asm/cpu.h>
16814 #include <asm/stackprotector.h>
16815
16816 -DEFINE_PER_CPU(int, cpu_number);
16817 +#ifdef CONFIG_SMP
16818 +DEFINE_PER_CPU(unsigned int, cpu_number);
16819 EXPORT_PER_CPU_SYMBOL(cpu_number);
16820 +#endif
16821
16822 -#ifdef CONFIG_X86_64
16823 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
16824 -#else
16825 -#define BOOT_PERCPU_OFFSET 0
16826 -#endif
16827
16828 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
16829 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
16830
16831 -unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
16832 +unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
16833 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
16834 };
16835 EXPORT_SYMBOL(__per_cpu_offset);
16836 @@ -155,10 +153,10 @@ static inline void setup_percpu_segment(int cpu)
16837 {
16838 #ifdef CONFIG_X86_32
16839 struct desc_struct gdt;
16840 + unsigned long base = per_cpu_offset(cpu);
16841
16842 - pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
16843 - 0x2 | DESCTYPE_S, 0x8);
16844 - gdt.s = 1;
16845 + pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
16846 + 0x83 | DESCTYPE_S, 0xC);
16847 write_gdt_entry(get_cpu_gdt_table(cpu),
16848 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
16849 #endif
16850 @@ -207,6 +205,11 @@ void __init setup_per_cpu_areas(void)
16851 /* alrighty, percpu areas up and running */
16852 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
16853 for_each_possible_cpu(cpu) {
16854 +#ifdef CONFIG_CC_STACKPROTECTOR
16855 +#ifdef CONFIG_X86_32
16856 + unsigned long canary = per_cpu(stack_canary.canary, cpu);
16857 +#endif
16858 +#endif
16859 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
16860 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
16861 per_cpu(cpu_number, cpu) = cpu;
16862 @@ -247,6 +250,12 @@ void __init setup_per_cpu_areas(void)
16863 */
16864 set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
16865 #endif
16866 +#ifdef CONFIG_CC_STACKPROTECTOR
16867 +#ifdef CONFIG_X86_32
16868 + if (!cpu)
16869 + per_cpu(stack_canary.canary, cpu) = canary;
16870 +#endif
16871 +#endif
16872 /*
16873 * Up to this point, the boot CPU has been using .init.data
16874 * area. Reload any changed state for the boot CPU.
16875 diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
16876 index 54ddaeb2..22c3bdc 100644
16877 --- a/arch/x86/kernel/signal.c
16878 +++ b/arch/x86/kernel/signal.c
16879 @@ -198,7 +198,7 @@ static unsigned long align_sigframe(unsigned long sp)
16880 * Align the stack pointer according to the i386 ABI,
16881 * i.e. so that on function entry ((sp + 4) & 15) == 0.
16882 */
16883 - sp = ((sp + 4) & -16ul) - 4;
16884 + sp = ((sp - 12) & -16ul) - 4;
16885 #else /* !CONFIG_X86_32 */
16886 sp = round_down(sp, 16) - 8;
16887 #endif
16888 @@ -249,11 +249,11 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
16889 * Return an always-bogus address instead so we will die with SIGSEGV.
16890 */
16891 if (onsigstack && !likely(on_sig_stack(sp)))
16892 - return (void __user *)-1L;
16893 + return (__force void __user *)-1L;
16894
16895 /* save i387 state */
16896 if (used_math() && save_i387_xstate(*fpstate) < 0)
16897 - return (void __user *)-1L;
16898 + return (__force void __user *)-1L;
16899
16900 return (void __user *)sp;
16901 }
16902 @@ -308,9 +308,9 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
16903 }
16904
16905 if (current->mm->context.vdso)
16906 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
16907 + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
16908 else
16909 - restorer = &frame->retcode;
16910 + restorer = (void __user *)&frame->retcode;
16911 if (ka->sa.sa_flags & SA_RESTORER)
16912 restorer = ka->sa.sa_restorer;
16913
16914 @@ -324,7 +324,7 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
16915 * reasons and because gdb uses it as a signature to notice
16916 * signal handler stack frames.
16917 */
16918 - err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
16919 + err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
16920
16921 if (err)
16922 return -EFAULT;
16923 @@ -378,7 +378,10 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
16924 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
16925
16926 /* Set up to return from userspace. */
16927 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
16928 + if (current->mm->context.vdso)
16929 + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
16930 + else
16931 + restorer = (void __user *)&frame->retcode;
16932 if (ka->sa.sa_flags & SA_RESTORER)
16933 restorer = ka->sa.sa_restorer;
16934 put_user_ex(restorer, &frame->pretcode);
16935 @@ -390,7 +393,7 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
16936 * reasons and because gdb uses it as a signature to notice
16937 * signal handler stack frames.
16938 */
16939 - put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
16940 + put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
16941 } put_user_catch(err);
16942
16943 if (err)
16944 @@ -769,7 +772,7 @@ static void do_signal(struct pt_regs *regs)
16945 * X86_32: vm86 regs switched out by assembly code before reaching
16946 * here, so testing against kernel CS suffices.
16947 */
16948 - if (!user_mode(regs))
16949 + if (!user_mode_novm(regs))
16950 return;
16951
16952 signr = get_signal_to_deliver(&info, &ka, regs, NULL);
16953 diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
16954 index 9f548cb..caf76f7 100644
16955 --- a/arch/x86/kernel/smpboot.c
16956 +++ b/arch/x86/kernel/smpboot.c
16957 @@ -709,17 +709,20 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
16958 set_idle_for_cpu(cpu, c_idle.idle);
16959 do_rest:
16960 per_cpu(current_task, cpu) = c_idle.idle;
16961 + per_cpu(current_tinfo, cpu) = &c_idle.idle->tinfo;
16962 #ifdef CONFIG_X86_32
16963 /* Stack for startup_32 can be just as for start_secondary onwards */
16964 irq_ctx_init(cpu);
16965 #else
16966 clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
16967 initial_gs = per_cpu_offset(cpu);
16968 - per_cpu(kernel_stack, cpu) =
16969 - (unsigned long)task_stack_page(c_idle.idle) -
16970 - KERNEL_STACK_OFFSET + THREAD_SIZE;
16971 + per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(c_idle.idle) - 16 + THREAD_SIZE;
16972 #endif
16973 +
16974 + pax_open_kernel();
16975 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
16976 + pax_close_kernel();
16977 +
16978 initial_code = (unsigned long)start_secondary;
16979 stack_start = c_idle.idle->thread.sp;
16980
16981 @@ -861,6 +864,12 @@ int __cpuinit native_cpu_up(unsigned int cpu)
16982
16983 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
16984
16985 +#ifdef CONFIG_PAX_PER_CPU_PGD
16986 + clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
16987 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
16988 + KERNEL_PGD_PTRS);
16989 +#endif
16990 +
16991 err = do_boot_cpu(apicid, cpu);
16992 if (err) {
16993 pr_debug("do_boot_cpu failed %d\n", err);
16994 diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
16995 index c346d11..d43b163 100644
16996 --- a/arch/x86/kernel/step.c
16997 +++ b/arch/x86/kernel/step.c
16998 @@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
16999 struct desc_struct *desc;
17000 unsigned long base;
17001
17002 - seg &= ~7UL;
17003 + seg >>= 3;
17004
17005 mutex_lock(&child->mm->context.lock);
17006 - if (unlikely((seg >> 3) >= child->mm->context.size))
17007 + if (unlikely(seg >= child->mm->context.size))
17008 addr = -1L; /* bogus selector, access would fault */
17009 else {
17010 desc = child->mm->context.ldt + seg;
17011 @@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
17012 addr += base;
17013 }
17014 mutex_unlock(&child->mm->context.lock);
17015 - }
17016 + } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
17017 + addr = ktla_ktva(addr);
17018
17019 return addr;
17020 }
17021 @@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
17022 unsigned char opcode[15];
17023 unsigned long addr = convert_ip_to_linear(child, regs);
17024
17025 + if (addr == -EINVAL)
17026 + return 0;
17027 +
17028 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
17029 for (i = 0; i < copied; i++) {
17030 switch (opcode[i]) {
17031 diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
17032 index 0b0cb5f..db6b9ed 100644
17033 --- a/arch/x86/kernel/sys_i386_32.c
17034 +++ b/arch/x86/kernel/sys_i386_32.c
17035 @@ -24,17 +24,224 @@
17036
17037 #include <asm/syscalls.h>
17038
17039 -/*
17040 - * Do a system call from kernel instead of calling sys_execve so we
17041 - * end up with proper pt_regs.
17042 - */
17043 -int kernel_execve(const char *filename,
17044 - const char *const argv[],
17045 - const char *const envp[])
17046 +int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
17047 {
17048 - long __res;
17049 - asm volatile ("int $0x80"
17050 - : "=a" (__res)
17051 - : "0" (__NR_execve), "b" (filename), "c" (argv), "d" (envp) : "memory");
17052 - return __res;
17053 + unsigned long pax_task_size = TASK_SIZE;
17054 +
17055 +#ifdef CONFIG_PAX_SEGMEXEC
17056 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
17057 + pax_task_size = SEGMEXEC_TASK_SIZE;
17058 +#endif
17059 +
17060 + if (len > pax_task_size || addr > pax_task_size - len)
17061 + return -EINVAL;
17062 +
17063 + return 0;
17064 +}
17065 +
17066 +unsigned long
17067 +arch_get_unmapped_area(struct file *filp, unsigned long addr,
17068 + unsigned long len, unsigned long pgoff, unsigned long flags)
17069 +{
17070 + struct mm_struct *mm = current->mm;
17071 + struct vm_area_struct *vma;
17072 + unsigned long start_addr, pax_task_size = TASK_SIZE;
17073 +
17074 +#ifdef CONFIG_PAX_SEGMEXEC
17075 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
17076 + pax_task_size = SEGMEXEC_TASK_SIZE;
17077 +#endif
17078 +
17079 + pax_task_size -= PAGE_SIZE;
17080 +
17081 + if (len > pax_task_size)
17082 + return -ENOMEM;
17083 +
17084 + if (flags & MAP_FIXED)
17085 + return addr;
17086 +
17087 +#ifdef CONFIG_PAX_RANDMMAP
17088 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
17089 +#endif
17090 +
17091 + if (addr) {
17092 + addr = PAGE_ALIGN(addr);
17093 + if (pax_task_size - len >= addr) {
17094 + vma = find_vma(mm, addr);
17095 + if (check_heap_stack_gap(vma, addr, len))
17096 + return addr;
17097 + }
17098 + }
17099 + if (len > mm->cached_hole_size) {
17100 + start_addr = addr = mm->free_area_cache;
17101 + } else {
17102 + start_addr = addr = mm->mmap_base;
17103 + mm->cached_hole_size = 0;
17104 + }
17105 +
17106 +#ifdef CONFIG_PAX_PAGEEXEC
17107 + if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
17108 + start_addr = 0x00110000UL;
17109 +
17110 +#ifdef CONFIG_PAX_RANDMMAP
17111 + if (mm->pax_flags & MF_PAX_RANDMMAP)
17112 + start_addr += mm->delta_mmap & 0x03FFF000UL;
17113 +#endif
17114 +
17115 + if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
17116 + start_addr = addr = mm->mmap_base;
17117 + else
17118 + addr = start_addr;
17119 + }
17120 +#endif
17121 +
17122 +full_search:
17123 + for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
17124 + /* At this point: (!vma || addr < vma->vm_end). */
17125 + if (pax_task_size - len < addr) {
17126 + /*
17127 + * Start a new search - just in case we missed
17128 + * some holes.
17129 + */
17130 + if (start_addr != mm->mmap_base) {
17131 + start_addr = addr = mm->mmap_base;
17132 + mm->cached_hole_size = 0;
17133 + goto full_search;
17134 + }
17135 + return -ENOMEM;
17136 + }
17137 + if (check_heap_stack_gap(vma, addr, len))
17138 + break;
17139 + if (addr + mm->cached_hole_size < vma->vm_start)
17140 + mm->cached_hole_size = vma->vm_start - addr;
17141 + addr = vma->vm_end;
17142 + if (mm->start_brk <= addr && addr < mm->mmap_base) {
17143 + start_addr = addr = mm->mmap_base;
17144 + mm->cached_hole_size = 0;
17145 + goto full_search;
17146 + }
17147 + }
17148 +
17149 + /*
17150 + * Remember the place where we stopped the search:
17151 + */
17152 + mm->free_area_cache = addr + len;
17153 + return addr;
17154 +}
17155 +
17156 +unsigned long
17157 +arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
17158 + const unsigned long len, const unsigned long pgoff,
17159 + const unsigned long flags)
17160 +{
17161 + struct vm_area_struct *vma;
17162 + struct mm_struct *mm = current->mm;
17163 + unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
17164 +
17165 +#ifdef CONFIG_PAX_SEGMEXEC
17166 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
17167 + pax_task_size = SEGMEXEC_TASK_SIZE;
17168 +#endif
17169 +
17170 + pax_task_size -= PAGE_SIZE;
17171 +
17172 + /* requested length too big for entire address space */
17173 + if (len > pax_task_size)
17174 + return -ENOMEM;
17175 +
17176 + if (flags & MAP_FIXED)
17177 + return addr;
17178 +
17179 +#ifdef CONFIG_PAX_PAGEEXEC
17180 + if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
17181 + goto bottomup;
17182 +#endif
17183 +
17184 +#ifdef CONFIG_PAX_RANDMMAP
17185 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
17186 +#endif
17187 +
17188 + /* requesting a specific address */
17189 + if (addr) {
17190 + addr = PAGE_ALIGN(addr);
17191 + if (pax_task_size - len >= addr) {
17192 + vma = find_vma(mm, addr);
17193 + if (check_heap_stack_gap(vma, addr, len))
17194 + return addr;
17195 + }
17196 + }
17197 +
17198 + /* check if free_area_cache is useful for us */
17199 + if (len <= mm->cached_hole_size) {
17200 + mm->cached_hole_size = 0;
17201 + mm->free_area_cache = mm->mmap_base;
17202 + }
17203 +
17204 + /* either no address requested or can't fit in requested address hole */
17205 + addr = mm->free_area_cache;
17206 +
17207 + /* make sure it can fit in the remaining address space */
17208 + if (addr > len) {
17209 + vma = find_vma(mm, addr-len);
17210 + if (check_heap_stack_gap(vma, addr - len, len))
17211 + /* remember the address as a hint for next time */
17212 + return (mm->free_area_cache = addr-len);
17213 + }
17214 +
17215 + if (mm->mmap_base < len)
17216 + goto bottomup;
17217 +
17218 + addr = mm->mmap_base-len;
17219 +
17220 + do {
17221 + /*
17222 + * Lookup failure means no vma is above this address,
17223 + * else if new region fits below vma->vm_start,
17224 + * return with success:
17225 + */
17226 + vma = find_vma(mm, addr);
17227 + if (check_heap_stack_gap(vma, addr, len))
17228 + /* remember the address as a hint for next time */
17229 + return (mm->free_area_cache = addr);
17230 +
17231 + /* remember the largest hole we saw so far */
17232 + if (addr + mm->cached_hole_size < vma->vm_start)
17233 + mm->cached_hole_size = vma->vm_start - addr;
17234 +
17235 + /* try just below the current vma->vm_start */
17236 + addr = skip_heap_stack_gap(vma, len);
17237 + } while (!IS_ERR_VALUE(addr));
17238 +
17239 +bottomup:
17240 + /*
17241 + * A failed mmap() very likely causes application failure,
17242 + * so fall back to the bottom-up function here. This scenario
17243 + * can happen with large stack limits and large mmap()
17244 + * allocations.
17245 + */
17246 +
17247 +#ifdef CONFIG_PAX_SEGMEXEC
17248 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
17249 + mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
17250 + else
17251 +#endif
17252 +
17253 + mm->mmap_base = TASK_UNMAPPED_BASE;
17254 +
17255 +#ifdef CONFIG_PAX_RANDMMAP
17256 + if (mm->pax_flags & MF_PAX_RANDMMAP)
17257 + mm->mmap_base += mm->delta_mmap;
17258 +#endif
17259 +
17260 + mm->free_area_cache = mm->mmap_base;
17261 + mm->cached_hole_size = ~0UL;
17262 + addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
17263 + /*
17264 + * Restore the topdown base:
17265 + */
17266 + mm->mmap_base = base;
17267 + mm->free_area_cache = base;
17268 + mm->cached_hole_size = ~0UL;
17269 +
17270 + return addr;
17271 }
17272 diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
17273 index 0514890..3dbebce 100644
17274 --- a/arch/x86/kernel/sys_x86_64.c
17275 +++ b/arch/x86/kernel/sys_x86_64.c
17276 @@ -95,8 +95,8 @@ out:
17277 return error;
17278 }
17279
17280 -static void find_start_end(unsigned long flags, unsigned long *begin,
17281 - unsigned long *end)
17282 +static void find_start_end(struct mm_struct *mm, unsigned long flags,
17283 + unsigned long *begin, unsigned long *end)
17284 {
17285 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
17286 unsigned long new_begin;
17287 @@ -115,7 +115,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
17288 *begin = new_begin;
17289 }
17290 } else {
17291 - *begin = TASK_UNMAPPED_BASE;
17292 + *begin = mm->mmap_base;
17293 *end = TASK_SIZE;
17294 }
17295 }
17296 @@ -132,16 +132,19 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
17297 if (flags & MAP_FIXED)
17298 return addr;
17299
17300 - find_start_end(flags, &begin, &end);
17301 + find_start_end(mm, flags, &begin, &end);
17302
17303 if (len > end)
17304 return -ENOMEM;
17305
17306 +#ifdef CONFIG_PAX_RANDMMAP
17307 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
17308 +#endif
17309 +
17310 if (addr) {
17311 addr = PAGE_ALIGN(addr);
17312 vma = find_vma(mm, addr);
17313 - if (end - len >= addr &&
17314 - (!vma || addr + len <= vma->vm_start))
17315 + if (end - len >= addr && check_heap_stack_gap(vma, addr, len))
17316 return addr;
17317 }
17318 if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
17319 @@ -172,7 +175,7 @@ full_search:
17320 }
17321 return -ENOMEM;
17322 }
17323 - if (!vma || addr + len <= vma->vm_start) {
17324 + if (check_heap_stack_gap(vma, addr, len)) {
17325 /*
17326 * Remember the place where we stopped the search:
17327 */
17328 @@ -195,7 +198,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
17329 {
17330 struct vm_area_struct *vma;
17331 struct mm_struct *mm = current->mm;
17332 - unsigned long addr = addr0;
17333 + unsigned long base = mm->mmap_base, addr = addr0;
17334
17335 /* requested length too big for entire address space */
17336 if (len > TASK_SIZE)
17337 @@ -208,13 +211,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
17338 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
17339 goto bottomup;
17340
17341 +#ifdef CONFIG_PAX_RANDMMAP
17342 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
17343 +#endif
17344 +
17345 /* requesting a specific address */
17346 if (addr) {
17347 addr = PAGE_ALIGN(addr);
17348 - vma = find_vma(mm, addr);
17349 - if (TASK_SIZE - len >= addr &&
17350 - (!vma || addr + len <= vma->vm_start))
17351 - return addr;
17352 + if (TASK_SIZE - len >= addr) {
17353 + vma = find_vma(mm, addr);
17354 + if (check_heap_stack_gap(vma, addr, len))
17355 + return addr;
17356 + }
17357 }
17358
17359 /* check if free_area_cache is useful for us */
17360 @@ -232,7 +240,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
17361 ALIGN_TOPDOWN);
17362
17363 vma = find_vma(mm, tmp_addr);
17364 - if (!vma || tmp_addr + len <= vma->vm_start)
17365 + if (check_heap_stack_gap(vma, tmp_addr, len))
17366 /* remember the address as a hint for next time */
17367 return mm->free_area_cache = tmp_addr;
17368 }
17369 @@ -251,7 +259,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
17370 * return with success:
17371 */
17372 vma = find_vma(mm, addr);
17373 - if (!vma || addr+len <= vma->vm_start)
17374 + if (check_heap_stack_gap(vma, addr, len))
17375 /* remember the address as a hint for next time */
17376 return mm->free_area_cache = addr;
17377
17378 @@ -260,8 +268,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
17379 mm->cached_hole_size = vma->vm_start - addr;
17380
17381 /* try just below the current vma->vm_start */
17382 - addr = vma->vm_start-len;
17383 - } while (len < vma->vm_start);
17384 + addr = skip_heap_stack_gap(vma, len);
17385 + } while (!IS_ERR_VALUE(addr));
17386
17387 bottomup:
17388 /*
17389 @@ -270,13 +278,21 @@ bottomup:
17390 * can happen with large stack limits and large mmap()
17391 * allocations.
17392 */
17393 + mm->mmap_base = TASK_UNMAPPED_BASE;
17394 +
17395 +#ifdef CONFIG_PAX_RANDMMAP
17396 + if (mm->pax_flags & MF_PAX_RANDMMAP)
17397 + mm->mmap_base += mm->delta_mmap;
17398 +#endif
17399 +
17400 + mm->free_area_cache = mm->mmap_base;
17401 mm->cached_hole_size = ~0UL;
17402 - mm->free_area_cache = TASK_UNMAPPED_BASE;
17403 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
17404 /*
17405 * Restore the topdown base:
17406 */
17407 - mm->free_area_cache = mm->mmap_base;
17408 + mm->mmap_base = base;
17409 + mm->free_area_cache = base;
17410 mm->cached_hole_size = ~0UL;
17411
17412 return addr;
17413 diff --git a/arch/x86/kernel/syscall_table_32.S b/arch/x86/kernel/syscall_table_32.S
17414 index 9a0e312..e6f66f2 100644
17415 --- a/arch/x86/kernel/syscall_table_32.S
17416 +++ b/arch/x86/kernel/syscall_table_32.S
17417 @@ -1,3 +1,4 @@
17418 +.section .rodata,"a",@progbits
17419 ENTRY(sys_call_table)
17420 .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */
17421 .long sys_exit
17422 diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
17423 index e2410e2..4fe3fbc 100644
17424 --- a/arch/x86/kernel/tboot.c
17425 +++ b/arch/x86/kernel/tboot.c
17426 @@ -219,7 +219,7 @@ static int tboot_setup_sleep(void)
17427
17428 void tboot_shutdown(u32 shutdown_type)
17429 {
17430 - void (*shutdown)(void);
17431 + void (* __noreturn shutdown)(void);
17432
17433 if (!tboot_enabled())
17434 return;
17435 @@ -241,7 +241,7 @@ void tboot_shutdown(u32 shutdown_type)
17436
17437 switch_to_tboot_pt();
17438
17439 - shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
17440 + shutdown = (void *)tboot->shutdown_entry;
17441 shutdown();
17442
17443 /* should not reach here */
17444 @@ -298,7 +298,7 @@ void tboot_sleep(u8 sleep_state, u32 pm1a_control, u32 pm1b_control)
17445 tboot_shutdown(acpi_shutdown_map[sleep_state]);
17446 }
17447
17448 -static atomic_t ap_wfs_count;
17449 +static atomic_unchecked_t ap_wfs_count;
17450
17451 static int tboot_wait_for_aps(int num_aps)
17452 {
17453 @@ -322,9 +322,9 @@ static int __cpuinit tboot_cpu_callback(struct notifier_block *nfb,
17454 {
17455 switch (action) {
17456 case CPU_DYING:
17457 - atomic_inc(&ap_wfs_count);
17458 + atomic_inc_unchecked(&ap_wfs_count);
17459 if (num_online_cpus() == 1)
17460 - if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
17461 + if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
17462 return NOTIFY_BAD;
17463 break;
17464 }
17465 @@ -343,7 +343,7 @@ static __init int tboot_late_init(void)
17466
17467 tboot_create_trampoline();
17468
17469 - atomic_set(&ap_wfs_count, 0);
17470 + atomic_set_unchecked(&ap_wfs_count, 0);
17471 register_hotcpu_notifier(&tboot_cpu_notifier);
17472 return 0;
17473 }
17474 diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
17475 index dd5fbf4..b7f2232 100644
17476 --- a/arch/x86/kernel/time.c
17477 +++ b/arch/x86/kernel/time.c
17478 @@ -31,9 +31,9 @@ unsigned long profile_pc(struct pt_regs *regs)
17479 {
17480 unsigned long pc = instruction_pointer(regs);
17481
17482 - if (!user_mode_vm(regs) && in_lock_functions(pc)) {
17483 + if (!user_mode(regs) && in_lock_functions(pc)) {
17484 #ifdef CONFIG_FRAME_POINTER
17485 - return *(unsigned long *)(regs->bp + sizeof(long));
17486 + return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
17487 #else
17488 unsigned long *sp =
17489 (unsigned long *)kernel_stack_pointer(regs);
17490 @@ -42,11 +42,17 @@ unsigned long profile_pc(struct pt_regs *regs)
17491 * or above a saved flags. Eflags has bits 22-31 zero,
17492 * kernel addresses don't.
17493 */
17494 +
17495 +#ifdef CONFIG_PAX_KERNEXEC
17496 + return ktla_ktva(sp[0]);
17497 +#else
17498 if (sp[0] >> 22)
17499 return sp[0];
17500 if (sp[1] >> 22)
17501 return sp[1];
17502 #endif
17503 +
17504 +#endif
17505 }
17506 return pc;
17507 }
17508 diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
17509 index 6bb7b85..dd853e1 100644
17510 --- a/arch/x86/kernel/tls.c
17511 +++ b/arch/x86/kernel/tls.c
17512 @@ -85,6 +85,11 @@ int do_set_thread_area(struct task_struct *p, int idx,
17513 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
17514 return -EINVAL;
17515
17516 +#ifdef CONFIG_PAX_SEGMEXEC
17517 + if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
17518 + return -EINVAL;
17519 +#endif
17520 +
17521 set_tls_desc(p, idx, &info, 1);
17522
17523 return 0;
17524 diff --git a/arch/x86/kernel/trampoline_32.S b/arch/x86/kernel/trampoline_32.S
17525 index 451c0a7..e57f551 100644
17526 --- a/arch/x86/kernel/trampoline_32.S
17527 +++ b/arch/x86/kernel/trampoline_32.S
17528 @@ -32,6 +32,12 @@
17529 #include <asm/segment.h>
17530 #include <asm/page_types.h>
17531
17532 +#ifdef CONFIG_PAX_KERNEXEC
17533 +#define ta(X) (X)
17534 +#else
17535 +#define ta(X) ((X) - __PAGE_OFFSET)
17536 +#endif
17537 +
17538 #ifdef CONFIG_SMP
17539
17540 .section ".x86_trampoline","a"
17541 @@ -62,7 +68,7 @@ r_base = .
17542 inc %ax # protected mode (PE) bit
17543 lmsw %ax # into protected mode
17544 # flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S
17545 - ljmpl $__BOOT_CS, $(startup_32_smp-__PAGE_OFFSET)
17546 + ljmpl $__BOOT_CS, $ta(startup_32_smp)
17547
17548 # These need to be in the same 64K segment as the above;
17549 # hence we don't use the boot_gdt_descr defined in head.S
17550 diff --git a/arch/x86/kernel/trampoline_64.S b/arch/x86/kernel/trampoline_64.S
17551 index 09ff517..df19fbff 100644
17552 --- a/arch/x86/kernel/trampoline_64.S
17553 +++ b/arch/x86/kernel/trampoline_64.S
17554 @@ -90,7 +90,7 @@ startup_32:
17555 movl $__KERNEL_DS, %eax # Initialize the %ds segment register
17556 movl %eax, %ds
17557
17558 - movl $X86_CR4_PAE, %eax
17559 + movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
17560 movl %eax, %cr4 # Enable PAE mode
17561
17562 # Setup trampoline 4 level pagetables
17563 @@ -138,7 +138,7 @@ tidt:
17564 # so the kernel can live anywhere
17565 .balign 4
17566 tgdt:
17567 - .short tgdt_end - tgdt # gdt limit
17568 + .short tgdt_end - tgdt - 1 # gdt limit
17569 .long tgdt - r_base
17570 .short 0
17571 .quad 0x00cf9b000000ffff # __KERNEL32_CS
17572 diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
17573 index a8e3eb8..c9dbd7d 100644
17574 --- a/arch/x86/kernel/traps.c
17575 +++ b/arch/x86/kernel/traps.c
17576 @@ -70,12 +70,6 @@ asmlinkage int system_call(void);
17577
17578 /* Do we ignore FPU interrupts ? */
17579 char ignore_fpu_irq;
17580 -
17581 -/*
17582 - * The IDT has to be page-aligned to simplify the Pentium
17583 - * F0 0F bug workaround.
17584 - */
17585 -gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
17586 #endif
17587
17588 DECLARE_BITMAP(used_vectors, NR_VECTORS);
17589 @@ -108,13 +102,13 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
17590 }
17591
17592 static void __kprobes
17593 -do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
17594 +do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
17595 long error_code, siginfo_t *info)
17596 {
17597 struct task_struct *tsk = current;
17598
17599 #ifdef CONFIG_X86_32
17600 - if (regs->flags & X86_VM_MASK) {
17601 + if (v8086_mode(regs)) {
17602 /*
17603 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
17604 * On nmi (interrupt 2), do_trap should not be called.
17605 @@ -125,7 +119,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
17606 }
17607 #endif
17608
17609 - if (!user_mode(regs))
17610 + if (!user_mode_novm(regs))
17611 goto kernel_trap;
17612
17613 #ifdef CONFIG_X86_32
17614 @@ -148,7 +142,7 @@ trap_signal:
17615 printk_ratelimit()) {
17616 printk(KERN_INFO
17617 "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
17618 - tsk->comm, tsk->pid, str,
17619 + tsk->comm, task_pid_nr(tsk), str,
17620 regs->ip, regs->sp, error_code);
17621 print_vma_addr(" in ", regs->ip);
17622 printk("\n");
17623 @@ -165,8 +159,20 @@ kernel_trap:
17624 if (!fixup_exception(regs)) {
17625 tsk->thread.error_code = error_code;
17626 tsk->thread.trap_no = trapnr;
17627 +
17628 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17629 + if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
17630 + str = "PAX: suspicious stack segment fault";
17631 +#endif
17632 +
17633 die(str, regs, error_code);
17634 }
17635 +
17636 +#ifdef CONFIG_PAX_REFCOUNT
17637 + if (trapnr == 4)
17638 + pax_report_refcount_overflow(regs);
17639 +#endif
17640 +
17641 return;
17642
17643 #ifdef CONFIG_X86_32
17644 @@ -255,14 +261,30 @@ do_general_protection(struct pt_regs *regs, long error_code)
17645 conditional_sti(regs);
17646
17647 #ifdef CONFIG_X86_32
17648 - if (regs->flags & X86_VM_MASK)
17649 + if (v8086_mode(regs))
17650 goto gp_in_vm86;
17651 #endif
17652
17653 tsk = current;
17654 - if (!user_mode(regs))
17655 + if (!user_mode_novm(regs))
17656 goto gp_in_kernel;
17657
17658 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
17659 + if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
17660 + struct mm_struct *mm = tsk->mm;
17661 + unsigned long limit;
17662 +
17663 + down_write(&mm->mmap_sem);
17664 + limit = mm->context.user_cs_limit;
17665 + if (limit < TASK_SIZE) {
17666 + track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
17667 + up_write(&mm->mmap_sem);
17668 + return;
17669 + }
17670 + up_write(&mm->mmap_sem);
17671 + }
17672 +#endif
17673 +
17674 tsk->thread.error_code = error_code;
17675 tsk->thread.trap_no = 13;
17676
17677 @@ -295,6 +317,13 @@ gp_in_kernel:
17678 if (notify_die(DIE_GPF, "general protection fault", regs,
17679 error_code, 13, SIGSEGV) == NOTIFY_STOP)
17680 return;
17681 +
17682 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17683 + if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
17684 + die("PAX: suspicious general protection fault", regs, error_code);
17685 + else
17686 +#endif
17687 +
17688 die("general protection fault", regs, error_code);
17689 }
17690
17691 @@ -414,7 +443,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
17692 /* It's safe to allow irq's after DR6 has been saved */
17693 preempt_conditional_sti(regs);
17694
17695 - if (regs->flags & X86_VM_MASK) {
17696 + if (v8086_mode(regs)) {
17697 handle_vm86_trap((struct kernel_vm86_regs *) regs,
17698 error_code, 1);
17699 preempt_conditional_cli(regs);
17700 @@ -428,7 +457,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
17701 * We already checked v86 mode above, so we can check for kernel mode
17702 * by just checking the CPL of CS.
17703 */
17704 - if ((dr6 & DR_STEP) && !user_mode(regs)) {
17705 + if ((dr6 & DR_STEP) && !user_mode_novm(regs)) {
17706 tsk->thread.debugreg6 &= ~DR_STEP;
17707 set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
17708 regs->flags &= ~X86_EFLAGS_TF;
17709 @@ -457,7 +486,7 @@ void math_error(struct pt_regs *regs, int error_code, int trapnr)
17710 return;
17711 conditional_sti(regs);
17712
17713 - if (!user_mode_vm(regs))
17714 + if (!user_mode(regs))
17715 {
17716 if (!fixup_exception(regs)) {
17717 task->thread.error_code = error_code;
17718 @@ -568,7 +597,7 @@ asmlinkage void __attribute__((weak)) smp_threshold_interrupt(void)
17719 void __math_state_restore(void)
17720 {
17721 struct thread_info *thread = current_thread_info();
17722 - struct task_struct *tsk = thread->task;
17723 + struct task_struct *tsk = current;
17724
17725 /*
17726 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
17727 @@ -595,8 +624,7 @@ void __math_state_restore(void)
17728 */
17729 asmlinkage void math_state_restore(void)
17730 {
17731 - struct thread_info *thread = current_thread_info();
17732 - struct task_struct *tsk = thread->task;
17733 + struct task_struct *tsk = current;
17734
17735 if (!tsk_used_math(tsk)) {
17736 local_irq_enable();
17737 diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
17738 index b9242ba..50c5edd 100644
17739 --- a/arch/x86/kernel/verify_cpu.S
17740 +++ b/arch/x86/kernel/verify_cpu.S
17741 @@ -20,6 +20,7 @@
17742 * arch/x86/boot/compressed/head_64.S: Boot cpu verification
17743 * arch/x86/kernel/trampoline_64.S: secondary processor verification
17744 * arch/x86/kernel/head_32.S: processor startup
17745 + * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
17746 *
17747 * verify_cpu, returns the status of longmode and SSE in register %eax.
17748 * 0: Success 1: Failure
17749 diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
17750 index 863f875..4307295 100644
17751 --- a/arch/x86/kernel/vm86_32.c
17752 +++ b/arch/x86/kernel/vm86_32.c
17753 @@ -41,6 +41,7 @@
17754 #include <linux/ptrace.h>
17755 #include <linux/audit.h>
17756 #include <linux/stddef.h>
17757 +#include <linux/grsecurity.h>
17758
17759 #include <asm/uaccess.h>
17760 #include <asm/io.h>
17761 @@ -148,7 +149,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
17762 do_exit(SIGSEGV);
17763 }
17764
17765 - tss = &per_cpu(init_tss, get_cpu());
17766 + tss = init_tss + get_cpu();
17767 current->thread.sp0 = current->thread.saved_sp0;
17768 current->thread.sysenter_cs = __KERNEL_CS;
17769 load_sp0(tss, &current->thread);
17770 @@ -208,6 +209,13 @@ int sys_vm86old(struct vm86_struct __user *v86, struct pt_regs *regs)
17771 struct task_struct *tsk;
17772 int tmp, ret = -EPERM;
17773
17774 +#ifdef CONFIG_GRKERNSEC_VM86
17775 + if (!capable(CAP_SYS_RAWIO)) {
17776 + gr_handle_vm86();
17777 + goto out;
17778 + }
17779 +#endif
17780 +
17781 tsk = current;
17782 if (tsk->thread.saved_sp0)
17783 goto out;
17784 @@ -238,6 +246,14 @@ int sys_vm86(unsigned long cmd, unsigned long arg, struct pt_regs *regs)
17785 int tmp, ret;
17786 struct vm86plus_struct __user *v86;
17787
17788 +#ifdef CONFIG_GRKERNSEC_VM86
17789 + if (!capable(CAP_SYS_RAWIO)) {
17790 + gr_handle_vm86();
17791 + ret = -EPERM;
17792 + goto out;
17793 + }
17794 +#endif
17795 +
17796 tsk = current;
17797 switch (cmd) {
17798 case VM86_REQUEST_IRQ:
17799 @@ -324,7 +340,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
17800 tsk->thread.saved_fs = info->regs32->fs;
17801 tsk->thread.saved_gs = get_user_gs(info->regs32);
17802
17803 - tss = &per_cpu(init_tss, get_cpu());
17804 + tss = init_tss + get_cpu();
17805 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
17806 if (cpu_has_sep)
17807 tsk->thread.sysenter_cs = 0;
17808 @@ -529,7 +545,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
17809 goto cannot_handle;
17810 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
17811 goto cannot_handle;
17812 - intr_ptr = (unsigned long __user *) (i << 2);
17813 + intr_ptr = (__force unsigned long __user *) (i << 2);
17814 if (get_user(segoffs, intr_ptr))
17815 goto cannot_handle;
17816 if ((segoffs >> 16) == BIOSSEG)
17817 diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
17818 index 0f703f1..9e15f64 100644
17819 --- a/arch/x86/kernel/vmlinux.lds.S
17820 +++ b/arch/x86/kernel/vmlinux.lds.S
17821 @@ -26,6 +26,13 @@
17822 #include <asm/page_types.h>
17823 #include <asm/cache.h>
17824 #include <asm/boot.h>
17825 +#include <asm/segment.h>
17826 +
17827 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17828 +#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
17829 +#else
17830 +#define __KERNEL_TEXT_OFFSET 0
17831 +#endif
17832
17833 #undef i386 /* in case the preprocessor is a 32bit one */
17834
17835 @@ -69,30 +76,43 @@ jiffies_64 = jiffies;
17836
17837 PHDRS {
17838 text PT_LOAD FLAGS(5); /* R_E */
17839 +#ifdef CONFIG_X86_32
17840 + module PT_LOAD FLAGS(5); /* R_E */
17841 +#endif
17842 +#ifdef CONFIG_XEN
17843 + rodata PT_LOAD FLAGS(5); /* R_E */
17844 +#else
17845 + rodata PT_LOAD FLAGS(4); /* R__ */
17846 +#endif
17847 data PT_LOAD FLAGS(6); /* RW_ */
17848 -#ifdef CONFIG_X86_64
17849 + init.begin PT_LOAD FLAGS(6); /* RW_ */
17850 #ifdef CONFIG_SMP
17851 percpu PT_LOAD FLAGS(6); /* RW_ */
17852 #endif
17853 + text.init PT_LOAD FLAGS(5); /* R_E */
17854 + text.exit PT_LOAD FLAGS(5); /* R_E */
17855 init PT_LOAD FLAGS(7); /* RWE */
17856 -#endif
17857 note PT_NOTE FLAGS(0); /* ___ */
17858 }
17859
17860 SECTIONS
17861 {
17862 #ifdef CONFIG_X86_32
17863 - . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
17864 - phys_startup_32 = startup_32 - LOAD_OFFSET;
17865 + . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
17866 #else
17867 - . = __START_KERNEL;
17868 - phys_startup_64 = startup_64 - LOAD_OFFSET;
17869 + . = __START_KERNEL;
17870 #endif
17871
17872 /* Text and read-only data */
17873 - .text : AT(ADDR(.text) - LOAD_OFFSET) {
17874 - _text = .;
17875 + .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
17876 /* bootstrapping code */
17877 +#ifdef CONFIG_X86_32
17878 + phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
17879 +#else
17880 + phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
17881 +#endif
17882 + __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
17883 + _text = .;
17884 HEAD_TEXT
17885 #ifdef CONFIG_X86_32
17886 . = ALIGN(PAGE_SIZE);
17887 @@ -108,13 +128,47 @@ SECTIONS
17888 IRQENTRY_TEXT
17889 *(.fixup)
17890 *(.gnu.warning)
17891 - /* End of text section */
17892 - _etext = .;
17893 } :text = 0x9090
17894
17895 - NOTES :text :note
17896 + . += __KERNEL_TEXT_OFFSET;
17897
17898 - EXCEPTION_TABLE(16) :text = 0x9090
17899 +#ifdef CONFIG_X86_32
17900 + . = ALIGN(PAGE_SIZE);
17901 + .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
17902 +
17903 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
17904 + MODULES_EXEC_VADDR = .;
17905 + BYTE(0)
17906 + . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
17907 + . = ALIGN(HPAGE_SIZE);
17908 + MODULES_EXEC_END = . - 1;
17909 +#endif
17910 +
17911 + } :module
17912 +#endif
17913 +
17914 + .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
17915 + /* End of text section */
17916 + _etext = . - __KERNEL_TEXT_OFFSET;
17917 + }
17918 +
17919 +#ifdef CONFIG_X86_32
17920 + . = ALIGN(PAGE_SIZE);
17921 + .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
17922 + *(.idt)
17923 + . = ALIGN(PAGE_SIZE);
17924 + *(.empty_zero_page)
17925 + *(.initial_pg_fixmap)
17926 + *(.initial_pg_pmd)
17927 + *(.initial_page_table)
17928 + *(.swapper_pg_dir)
17929 + } :rodata
17930 +#endif
17931 +
17932 + . = ALIGN(PAGE_SIZE);
17933 + NOTES :rodata :note
17934 +
17935 + EXCEPTION_TABLE(16) :rodata
17936
17937 #if defined(CONFIG_DEBUG_RODATA)
17938 /* .text should occupy whole number of pages */
17939 @@ -126,16 +180,20 @@ SECTIONS
17940
17941 /* Data */
17942 .data : AT(ADDR(.data) - LOAD_OFFSET) {
17943 +
17944 +#ifdef CONFIG_PAX_KERNEXEC
17945 + . = ALIGN(HPAGE_SIZE);
17946 +#else
17947 + . = ALIGN(PAGE_SIZE);
17948 +#endif
17949 +
17950 /* Start of data section */
17951 _sdata = .;
17952
17953 /* init_task */
17954 INIT_TASK_DATA(THREAD_SIZE)
17955
17956 -#ifdef CONFIG_X86_32
17957 - /* 32 bit has nosave before _edata */
17958 NOSAVE_DATA
17959 -#endif
17960
17961 PAGE_ALIGNED_DATA(PAGE_SIZE)
17962
17963 @@ -176,12 +234,19 @@ SECTIONS
17964 #endif /* CONFIG_X86_64 */
17965
17966 /* Init code and data - will be freed after init */
17967 - . = ALIGN(PAGE_SIZE);
17968 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
17969 + BYTE(0)
17970 +
17971 +#ifdef CONFIG_PAX_KERNEXEC
17972 + . = ALIGN(HPAGE_SIZE);
17973 +#else
17974 + . = ALIGN(PAGE_SIZE);
17975 +#endif
17976 +
17977 __init_begin = .; /* paired with __init_end */
17978 - }
17979 + } :init.begin
17980
17981 -#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
17982 +#ifdef CONFIG_SMP
17983 /*
17984 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
17985 * output PHDR, so the next output section - .init.text - should
17986 @@ -190,12 +255,27 @@ SECTIONS
17987 PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
17988 #endif
17989
17990 - INIT_TEXT_SECTION(PAGE_SIZE)
17991 -#ifdef CONFIG_X86_64
17992 - :init
17993 -#endif
17994 + . = ALIGN(PAGE_SIZE);
17995 + init_begin = .;
17996 + .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
17997 + VMLINUX_SYMBOL(_sinittext) = .;
17998 + INIT_TEXT
17999 + VMLINUX_SYMBOL(_einittext) = .;
18000 + . = ALIGN(PAGE_SIZE);
18001 + } :text.init
18002
18003 - INIT_DATA_SECTION(16)
18004 + /*
18005 + * .exit.text is discard at runtime, not link time, to deal with
18006 + * references from .altinstructions and .eh_frame
18007 + */
18008 + .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
18009 + EXIT_TEXT
18010 + . = ALIGN(16);
18011 + } :text.exit
18012 + . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
18013 +
18014 + . = ALIGN(PAGE_SIZE);
18015 + INIT_DATA_SECTION(16) :init
18016
18017 /*
18018 * Code and data for a variety of lowlevel trampolines, to be
18019 @@ -269,19 +349,12 @@ SECTIONS
18020 }
18021
18022 . = ALIGN(8);
18023 - /*
18024 - * .exit.text is discard at runtime, not link time, to deal with
18025 - * references from .altinstructions and .eh_frame
18026 - */
18027 - .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
18028 - EXIT_TEXT
18029 - }
18030
18031 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
18032 EXIT_DATA
18033 }
18034
18035 -#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
18036 +#ifndef CONFIG_SMP
18037 PERCPU_SECTION(INTERNODE_CACHE_BYTES)
18038 #endif
18039
18040 @@ -300,16 +373,10 @@ SECTIONS
18041 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
18042 __smp_locks = .;
18043 *(.smp_locks)
18044 - . = ALIGN(PAGE_SIZE);
18045 __smp_locks_end = .;
18046 + . = ALIGN(PAGE_SIZE);
18047 }
18048
18049 -#ifdef CONFIG_X86_64
18050 - .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
18051 - NOSAVE_DATA
18052 - }
18053 -#endif
18054 -
18055 /* BSS */
18056 . = ALIGN(PAGE_SIZE);
18057 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
18058 @@ -325,6 +392,7 @@ SECTIONS
18059 __brk_base = .;
18060 . += 64 * 1024; /* 64k alignment slop space */
18061 *(.brk_reservation) /* areas brk users have reserved */
18062 + . = ALIGN(HPAGE_SIZE);
18063 __brk_limit = .;
18064 }
18065
18066 @@ -351,13 +419,12 @@ SECTIONS
18067 * for the boot processor.
18068 */
18069 #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
18070 -INIT_PER_CPU(gdt_page);
18071 INIT_PER_CPU(irq_stack_union);
18072
18073 /*
18074 * Build-time check on the image size:
18075 */
18076 -. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
18077 +. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
18078 "kernel image bigger than KERNEL_IMAGE_SIZE");
18079
18080 #ifdef CONFIG_SMP
18081 diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
18082 index e4d4a22..47ee71f 100644
18083 --- a/arch/x86/kernel/vsyscall_64.c
18084 +++ b/arch/x86/kernel/vsyscall_64.c
18085 @@ -57,15 +57,13 @@ DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data) =
18086 .lock = __SEQLOCK_UNLOCKED(__vsyscall_gtod_data.lock),
18087 };
18088
18089 -static enum { EMULATE, NATIVE, NONE } vsyscall_mode = NATIVE;
18090 +static enum { EMULATE, NONE } vsyscall_mode = EMULATE;
18091
18092 static int __init vsyscall_setup(char *str)
18093 {
18094 if (str) {
18095 if (!strcmp("emulate", str))
18096 vsyscall_mode = EMULATE;
18097 - else if (!strcmp("native", str))
18098 - vsyscall_mode = NATIVE;
18099 else if (!strcmp("none", str))
18100 vsyscall_mode = NONE;
18101 else
18102 @@ -178,7 +176,7 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
18103
18104 tsk = current;
18105 if (seccomp_mode(&tsk->seccomp))
18106 - do_exit(SIGKILL);
18107 + do_group_exit(SIGKILL);
18108
18109 switch (vsyscall_nr) {
18110 case 0:
18111 @@ -220,8 +218,7 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
18112 return true;
18113
18114 sigsegv:
18115 - force_sig(SIGSEGV, current);
18116 - return true;
18117 + do_group_exit(SIGKILL);
18118 }
18119
18120 /*
18121 @@ -274,10 +271,7 @@ void __init map_vsyscall(void)
18122 extern char __vvar_page;
18123 unsigned long physaddr_vvar_page = __pa_symbol(&__vvar_page);
18124
18125 - __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall,
18126 - vsyscall_mode == NATIVE
18127 - ? PAGE_KERNEL_VSYSCALL
18128 - : PAGE_KERNEL_VVAR);
18129 + __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall, PAGE_KERNEL_VVAR);
18130 BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_FIRST_PAGE) !=
18131 (unsigned long)VSYSCALL_START);
18132
18133 diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
18134 index 9796c2f..f686fbf 100644
18135 --- a/arch/x86/kernel/x8664_ksyms_64.c
18136 +++ b/arch/x86/kernel/x8664_ksyms_64.c
18137 @@ -29,8 +29,6 @@ EXPORT_SYMBOL(__put_user_8);
18138 EXPORT_SYMBOL(copy_user_generic_string);
18139 EXPORT_SYMBOL(copy_user_generic_unrolled);
18140 EXPORT_SYMBOL(__copy_user_nocache);
18141 -EXPORT_SYMBOL(_copy_from_user);
18142 -EXPORT_SYMBOL(_copy_to_user);
18143
18144 EXPORT_SYMBOL(copy_page);
18145 EXPORT_SYMBOL(clear_page);
18146 diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
18147 index a391134..d0b63b6e 100644
18148 --- a/arch/x86/kernel/xsave.c
18149 +++ b/arch/x86/kernel/xsave.c
18150 @@ -130,7 +130,7 @@ int check_for_xstate(struct i387_fxsave_struct __user *buf,
18151 fx_sw_user->xstate_size > fx_sw_user->extended_size)
18152 return -EINVAL;
18153
18154 - err = __get_user(magic2, (__u32 *) (((void *)fpstate) +
18155 + err = __get_user(magic2, (__u32 __user *) (((void __user *)fpstate) +
18156 fx_sw_user->extended_size -
18157 FP_XSTATE_MAGIC2_SIZE));
18158 if (err)
18159 @@ -267,7 +267,7 @@ fx_only:
18160 * the other extended state.
18161 */
18162 xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE);
18163 - return fxrstor_checking((__force struct i387_fxsave_struct *)buf);
18164 + return fxrstor_checking((struct i387_fxsave_struct __force_kernel *)buf);
18165 }
18166
18167 /*
18168 @@ -299,7 +299,7 @@ int restore_i387_xstate(void __user *buf)
18169 if (use_xsave())
18170 err = restore_user_xstate(buf);
18171 else
18172 - err = fxrstor_checking((__force struct i387_fxsave_struct *)
18173 + err = fxrstor_checking((struct i387_fxsave_struct __force_kernel *)
18174 buf);
18175 if (unlikely(err)) {
18176 /*
18177 diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
18178 index f1e3be1..588efc8 100644
18179 --- a/arch/x86/kvm/emulate.c
18180 +++ b/arch/x86/kvm/emulate.c
18181 @@ -249,6 +249,7 @@ struct gprefix {
18182
18183 #define ____emulate_2op(ctxt, _op, _x, _y, _suffix, _dsttype) \
18184 do { \
18185 + unsigned long _tmp; \
18186 __asm__ __volatile__ ( \
18187 _PRE_EFLAGS("0", "4", "2") \
18188 _op _suffix " %"_x"3,%1; " \
18189 @@ -263,8 +264,6 @@ struct gprefix {
18190 /* Raw emulation: instruction has two explicit operands. */
18191 #define __emulate_2op_nobyte(ctxt,_op,_wx,_wy,_lx,_ly,_qx,_qy) \
18192 do { \
18193 - unsigned long _tmp; \
18194 - \
18195 switch ((ctxt)->dst.bytes) { \
18196 case 2: \
18197 ____emulate_2op(ctxt,_op,_wx,_wy,"w",u16); \
18198 @@ -280,7 +279,6 @@ struct gprefix {
18199
18200 #define __emulate_2op(ctxt,_op,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
18201 do { \
18202 - unsigned long _tmp; \
18203 switch ((ctxt)->dst.bytes) { \
18204 case 1: \
18205 ____emulate_2op(ctxt,_op,_bx,_by,"b",u8); \
18206 diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
18207 index 54abb40..a192606 100644
18208 --- a/arch/x86/kvm/lapic.c
18209 +++ b/arch/x86/kvm/lapic.c
18210 @@ -53,7 +53,7 @@
18211 #define APIC_BUS_CYCLE_NS 1
18212
18213 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
18214 -#define apic_debug(fmt, arg...)
18215 +#define apic_debug(fmt, arg...) do {} while (0)
18216
18217 #define APIC_LVT_NUM 6
18218 /* 14 is the version for Xeon and Pentium 8.4.8*/
18219 diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
18220 index f1b36cf..af8a124 100644
18221 --- a/arch/x86/kvm/mmu.c
18222 +++ b/arch/x86/kvm/mmu.c
18223 @@ -3555,7 +3555,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
18224
18225 pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
18226
18227 - invlpg_counter = atomic_read(&vcpu->kvm->arch.invlpg_counter);
18228 + invlpg_counter = atomic_read_unchecked(&vcpu->kvm->arch.invlpg_counter);
18229
18230 /*
18231 * Assume that the pte write on a page table of the same type
18232 @@ -3587,7 +3587,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
18233 }
18234
18235 spin_lock(&vcpu->kvm->mmu_lock);
18236 - if (atomic_read(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
18237 + if (atomic_read_unchecked(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
18238 gentry = 0;
18239 kvm_mmu_free_some_pages(vcpu);
18240 ++vcpu->kvm->stat.mmu_pte_write;
18241 diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
18242 index 9299410..ade2f9b 100644
18243 --- a/arch/x86/kvm/paging_tmpl.h
18244 +++ b/arch/x86/kvm/paging_tmpl.h
18245 @@ -197,7 +197,7 @@ retry_walk:
18246 if (unlikely(kvm_is_error_hva(host_addr)))
18247 goto error;
18248
18249 - ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
18250 + ptep_user = (pt_element_t __force_user *)((void *)host_addr + offset);
18251 if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte))))
18252 goto error;
18253
18254 @@ -705,7 +705,7 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
18255 if (need_flush)
18256 kvm_flush_remote_tlbs(vcpu->kvm);
18257
18258 - atomic_inc(&vcpu->kvm->arch.invlpg_counter);
18259 + atomic_inc_unchecked(&vcpu->kvm->arch.invlpg_counter);
18260
18261 spin_unlock(&vcpu->kvm->mmu_lock);
18262
18263 diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
18264 index e32243e..a6e6172 100644
18265 --- a/arch/x86/kvm/svm.c
18266 +++ b/arch/x86/kvm/svm.c
18267 @@ -3400,7 +3400,11 @@ static void reload_tss(struct kvm_vcpu *vcpu)
18268 int cpu = raw_smp_processor_id();
18269
18270 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
18271 +
18272 + pax_open_kernel();
18273 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
18274 + pax_close_kernel();
18275 +
18276 load_TR_desc();
18277 }
18278
18279 @@ -3778,6 +3782,10 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
18280 #endif
18281 #endif
18282
18283 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
18284 + __set_fs(current_thread_info()->addr_limit);
18285 +#endif
18286 +
18287 reload_tss(vcpu);
18288
18289 local_irq_disable();
18290 diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
18291 index 579a0b5..ed7bbf9 100644
18292 --- a/arch/x86/kvm/vmx.c
18293 +++ b/arch/x86/kvm/vmx.c
18294 @@ -1305,7 +1305,11 @@ static void reload_tss(void)
18295 struct desc_struct *descs;
18296
18297 descs = (void *)gdt->address;
18298 +
18299 + pax_open_kernel();
18300 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
18301 + pax_close_kernel();
18302 +
18303 load_TR_desc();
18304 }
18305
18306 @@ -2633,8 +2637,11 @@ static __init int hardware_setup(void)
18307 if (!cpu_has_vmx_flexpriority())
18308 flexpriority_enabled = 0;
18309
18310 - if (!cpu_has_vmx_tpr_shadow())
18311 - kvm_x86_ops->update_cr8_intercept = NULL;
18312 + if (!cpu_has_vmx_tpr_shadow()) {
18313 + pax_open_kernel();
18314 + *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
18315 + pax_close_kernel();
18316 + }
18317
18318 if (enable_ept && !cpu_has_vmx_ept_2m_page())
18319 kvm_disable_largepages();
18320 @@ -3648,7 +3655,7 @@ static void vmx_set_constant_host_state(void)
18321 vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
18322
18323 asm("mov $.Lkvm_vmx_return, %0" : "=r"(tmpl));
18324 - vmcs_writel(HOST_RIP, tmpl); /* 22.2.5 */
18325 + vmcs_writel(HOST_RIP, ktla_ktva(tmpl)); /* 22.2.5 */
18326
18327 rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
18328 vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
18329 @@ -6169,6 +6176,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
18330 "jmp .Lkvm_vmx_return \n\t"
18331 ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
18332 ".Lkvm_vmx_return: "
18333 +
18334 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18335 + "ljmp %[cs],$.Lkvm_vmx_return2\n\t"
18336 + ".Lkvm_vmx_return2: "
18337 +#endif
18338 +
18339 /* Save guest registers, load host registers, keep flags */
18340 "mov %0, %c[wordsize](%%"R"sp) \n\t"
18341 "pop %0 \n\t"
18342 @@ -6217,6 +6230,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
18343 #endif
18344 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
18345 [wordsize]"i"(sizeof(ulong))
18346 +
18347 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18348 + ,[cs]"i"(__KERNEL_CS)
18349 +#endif
18350 +
18351 : "cc", "memory"
18352 , R"ax", R"bx", R"di", R"si"
18353 #ifdef CONFIG_X86_64
18354 @@ -6245,7 +6263,16 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
18355 }
18356 }
18357
18358 - asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
18359 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r"(__KERNEL_DS));
18360 +
18361 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18362 + loadsegment(fs, __KERNEL_PERCPU);
18363 +#endif
18364 +
18365 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
18366 + __set_fs(current_thread_info()->addr_limit);
18367 +#endif
18368 +
18369 vmx->loaded_vmcs->launched = 1;
18370
18371 vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
18372 diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
18373 index 4c938da..4ddef65 100644
18374 --- a/arch/x86/kvm/x86.c
18375 +++ b/arch/x86/kvm/x86.c
18376 @@ -1345,8 +1345,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
18377 {
18378 struct kvm *kvm = vcpu->kvm;
18379 int lm = is_long_mode(vcpu);
18380 - u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
18381 - : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
18382 + u8 __user *blob_addr = lm ? (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_64
18383 + : (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
18384 u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
18385 : kvm->arch.xen_hvm_config.blob_size_32;
18386 u32 page_num = data & ~PAGE_MASK;
18387 @@ -2165,6 +2165,8 @@ long kvm_arch_dev_ioctl(struct file *filp,
18388 if (n < msr_list.nmsrs)
18389 goto out;
18390 r = -EFAULT;
18391 + if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
18392 + goto out;
18393 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
18394 num_msrs_to_save * sizeof(u32)))
18395 goto out;
18396 @@ -2340,15 +2342,20 @@ static int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
18397 struct kvm_cpuid2 *cpuid,
18398 struct kvm_cpuid_entry2 __user *entries)
18399 {
18400 - int r;
18401 + int r, i;
18402
18403 r = -E2BIG;
18404 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
18405 goto out;
18406 r = -EFAULT;
18407 - if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
18408 - cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
18409 + if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
18410 goto out;
18411 + for (i = 0; i < cpuid->nent; ++i) {
18412 + struct kvm_cpuid_entry2 cpuid_entry;
18413 + if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
18414 + goto out;
18415 + vcpu->arch.cpuid_entries[i] = cpuid_entry;
18416 + }
18417 vcpu->arch.cpuid_nent = cpuid->nent;
18418 kvm_apic_set_version(vcpu);
18419 kvm_x86_ops->cpuid_update(vcpu);
18420 @@ -2363,15 +2370,19 @@ static int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
18421 struct kvm_cpuid2 *cpuid,
18422 struct kvm_cpuid_entry2 __user *entries)
18423 {
18424 - int r;
18425 + int r, i;
18426
18427 r = -E2BIG;
18428 if (cpuid->nent < vcpu->arch.cpuid_nent)
18429 goto out;
18430 r = -EFAULT;
18431 - if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
18432 - vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
18433 + if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
18434 goto out;
18435 + for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
18436 + struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
18437 + if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
18438 + goto out;
18439 + }
18440 return 0;
18441
18442 out:
18443 @@ -2746,7 +2757,7 @@ static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
18444 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
18445 struct kvm_interrupt *irq)
18446 {
18447 - if (irq->irq < 0 || irq->irq >= 256)
18448 + if (irq->irq >= 256)
18449 return -EINVAL;
18450 if (irqchip_in_kernel(vcpu->kvm))
18451 return -ENXIO;
18452 @@ -5162,7 +5173,7 @@ static void kvm_set_mmio_spte_mask(void)
18453 kvm_mmu_set_mmio_spte_mask(mask);
18454 }
18455
18456 -int kvm_arch_init(void *opaque)
18457 +int kvm_arch_init(const void *opaque)
18458 {
18459 int r;
18460 struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
18461 diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
18462 index cf4603b..7cdde38 100644
18463 --- a/arch/x86/lguest/boot.c
18464 +++ b/arch/x86/lguest/boot.c
18465 @@ -1195,9 +1195,10 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
18466 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
18467 * Launcher to reboot us.
18468 */
18469 -static void lguest_restart(char *reason)
18470 +static __noreturn void lguest_restart(char *reason)
18471 {
18472 hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
18473 + BUG();
18474 }
18475
18476 /*G:050
18477 diff --git a/arch/x86/lib/atomic64_32.c b/arch/x86/lib/atomic64_32.c
18478 index 042f682..c92afb6 100644
18479 --- a/arch/x86/lib/atomic64_32.c
18480 +++ b/arch/x86/lib/atomic64_32.c
18481 @@ -8,18 +8,30 @@
18482
18483 long long atomic64_read_cx8(long long, const atomic64_t *v);
18484 EXPORT_SYMBOL(atomic64_read_cx8);
18485 +long long atomic64_read_unchecked_cx8(long long, const atomic64_unchecked_t *v);
18486 +EXPORT_SYMBOL(atomic64_read_unchecked_cx8);
18487 long long atomic64_set_cx8(long long, const atomic64_t *v);
18488 EXPORT_SYMBOL(atomic64_set_cx8);
18489 +long long atomic64_set_unchecked_cx8(long long, const atomic64_unchecked_t *v);
18490 +EXPORT_SYMBOL(atomic64_set_unchecked_cx8);
18491 long long atomic64_xchg_cx8(long long, unsigned high);
18492 EXPORT_SYMBOL(atomic64_xchg_cx8);
18493 long long atomic64_add_return_cx8(long long a, atomic64_t *v);
18494 EXPORT_SYMBOL(atomic64_add_return_cx8);
18495 +long long atomic64_add_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
18496 +EXPORT_SYMBOL(atomic64_add_return_unchecked_cx8);
18497 long long atomic64_sub_return_cx8(long long a, atomic64_t *v);
18498 EXPORT_SYMBOL(atomic64_sub_return_cx8);
18499 +long long atomic64_sub_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
18500 +EXPORT_SYMBOL(atomic64_sub_return_unchecked_cx8);
18501 long long atomic64_inc_return_cx8(long long a, atomic64_t *v);
18502 EXPORT_SYMBOL(atomic64_inc_return_cx8);
18503 +long long atomic64_inc_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
18504 +EXPORT_SYMBOL(atomic64_inc_return_unchecked_cx8);
18505 long long atomic64_dec_return_cx8(long long a, atomic64_t *v);
18506 EXPORT_SYMBOL(atomic64_dec_return_cx8);
18507 +long long atomic64_dec_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
18508 +EXPORT_SYMBOL(atomic64_dec_return_unchecked_cx8);
18509 long long atomic64_dec_if_positive_cx8(atomic64_t *v);
18510 EXPORT_SYMBOL(atomic64_dec_if_positive_cx8);
18511 int atomic64_inc_not_zero_cx8(atomic64_t *v);
18512 @@ -30,26 +42,46 @@ EXPORT_SYMBOL(atomic64_add_unless_cx8);
18513 #ifndef CONFIG_X86_CMPXCHG64
18514 long long atomic64_read_386(long long, const atomic64_t *v);
18515 EXPORT_SYMBOL(atomic64_read_386);
18516 +long long atomic64_read_unchecked_386(long long, const atomic64_unchecked_t *v);
18517 +EXPORT_SYMBOL(atomic64_read_unchecked_386);
18518 long long atomic64_set_386(long long, const atomic64_t *v);
18519 EXPORT_SYMBOL(atomic64_set_386);
18520 +long long atomic64_set_unchecked_386(long long, const atomic64_unchecked_t *v);
18521 +EXPORT_SYMBOL(atomic64_set_unchecked_386);
18522 long long atomic64_xchg_386(long long, unsigned high);
18523 EXPORT_SYMBOL(atomic64_xchg_386);
18524 long long atomic64_add_return_386(long long a, atomic64_t *v);
18525 EXPORT_SYMBOL(atomic64_add_return_386);
18526 +long long atomic64_add_return_unchecked_386(long long a, atomic64_unchecked_t *v);
18527 +EXPORT_SYMBOL(atomic64_add_return_unchecked_386);
18528 long long atomic64_sub_return_386(long long a, atomic64_t *v);
18529 EXPORT_SYMBOL(atomic64_sub_return_386);
18530 +long long atomic64_sub_return_unchecked_386(long long a, atomic64_unchecked_t *v);
18531 +EXPORT_SYMBOL(atomic64_sub_return_unchecked_386);
18532 long long atomic64_inc_return_386(long long a, atomic64_t *v);
18533 EXPORT_SYMBOL(atomic64_inc_return_386);
18534 +long long atomic64_inc_return_unchecked_386(long long a, atomic64_unchecked_t *v);
18535 +EXPORT_SYMBOL(atomic64_inc_return_unchecked_386);
18536 long long atomic64_dec_return_386(long long a, atomic64_t *v);
18537 EXPORT_SYMBOL(atomic64_dec_return_386);
18538 +long long atomic64_dec_return_unchecked_386(long long a, atomic64_unchecked_t *v);
18539 +EXPORT_SYMBOL(atomic64_dec_return_unchecked_386);
18540 long long atomic64_add_386(long long a, atomic64_t *v);
18541 EXPORT_SYMBOL(atomic64_add_386);
18542 +long long atomic64_add_unchecked_386(long long a, atomic64_unchecked_t *v);
18543 +EXPORT_SYMBOL(atomic64_add_unchecked_386);
18544 long long atomic64_sub_386(long long a, atomic64_t *v);
18545 EXPORT_SYMBOL(atomic64_sub_386);
18546 +long long atomic64_sub_unchecked_386(long long a, atomic64_unchecked_t *v);
18547 +EXPORT_SYMBOL(atomic64_sub_unchecked_386);
18548 long long atomic64_inc_386(long long a, atomic64_t *v);
18549 EXPORT_SYMBOL(atomic64_inc_386);
18550 +long long atomic64_inc_unchecked_386(long long a, atomic64_unchecked_t *v);
18551 +EXPORT_SYMBOL(atomic64_inc_unchecked_386);
18552 long long atomic64_dec_386(long long a, atomic64_t *v);
18553 EXPORT_SYMBOL(atomic64_dec_386);
18554 +long long atomic64_dec_unchecked_386(long long a, atomic64_unchecked_t *v);
18555 +EXPORT_SYMBOL(atomic64_dec_unchecked_386);
18556 long long atomic64_dec_if_positive_386(atomic64_t *v);
18557 EXPORT_SYMBOL(atomic64_dec_if_positive_386);
18558 int atomic64_inc_not_zero_386(atomic64_t *v);
18559 diff --git a/arch/x86/lib/atomic64_386_32.S b/arch/x86/lib/atomic64_386_32.S
18560 index e8e7e0d..56fd1b0 100644
18561 --- a/arch/x86/lib/atomic64_386_32.S
18562 +++ b/arch/x86/lib/atomic64_386_32.S
18563 @@ -48,6 +48,10 @@ BEGIN(read)
18564 movl (v), %eax
18565 movl 4(v), %edx
18566 RET_ENDP
18567 +BEGIN(read_unchecked)
18568 + movl (v), %eax
18569 + movl 4(v), %edx
18570 +RET_ENDP
18571 #undef v
18572
18573 #define v %esi
18574 @@ -55,6 +59,10 @@ BEGIN(set)
18575 movl %ebx, (v)
18576 movl %ecx, 4(v)
18577 RET_ENDP
18578 +BEGIN(set_unchecked)
18579 + movl %ebx, (v)
18580 + movl %ecx, 4(v)
18581 +RET_ENDP
18582 #undef v
18583
18584 #define v %esi
18585 @@ -70,6 +78,20 @@ RET_ENDP
18586 BEGIN(add)
18587 addl %eax, (v)
18588 adcl %edx, 4(v)
18589 +
18590 +#ifdef CONFIG_PAX_REFCOUNT
18591 + jno 0f
18592 + subl %eax, (v)
18593 + sbbl %edx, 4(v)
18594 + int $4
18595 +0:
18596 + _ASM_EXTABLE(0b, 0b)
18597 +#endif
18598 +
18599 +RET_ENDP
18600 +BEGIN(add_unchecked)
18601 + addl %eax, (v)
18602 + adcl %edx, 4(v)
18603 RET_ENDP
18604 #undef v
18605
18606 @@ -77,6 +99,24 @@ RET_ENDP
18607 BEGIN(add_return)
18608 addl (v), %eax
18609 adcl 4(v), %edx
18610 +
18611 +#ifdef CONFIG_PAX_REFCOUNT
18612 + into
18613 +1234:
18614 + _ASM_EXTABLE(1234b, 2f)
18615 +#endif
18616 +
18617 + movl %eax, (v)
18618 + movl %edx, 4(v)
18619 +
18620 +#ifdef CONFIG_PAX_REFCOUNT
18621 +2:
18622 +#endif
18623 +
18624 +RET_ENDP
18625 +BEGIN(add_return_unchecked)
18626 + addl (v), %eax
18627 + adcl 4(v), %edx
18628 movl %eax, (v)
18629 movl %edx, 4(v)
18630 RET_ENDP
18631 @@ -86,6 +126,20 @@ RET_ENDP
18632 BEGIN(sub)
18633 subl %eax, (v)
18634 sbbl %edx, 4(v)
18635 +
18636 +#ifdef CONFIG_PAX_REFCOUNT
18637 + jno 0f
18638 + addl %eax, (v)
18639 + adcl %edx, 4(v)
18640 + int $4
18641 +0:
18642 + _ASM_EXTABLE(0b, 0b)
18643 +#endif
18644 +
18645 +RET_ENDP
18646 +BEGIN(sub_unchecked)
18647 + subl %eax, (v)
18648 + sbbl %edx, 4(v)
18649 RET_ENDP
18650 #undef v
18651
18652 @@ -96,6 +150,27 @@ BEGIN(sub_return)
18653 sbbl $0, %edx
18654 addl (v), %eax
18655 adcl 4(v), %edx
18656 +
18657 +#ifdef CONFIG_PAX_REFCOUNT
18658 + into
18659 +1234:
18660 + _ASM_EXTABLE(1234b, 2f)
18661 +#endif
18662 +
18663 + movl %eax, (v)
18664 + movl %edx, 4(v)
18665 +
18666 +#ifdef CONFIG_PAX_REFCOUNT
18667 +2:
18668 +#endif
18669 +
18670 +RET_ENDP
18671 +BEGIN(sub_return_unchecked)
18672 + negl %edx
18673 + negl %eax
18674 + sbbl $0, %edx
18675 + addl (v), %eax
18676 + adcl 4(v), %edx
18677 movl %eax, (v)
18678 movl %edx, 4(v)
18679 RET_ENDP
18680 @@ -105,6 +180,20 @@ RET_ENDP
18681 BEGIN(inc)
18682 addl $1, (v)
18683 adcl $0, 4(v)
18684 +
18685 +#ifdef CONFIG_PAX_REFCOUNT
18686 + jno 0f
18687 + subl $1, (v)
18688 + sbbl $0, 4(v)
18689 + int $4
18690 +0:
18691 + _ASM_EXTABLE(0b, 0b)
18692 +#endif
18693 +
18694 +RET_ENDP
18695 +BEGIN(inc_unchecked)
18696 + addl $1, (v)
18697 + adcl $0, 4(v)
18698 RET_ENDP
18699 #undef v
18700
18701 @@ -114,6 +203,26 @@ BEGIN(inc_return)
18702 movl 4(v), %edx
18703 addl $1, %eax
18704 adcl $0, %edx
18705 +
18706 +#ifdef CONFIG_PAX_REFCOUNT
18707 + into
18708 +1234:
18709 + _ASM_EXTABLE(1234b, 2f)
18710 +#endif
18711 +
18712 + movl %eax, (v)
18713 + movl %edx, 4(v)
18714 +
18715 +#ifdef CONFIG_PAX_REFCOUNT
18716 +2:
18717 +#endif
18718 +
18719 +RET_ENDP
18720 +BEGIN(inc_return_unchecked)
18721 + movl (v), %eax
18722 + movl 4(v), %edx
18723 + addl $1, %eax
18724 + adcl $0, %edx
18725 movl %eax, (v)
18726 movl %edx, 4(v)
18727 RET_ENDP
18728 @@ -123,6 +232,20 @@ RET_ENDP
18729 BEGIN(dec)
18730 subl $1, (v)
18731 sbbl $0, 4(v)
18732 +
18733 +#ifdef CONFIG_PAX_REFCOUNT
18734 + jno 0f
18735 + addl $1, (v)
18736 + adcl $0, 4(v)
18737 + int $4
18738 +0:
18739 + _ASM_EXTABLE(0b, 0b)
18740 +#endif
18741 +
18742 +RET_ENDP
18743 +BEGIN(dec_unchecked)
18744 + subl $1, (v)
18745 + sbbl $0, 4(v)
18746 RET_ENDP
18747 #undef v
18748
18749 @@ -132,6 +255,26 @@ BEGIN(dec_return)
18750 movl 4(v), %edx
18751 subl $1, %eax
18752 sbbl $0, %edx
18753 +
18754 +#ifdef CONFIG_PAX_REFCOUNT
18755 + into
18756 +1234:
18757 + _ASM_EXTABLE(1234b, 2f)
18758 +#endif
18759 +
18760 + movl %eax, (v)
18761 + movl %edx, 4(v)
18762 +
18763 +#ifdef CONFIG_PAX_REFCOUNT
18764 +2:
18765 +#endif
18766 +
18767 +RET_ENDP
18768 +BEGIN(dec_return_unchecked)
18769 + movl (v), %eax
18770 + movl 4(v), %edx
18771 + subl $1, %eax
18772 + sbbl $0, %edx
18773 movl %eax, (v)
18774 movl %edx, 4(v)
18775 RET_ENDP
18776 @@ -143,6 +286,13 @@ BEGIN(add_unless)
18777 adcl %edx, %edi
18778 addl (v), %eax
18779 adcl 4(v), %edx
18780 +
18781 +#ifdef CONFIG_PAX_REFCOUNT
18782 + into
18783 +1234:
18784 + _ASM_EXTABLE(1234b, 2f)
18785 +#endif
18786 +
18787 cmpl %eax, %esi
18788 je 3f
18789 1:
18790 @@ -168,6 +318,13 @@ BEGIN(inc_not_zero)
18791 1:
18792 addl $1, %eax
18793 adcl $0, %edx
18794 +
18795 +#ifdef CONFIG_PAX_REFCOUNT
18796 + into
18797 +1234:
18798 + _ASM_EXTABLE(1234b, 2f)
18799 +#endif
18800 +
18801 movl %eax, (v)
18802 movl %edx, 4(v)
18803 movl $1, %eax
18804 @@ -186,6 +343,13 @@ BEGIN(dec_if_positive)
18805 movl 4(v), %edx
18806 subl $1, %eax
18807 sbbl $0, %edx
18808 +
18809 +#ifdef CONFIG_PAX_REFCOUNT
18810 + into
18811 +1234:
18812 + _ASM_EXTABLE(1234b, 1f)
18813 +#endif
18814 +
18815 js 1f
18816 movl %eax, (v)
18817 movl %edx, 4(v)
18818 diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S
18819 index 391a083..d658e9f 100644
18820 --- a/arch/x86/lib/atomic64_cx8_32.S
18821 +++ b/arch/x86/lib/atomic64_cx8_32.S
18822 @@ -35,10 +35,20 @@ ENTRY(atomic64_read_cx8)
18823 CFI_STARTPROC
18824
18825 read64 %ecx
18826 + pax_force_retaddr
18827 ret
18828 CFI_ENDPROC
18829 ENDPROC(atomic64_read_cx8)
18830
18831 +ENTRY(atomic64_read_unchecked_cx8)
18832 + CFI_STARTPROC
18833 +
18834 + read64 %ecx
18835 + pax_force_retaddr
18836 + ret
18837 + CFI_ENDPROC
18838 +ENDPROC(atomic64_read_unchecked_cx8)
18839 +
18840 ENTRY(atomic64_set_cx8)
18841 CFI_STARTPROC
18842
18843 @@ -48,10 +58,25 @@ ENTRY(atomic64_set_cx8)
18844 cmpxchg8b (%esi)
18845 jne 1b
18846
18847 + pax_force_retaddr
18848 ret
18849 CFI_ENDPROC
18850 ENDPROC(atomic64_set_cx8)
18851
18852 +ENTRY(atomic64_set_unchecked_cx8)
18853 + CFI_STARTPROC
18854 +
18855 +1:
18856 +/* we don't need LOCK_PREFIX since aligned 64-bit writes
18857 + * are atomic on 586 and newer */
18858 + cmpxchg8b (%esi)
18859 + jne 1b
18860 +
18861 + pax_force_retaddr
18862 + ret
18863 + CFI_ENDPROC
18864 +ENDPROC(atomic64_set_unchecked_cx8)
18865 +
18866 ENTRY(atomic64_xchg_cx8)
18867 CFI_STARTPROC
18868
18869 @@ -62,12 +87,13 @@ ENTRY(atomic64_xchg_cx8)
18870 cmpxchg8b (%esi)
18871 jne 1b
18872
18873 + pax_force_retaddr
18874 ret
18875 CFI_ENDPROC
18876 ENDPROC(atomic64_xchg_cx8)
18877
18878 -.macro addsub_return func ins insc
18879 -ENTRY(atomic64_\func\()_return_cx8)
18880 +.macro addsub_return func ins insc unchecked=""
18881 +ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
18882 CFI_STARTPROC
18883 SAVE ebp
18884 SAVE ebx
18885 @@ -84,27 +110,44 @@ ENTRY(atomic64_\func\()_return_cx8)
18886 movl %edx, %ecx
18887 \ins\()l %esi, %ebx
18888 \insc\()l %edi, %ecx
18889 +
18890 +.ifb \unchecked
18891 +#ifdef CONFIG_PAX_REFCOUNT
18892 + into
18893 +2:
18894 + _ASM_EXTABLE(2b, 3f)
18895 +#endif
18896 +.endif
18897 +
18898 LOCK_PREFIX
18899 cmpxchg8b (%ebp)
18900 jne 1b
18901 -
18902 -10:
18903 movl %ebx, %eax
18904 movl %ecx, %edx
18905 +
18906 +.ifb \unchecked
18907 +#ifdef CONFIG_PAX_REFCOUNT
18908 +3:
18909 +#endif
18910 +.endif
18911 +
18912 RESTORE edi
18913 RESTORE esi
18914 RESTORE ebx
18915 RESTORE ebp
18916 + pax_force_retaddr
18917 ret
18918 CFI_ENDPROC
18919 -ENDPROC(atomic64_\func\()_return_cx8)
18920 +ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
18921 .endm
18922
18923 addsub_return add add adc
18924 addsub_return sub sub sbb
18925 +addsub_return add add adc _unchecked
18926 +addsub_return sub sub sbb _unchecked
18927
18928 -.macro incdec_return func ins insc
18929 -ENTRY(atomic64_\func\()_return_cx8)
18930 +.macro incdec_return func ins insc unchecked
18931 +ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
18932 CFI_STARTPROC
18933 SAVE ebx
18934
18935 @@ -114,21 +157,39 @@ ENTRY(atomic64_\func\()_return_cx8)
18936 movl %edx, %ecx
18937 \ins\()l $1, %ebx
18938 \insc\()l $0, %ecx
18939 +
18940 +.ifb \unchecked
18941 +#ifdef CONFIG_PAX_REFCOUNT
18942 + into
18943 +2:
18944 + _ASM_EXTABLE(2b, 3f)
18945 +#endif
18946 +.endif
18947 +
18948 LOCK_PREFIX
18949 cmpxchg8b (%esi)
18950 jne 1b
18951
18952 -10:
18953 movl %ebx, %eax
18954 movl %ecx, %edx
18955 +
18956 +.ifb \unchecked
18957 +#ifdef CONFIG_PAX_REFCOUNT
18958 +3:
18959 +#endif
18960 +.endif
18961 +
18962 RESTORE ebx
18963 + pax_force_retaddr
18964 ret
18965 CFI_ENDPROC
18966 -ENDPROC(atomic64_\func\()_return_cx8)
18967 +ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
18968 .endm
18969
18970 incdec_return inc add adc
18971 incdec_return dec sub sbb
18972 +incdec_return inc add adc _unchecked
18973 +incdec_return dec sub sbb _unchecked
18974
18975 ENTRY(atomic64_dec_if_positive_cx8)
18976 CFI_STARTPROC
18977 @@ -140,6 +201,13 @@ ENTRY(atomic64_dec_if_positive_cx8)
18978 movl %edx, %ecx
18979 subl $1, %ebx
18980 sbb $0, %ecx
18981 +
18982 +#ifdef CONFIG_PAX_REFCOUNT
18983 + into
18984 +1234:
18985 + _ASM_EXTABLE(1234b, 2f)
18986 +#endif
18987 +
18988 js 2f
18989 LOCK_PREFIX
18990 cmpxchg8b (%esi)
18991 @@ -149,6 +217,7 @@ ENTRY(atomic64_dec_if_positive_cx8)
18992 movl %ebx, %eax
18993 movl %ecx, %edx
18994 RESTORE ebx
18995 + pax_force_retaddr
18996 ret
18997 CFI_ENDPROC
18998 ENDPROC(atomic64_dec_if_positive_cx8)
18999 @@ -174,6 +243,13 @@ ENTRY(atomic64_add_unless_cx8)
19000 movl %edx, %ecx
19001 addl %esi, %ebx
19002 adcl %edi, %ecx
19003 +
19004 +#ifdef CONFIG_PAX_REFCOUNT
19005 + into
19006 +1234:
19007 + _ASM_EXTABLE(1234b, 3f)
19008 +#endif
19009 +
19010 LOCK_PREFIX
19011 cmpxchg8b (%ebp)
19012 jne 1b
19013 @@ -184,6 +260,7 @@ ENTRY(atomic64_add_unless_cx8)
19014 CFI_ADJUST_CFA_OFFSET -8
19015 RESTORE ebx
19016 RESTORE ebp
19017 + pax_force_retaddr
19018 ret
19019 4:
19020 cmpl %edx, 4(%esp)
19021 @@ -206,6 +283,13 @@ ENTRY(atomic64_inc_not_zero_cx8)
19022 movl %edx, %ecx
19023 addl $1, %ebx
19024 adcl $0, %ecx
19025 +
19026 +#ifdef CONFIG_PAX_REFCOUNT
19027 + into
19028 +1234:
19029 + _ASM_EXTABLE(1234b, 3f)
19030 +#endif
19031 +
19032 LOCK_PREFIX
19033 cmpxchg8b (%esi)
19034 jne 1b
19035 @@ -213,6 +297,7 @@ ENTRY(atomic64_inc_not_zero_cx8)
19036 movl $1, %eax
19037 3:
19038 RESTORE ebx
19039 + pax_force_retaddr
19040 ret
19041 4:
19042 testl %edx, %edx
19043 diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
19044 index 78d16a5..fbcf666 100644
19045 --- a/arch/x86/lib/checksum_32.S
19046 +++ b/arch/x86/lib/checksum_32.S
19047 @@ -28,7 +28,8 @@
19048 #include <linux/linkage.h>
19049 #include <asm/dwarf2.h>
19050 #include <asm/errno.h>
19051 -
19052 +#include <asm/segment.h>
19053 +
19054 /*
19055 * computes a partial checksum, e.g. for TCP/UDP fragments
19056 */
19057 @@ -296,9 +297,24 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
19058
19059 #define ARGBASE 16
19060 #define FP 12
19061 -
19062 -ENTRY(csum_partial_copy_generic)
19063 +
19064 +ENTRY(csum_partial_copy_generic_to_user)
19065 CFI_STARTPROC
19066 +
19067 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19068 + pushl_cfi %gs
19069 + popl_cfi %es
19070 + jmp csum_partial_copy_generic
19071 +#endif
19072 +
19073 +ENTRY(csum_partial_copy_generic_from_user)
19074 +
19075 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19076 + pushl_cfi %gs
19077 + popl_cfi %ds
19078 +#endif
19079 +
19080 +ENTRY(csum_partial_copy_generic)
19081 subl $4,%esp
19082 CFI_ADJUST_CFA_OFFSET 4
19083 pushl_cfi %edi
19084 @@ -320,7 +336,7 @@ ENTRY(csum_partial_copy_generic)
19085 jmp 4f
19086 SRC(1: movw (%esi), %bx )
19087 addl $2, %esi
19088 -DST( movw %bx, (%edi) )
19089 +DST( movw %bx, %es:(%edi) )
19090 addl $2, %edi
19091 addw %bx, %ax
19092 adcl $0, %eax
19093 @@ -332,30 +348,30 @@ DST( movw %bx, (%edi) )
19094 SRC(1: movl (%esi), %ebx )
19095 SRC( movl 4(%esi), %edx )
19096 adcl %ebx, %eax
19097 -DST( movl %ebx, (%edi) )
19098 +DST( movl %ebx, %es:(%edi) )
19099 adcl %edx, %eax
19100 -DST( movl %edx, 4(%edi) )
19101 +DST( movl %edx, %es:4(%edi) )
19102
19103 SRC( movl 8(%esi), %ebx )
19104 SRC( movl 12(%esi), %edx )
19105 adcl %ebx, %eax
19106 -DST( movl %ebx, 8(%edi) )
19107 +DST( movl %ebx, %es:8(%edi) )
19108 adcl %edx, %eax
19109 -DST( movl %edx, 12(%edi) )
19110 +DST( movl %edx, %es:12(%edi) )
19111
19112 SRC( movl 16(%esi), %ebx )
19113 SRC( movl 20(%esi), %edx )
19114 adcl %ebx, %eax
19115 -DST( movl %ebx, 16(%edi) )
19116 +DST( movl %ebx, %es:16(%edi) )
19117 adcl %edx, %eax
19118 -DST( movl %edx, 20(%edi) )
19119 +DST( movl %edx, %es:20(%edi) )
19120
19121 SRC( movl 24(%esi), %ebx )
19122 SRC( movl 28(%esi), %edx )
19123 adcl %ebx, %eax
19124 -DST( movl %ebx, 24(%edi) )
19125 +DST( movl %ebx, %es:24(%edi) )
19126 adcl %edx, %eax
19127 -DST( movl %edx, 28(%edi) )
19128 +DST( movl %edx, %es:28(%edi) )
19129
19130 lea 32(%esi), %esi
19131 lea 32(%edi), %edi
19132 @@ -369,7 +385,7 @@ DST( movl %edx, 28(%edi) )
19133 shrl $2, %edx # This clears CF
19134 SRC(3: movl (%esi), %ebx )
19135 adcl %ebx, %eax
19136 -DST( movl %ebx, (%edi) )
19137 +DST( movl %ebx, %es:(%edi) )
19138 lea 4(%esi), %esi
19139 lea 4(%edi), %edi
19140 dec %edx
19141 @@ -381,12 +397,12 @@ DST( movl %ebx, (%edi) )
19142 jb 5f
19143 SRC( movw (%esi), %cx )
19144 leal 2(%esi), %esi
19145 -DST( movw %cx, (%edi) )
19146 +DST( movw %cx, %es:(%edi) )
19147 leal 2(%edi), %edi
19148 je 6f
19149 shll $16,%ecx
19150 SRC(5: movb (%esi), %cl )
19151 -DST( movb %cl, (%edi) )
19152 +DST( movb %cl, %es:(%edi) )
19153 6: addl %ecx, %eax
19154 adcl $0, %eax
19155 7:
19156 @@ -397,7 +413,7 @@ DST( movb %cl, (%edi) )
19157
19158 6001:
19159 movl ARGBASE+20(%esp), %ebx # src_err_ptr
19160 - movl $-EFAULT, (%ebx)
19161 + movl $-EFAULT, %ss:(%ebx)
19162
19163 # zero the complete destination - computing the rest
19164 # is too much work
19165 @@ -410,11 +426,15 @@ DST( movb %cl, (%edi) )
19166
19167 6002:
19168 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
19169 - movl $-EFAULT,(%ebx)
19170 + movl $-EFAULT,%ss:(%ebx)
19171 jmp 5000b
19172
19173 .previous
19174
19175 + pushl_cfi %ss
19176 + popl_cfi %ds
19177 + pushl_cfi %ss
19178 + popl_cfi %es
19179 popl_cfi %ebx
19180 CFI_RESTORE ebx
19181 popl_cfi %esi
19182 @@ -424,26 +444,43 @@ DST( movb %cl, (%edi) )
19183 popl_cfi %ecx # equivalent to addl $4,%esp
19184 ret
19185 CFI_ENDPROC
19186 -ENDPROC(csum_partial_copy_generic)
19187 +ENDPROC(csum_partial_copy_generic_to_user)
19188
19189 #else
19190
19191 /* Version for PentiumII/PPro */
19192
19193 #define ROUND1(x) \
19194 + nop; nop; nop; \
19195 SRC(movl x(%esi), %ebx ) ; \
19196 addl %ebx, %eax ; \
19197 - DST(movl %ebx, x(%edi) ) ;
19198 + DST(movl %ebx, %es:x(%edi)) ;
19199
19200 #define ROUND(x) \
19201 + nop; nop; nop; \
19202 SRC(movl x(%esi), %ebx ) ; \
19203 adcl %ebx, %eax ; \
19204 - DST(movl %ebx, x(%edi) ) ;
19205 + DST(movl %ebx, %es:x(%edi)) ;
19206
19207 #define ARGBASE 12
19208 -
19209 -ENTRY(csum_partial_copy_generic)
19210 +
19211 +ENTRY(csum_partial_copy_generic_to_user)
19212 CFI_STARTPROC
19213 +
19214 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19215 + pushl_cfi %gs
19216 + popl_cfi %es
19217 + jmp csum_partial_copy_generic
19218 +#endif
19219 +
19220 +ENTRY(csum_partial_copy_generic_from_user)
19221 +
19222 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19223 + pushl_cfi %gs
19224 + popl_cfi %ds
19225 +#endif
19226 +
19227 +ENTRY(csum_partial_copy_generic)
19228 pushl_cfi %ebx
19229 CFI_REL_OFFSET ebx, 0
19230 pushl_cfi %edi
19231 @@ -464,7 +501,7 @@ ENTRY(csum_partial_copy_generic)
19232 subl %ebx, %edi
19233 lea -1(%esi),%edx
19234 andl $-32,%edx
19235 - lea 3f(%ebx,%ebx), %ebx
19236 + lea 3f(%ebx,%ebx,2), %ebx
19237 testl %esi, %esi
19238 jmp *%ebx
19239 1: addl $64,%esi
19240 @@ -485,19 +522,19 @@ ENTRY(csum_partial_copy_generic)
19241 jb 5f
19242 SRC( movw (%esi), %dx )
19243 leal 2(%esi), %esi
19244 -DST( movw %dx, (%edi) )
19245 +DST( movw %dx, %es:(%edi) )
19246 leal 2(%edi), %edi
19247 je 6f
19248 shll $16,%edx
19249 5:
19250 SRC( movb (%esi), %dl )
19251 -DST( movb %dl, (%edi) )
19252 +DST( movb %dl, %es:(%edi) )
19253 6: addl %edx, %eax
19254 adcl $0, %eax
19255 7:
19256 .section .fixup, "ax"
19257 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
19258 - movl $-EFAULT, (%ebx)
19259 + movl $-EFAULT, %ss:(%ebx)
19260 # zero the complete destination (computing the rest is too much work)
19261 movl ARGBASE+8(%esp),%edi # dst
19262 movl ARGBASE+12(%esp),%ecx # len
19263 @@ -505,10 +542,17 @@ DST( movb %dl, (%edi) )
19264 rep; stosb
19265 jmp 7b
19266 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
19267 - movl $-EFAULT, (%ebx)
19268 + movl $-EFAULT, %ss:(%ebx)
19269 jmp 7b
19270 .previous
19271
19272 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19273 + pushl_cfi %ss
19274 + popl_cfi %ds
19275 + pushl_cfi %ss
19276 + popl_cfi %es
19277 +#endif
19278 +
19279 popl_cfi %esi
19280 CFI_RESTORE esi
19281 popl_cfi %edi
19282 @@ -517,7 +561,7 @@ DST( movb %dl, (%edi) )
19283 CFI_RESTORE ebx
19284 ret
19285 CFI_ENDPROC
19286 -ENDPROC(csum_partial_copy_generic)
19287 +ENDPROC(csum_partial_copy_generic_to_user)
19288
19289 #undef ROUND
19290 #undef ROUND1
19291 diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
19292 index f2145cf..cea889d 100644
19293 --- a/arch/x86/lib/clear_page_64.S
19294 +++ b/arch/x86/lib/clear_page_64.S
19295 @@ -11,6 +11,7 @@ ENTRY(clear_page_c)
19296 movl $4096/8,%ecx
19297 xorl %eax,%eax
19298 rep stosq
19299 + pax_force_retaddr
19300 ret
19301 CFI_ENDPROC
19302 ENDPROC(clear_page_c)
19303 @@ -20,6 +21,7 @@ ENTRY(clear_page_c_e)
19304 movl $4096,%ecx
19305 xorl %eax,%eax
19306 rep stosb
19307 + pax_force_retaddr
19308 ret
19309 CFI_ENDPROC
19310 ENDPROC(clear_page_c_e)
19311 @@ -43,6 +45,7 @@ ENTRY(clear_page)
19312 leaq 64(%rdi),%rdi
19313 jnz .Lloop
19314 nop
19315 + pax_force_retaddr
19316 ret
19317 CFI_ENDPROC
19318 .Lclear_page_end:
19319 @@ -58,7 +61,7 @@ ENDPROC(clear_page)
19320
19321 #include <asm/cpufeature.h>
19322
19323 - .section .altinstr_replacement,"ax"
19324 + .section .altinstr_replacement,"a"
19325 1: .byte 0xeb /* jmp <disp8> */
19326 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
19327 2: .byte 0xeb /* jmp <disp8> */
19328 diff --git a/arch/x86/lib/cmpxchg16b_emu.S b/arch/x86/lib/cmpxchg16b_emu.S
19329 index 1e572c5..2a162cd 100644
19330 --- a/arch/x86/lib/cmpxchg16b_emu.S
19331 +++ b/arch/x86/lib/cmpxchg16b_emu.S
19332 @@ -53,11 +53,13 @@ this_cpu_cmpxchg16b_emu:
19333
19334 popf
19335 mov $1, %al
19336 + pax_force_retaddr
19337 ret
19338
19339 not_same:
19340 popf
19341 xor %al,%al
19342 + pax_force_retaddr
19343 ret
19344
19345 CFI_ENDPROC
19346 diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
19347 index 01c805b..dccb07f 100644
19348 --- a/arch/x86/lib/copy_page_64.S
19349 +++ b/arch/x86/lib/copy_page_64.S
19350 @@ -9,6 +9,7 @@ copy_page_c:
19351 CFI_STARTPROC
19352 movl $4096/8,%ecx
19353 rep movsq
19354 + pax_force_retaddr
19355 ret
19356 CFI_ENDPROC
19357 ENDPROC(copy_page_c)
19358 @@ -39,7 +40,7 @@ ENTRY(copy_page)
19359 movq 16 (%rsi), %rdx
19360 movq 24 (%rsi), %r8
19361 movq 32 (%rsi), %r9
19362 - movq 40 (%rsi), %r10
19363 + movq 40 (%rsi), %r13
19364 movq 48 (%rsi), %r11
19365 movq 56 (%rsi), %r12
19366
19367 @@ -50,7 +51,7 @@ ENTRY(copy_page)
19368 movq %rdx, 16 (%rdi)
19369 movq %r8, 24 (%rdi)
19370 movq %r9, 32 (%rdi)
19371 - movq %r10, 40 (%rdi)
19372 + movq %r13, 40 (%rdi)
19373 movq %r11, 48 (%rdi)
19374 movq %r12, 56 (%rdi)
19375
19376 @@ -69,7 +70,7 @@ ENTRY(copy_page)
19377 movq 16 (%rsi), %rdx
19378 movq 24 (%rsi), %r8
19379 movq 32 (%rsi), %r9
19380 - movq 40 (%rsi), %r10
19381 + movq 40 (%rsi), %r13
19382 movq 48 (%rsi), %r11
19383 movq 56 (%rsi), %r12
19384
19385 @@ -78,7 +79,7 @@ ENTRY(copy_page)
19386 movq %rdx, 16 (%rdi)
19387 movq %r8, 24 (%rdi)
19388 movq %r9, 32 (%rdi)
19389 - movq %r10, 40 (%rdi)
19390 + movq %r13, 40 (%rdi)
19391 movq %r11, 48 (%rdi)
19392 movq %r12, 56 (%rdi)
19393
19394 @@ -95,6 +96,7 @@ ENTRY(copy_page)
19395 CFI_RESTORE r13
19396 addq $3*8,%rsp
19397 CFI_ADJUST_CFA_OFFSET -3*8
19398 + pax_force_retaddr
19399 ret
19400 .Lcopy_page_end:
19401 CFI_ENDPROC
19402 @@ -105,7 +107,7 @@ ENDPROC(copy_page)
19403
19404 #include <asm/cpufeature.h>
19405
19406 - .section .altinstr_replacement,"ax"
19407 + .section .altinstr_replacement,"a"
19408 1: .byte 0xeb /* jmp <disp8> */
19409 .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */
19410 2:
19411 diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
19412 index 0248402..821c786 100644
19413 --- a/arch/x86/lib/copy_user_64.S
19414 +++ b/arch/x86/lib/copy_user_64.S
19415 @@ -16,6 +16,7 @@
19416 #include <asm/thread_info.h>
19417 #include <asm/cpufeature.h>
19418 #include <asm/alternative-asm.h>
19419 +#include <asm/pgtable.h>
19420
19421 /*
19422 * By placing feature2 after feature1 in altinstructions section, we logically
19423 @@ -29,7 +30,7 @@
19424 .byte 0xe9 /* 32bit jump */
19425 .long \orig-1f /* by default jump to orig */
19426 1:
19427 - .section .altinstr_replacement,"ax"
19428 + .section .altinstr_replacement,"a"
19429 2: .byte 0xe9 /* near jump with 32bit immediate */
19430 .long \alt1-1b /* offset */ /* or alternatively to alt1 */
19431 3: .byte 0xe9 /* near jump with 32bit immediate */
19432 @@ -71,47 +72,20 @@
19433 #endif
19434 .endm
19435
19436 -/* Standard copy_to_user with segment limit checking */
19437 -ENTRY(_copy_to_user)
19438 - CFI_STARTPROC
19439 - GET_THREAD_INFO(%rax)
19440 - movq %rdi,%rcx
19441 - addq %rdx,%rcx
19442 - jc bad_to_user
19443 - cmpq TI_addr_limit(%rax),%rcx
19444 - ja bad_to_user
19445 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
19446 - copy_user_generic_unrolled,copy_user_generic_string, \
19447 - copy_user_enhanced_fast_string
19448 - CFI_ENDPROC
19449 -ENDPROC(_copy_to_user)
19450 -
19451 -/* Standard copy_from_user with segment limit checking */
19452 -ENTRY(_copy_from_user)
19453 - CFI_STARTPROC
19454 - GET_THREAD_INFO(%rax)
19455 - movq %rsi,%rcx
19456 - addq %rdx,%rcx
19457 - jc bad_from_user
19458 - cmpq TI_addr_limit(%rax),%rcx
19459 - ja bad_from_user
19460 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
19461 - copy_user_generic_unrolled,copy_user_generic_string, \
19462 - copy_user_enhanced_fast_string
19463 - CFI_ENDPROC
19464 -ENDPROC(_copy_from_user)
19465 -
19466 .section .fixup,"ax"
19467 /* must zero dest */
19468 ENTRY(bad_from_user)
19469 bad_from_user:
19470 CFI_STARTPROC
19471 + testl %edx,%edx
19472 + js bad_to_user
19473 movl %edx,%ecx
19474 xorl %eax,%eax
19475 rep
19476 stosb
19477 bad_to_user:
19478 movl %edx,%eax
19479 + pax_force_retaddr
19480 ret
19481 CFI_ENDPROC
19482 ENDPROC(bad_from_user)
19483 @@ -141,19 +115,19 @@ ENTRY(copy_user_generic_unrolled)
19484 jz 17f
19485 1: movq (%rsi),%r8
19486 2: movq 1*8(%rsi),%r9
19487 -3: movq 2*8(%rsi),%r10
19488 +3: movq 2*8(%rsi),%rax
19489 4: movq 3*8(%rsi),%r11
19490 5: movq %r8,(%rdi)
19491 6: movq %r9,1*8(%rdi)
19492 -7: movq %r10,2*8(%rdi)
19493 +7: movq %rax,2*8(%rdi)
19494 8: movq %r11,3*8(%rdi)
19495 9: movq 4*8(%rsi),%r8
19496 10: movq 5*8(%rsi),%r9
19497 -11: movq 6*8(%rsi),%r10
19498 +11: movq 6*8(%rsi),%rax
19499 12: movq 7*8(%rsi),%r11
19500 13: movq %r8,4*8(%rdi)
19501 14: movq %r9,5*8(%rdi)
19502 -15: movq %r10,6*8(%rdi)
19503 +15: movq %rax,6*8(%rdi)
19504 16: movq %r11,7*8(%rdi)
19505 leaq 64(%rsi),%rsi
19506 leaq 64(%rdi),%rdi
19507 @@ -179,6 +153,7 @@ ENTRY(copy_user_generic_unrolled)
19508 decl %ecx
19509 jnz 21b
19510 23: xor %eax,%eax
19511 + pax_force_retaddr
19512 ret
19513
19514 .section .fixup,"ax"
19515 @@ -251,6 +226,7 @@ ENTRY(copy_user_generic_string)
19516 3: rep
19517 movsb
19518 4: xorl %eax,%eax
19519 + pax_force_retaddr
19520 ret
19521
19522 .section .fixup,"ax"
19523 @@ -287,6 +263,7 @@ ENTRY(copy_user_enhanced_fast_string)
19524 1: rep
19525 movsb
19526 2: xorl %eax,%eax
19527 + pax_force_retaddr
19528 ret
19529
19530 .section .fixup,"ax"
19531 diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S
19532 index cb0c112..e3a6895 100644
19533 --- a/arch/x86/lib/copy_user_nocache_64.S
19534 +++ b/arch/x86/lib/copy_user_nocache_64.S
19535 @@ -8,12 +8,14 @@
19536
19537 #include <linux/linkage.h>
19538 #include <asm/dwarf2.h>
19539 +#include <asm/alternative-asm.h>
19540
19541 #define FIX_ALIGNMENT 1
19542
19543 #include <asm/current.h>
19544 #include <asm/asm-offsets.h>
19545 #include <asm/thread_info.h>
19546 +#include <asm/pgtable.h>
19547
19548 .macro ALIGN_DESTINATION
19549 #ifdef FIX_ALIGNMENT
19550 @@ -50,6 +52,15 @@
19551 */
19552 ENTRY(__copy_user_nocache)
19553 CFI_STARTPROC
19554 +
19555 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19556 + mov $PAX_USER_SHADOW_BASE,%rcx
19557 + cmp %rcx,%rsi
19558 + jae 1f
19559 + add %rcx,%rsi
19560 +1:
19561 +#endif
19562 +
19563 cmpl $8,%edx
19564 jb 20f /* less then 8 bytes, go to byte copy loop */
19565 ALIGN_DESTINATION
19566 @@ -59,19 +70,19 @@ ENTRY(__copy_user_nocache)
19567 jz 17f
19568 1: movq (%rsi),%r8
19569 2: movq 1*8(%rsi),%r9
19570 -3: movq 2*8(%rsi),%r10
19571 +3: movq 2*8(%rsi),%rax
19572 4: movq 3*8(%rsi),%r11
19573 5: movnti %r8,(%rdi)
19574 6: movnti %r9,1*8(%rdi)
19575 -7: movnti %r10,2*8(%rdi)
19576 +7: movnti %rax,2*8(%rdi)
19577 8: movnti %r11,3*8(%rdi)
19578 9: movq 4*8(%rsi),%r8
19579 10: movq 5*8(%rsi),%r9
19580 -11: movq 6*8(%rsi),%r10
19581 +11: movq 6*8(%rsi),%rax
19582 12: movq 7*8(%rsi),%r11
19583 13: movnti %r8,4*8(%rdi)
19584 14: movnti %r9,5*8(%rdi)
19585 -15: movnti %r10,6*8(%rdi)
19586 +15: movnti %rax,6*8(%rdi)
19587 16: movnti %r11,7*8(%rdi)
19588 leaq 64(%rsi),%rsi
19589 leaq 64(%rdi),%rdi
19590 @@ -98,6 +109,7 @@ ENTRY(__copy_user_nocache)
19591 jnz 21b
19592 23: xorl %eax,%eax
19593 sfence
19594 + pax_force_retaddr
19595 ret
19596
19597 .section .fixup,"ax"
19598 diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
19599 index fb903b7..c92b7f7 100644
19600 --- a/arch/x86/lib/csum-copy_64.S
19601 +++ b/arch/x86/lib/csum-copy_64.S
19602 @@ -8,6 +8,7 @@
19603 #include <linux/linkage.h>
19604 #include <asm/dwarf2.h>
19605 #include <asm/errno.h>
19606 +#include <asm/alternative-asm.h>
19607
19608 /*
19609 * Checksum copy with exception handling.
19610 @@ -228,6 +229,7 @@ ENTRY(csum_partial_copy_generic)
19611 CFI_RESTORE rbp
19612 addq $7*8, %rsp
19613 CFI_ADJUST_CFA_OFFSET -7*8
19614 + pax_force_retaddr 0, 1
19615 ret
19616 CFI_RESTORE_STATE
19617
19618 diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
19619 index 459b58a..9570bc7 100644
19620 --- a/arch/x86/lib/csum-wrappers_64.c
19621 +++ b/arch/x86/lib/csum-wrappers_64.c
19622 @@ -52,7 +52,13 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
19623 len -= 2;
19624 }
19625 }
19626 - isum = csum_partial_copy_generic((__force const void *)src,
19627 +
19628 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19629 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
19630 + src += PAX_USER_SHADOW_BASE;
19631 +#endif
19632 +
19633 + isum = csum_partial_copy_generic((const void __force_kernel *)src,
19634 dst, len, isum, errp, NULL);
19635 if (unlikely(*errp))
19636 goto out_err;
19637 @@ -105,7 +111,13 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
19638 }
19639
19640 *errp = 0;
19641 - return csum_partial_copy_generic(src, (void __force *)dst,
19642 +
19643 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19644 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
19645 + dst += PAX_USER_SHADOW_BASE;
19646 +#endif
19647 +
19648 + return csum_partial_copy_generic(src, (void __force_kernel *)dst,
19649 len, isum, NULL, errp);
19650 }
19651 EXPORT_SYMBOL(csum_partial_copy_to_user);
19652 diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
19653 index 51f1504..ddac4c1 100644
19654 --- a/arch/x86/lib/getuser.S
19655 +++ b/arch/x86/lib/getuser.S
19656 @@ -33,15 +33,38 @@
19657 #include <asm/asm-offsets.h>
19658 #include <asm/thread_info.h>
19659 #include <asm/asm.h>
19660 +#include <asm/segment.h>
19661 +#include <asm/pgtable.h>
19662 +#include <asm/alternative-asm.h>
19663 +
19664 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
19665 +#define __copyuser_seg gs;
19666 +#else
19667 +#define __copyuser_seg
19668 +#endif
19669
19670 .text
19671 ENTRY(__get_user_1)
19672 CFI_STARTPROC
19673 +
19674 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19675 GET_THREAD_INFO(%_ASM_DX)
19676 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
19677 jae bad_get_user
19678 -1: movzb (%_ASM_AX),%edx
19679 +
19680 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19681 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
19682 + cmp %_ASM_DX,%_ASM_AX
19683 + jae 1234f
19684 + add %_ASM_DX,%_ASM_AX
19685 +1234:
19686 +#endif
19687 +
19688 +#endif
19689 +
19690 +1: __copyuser_seg movzb (%_ASM_AX),%edx
19691 xor %eax,%eax
19692 + pax_force_retaddr
19693 ret
19694 CFI_ENDPROC
19695 ENDPROC(__get_user_1)
19696 @@ -49,12 +72,26 @@ ENDPROC(__get_user_1)
19697 ENTRY(__get_user_2)
19698 CFI_STARTPROC
19699 add $1,%_ASM_AX
19700 +
19701 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19702 jc bad_get_user
19703 GET_THREAD_INFO(%_ASM_DX)
19704 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
19705 jae bad_get_user
19706 -2: movzwl -1(%_ASM_AX),%edx
19707 +
19708 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19709 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
19710 + cmp %_ASM_DX,%_ASM_AX
19711 + jae 1234f
19712 + add %_ASM_DX,%_ASM_AX
19713 +1234:
19714 +#endif
19715 +
19716 +#endif
19717 +
19718 +2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
19719 xor %eax,%eax
19720 + pax_force_retaddr
19721 ret
19722 CFI_ENDPROC
19723 ENDPROC(__get_user_2)
19724 @@ -62,12 +99,26 @@ ENDPROC(__get_user_2)
19725 ENTRY(__get_user_4)
19726 CFI_STARTPROC
19727 add $3,%_ASM_AX
19728 +
19729 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19730 jc bad_get_user
19731 GET_THREAD_INFO(%_ASM_DX)
19732 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
19733 jae bad_get_user
19734 -3: mov -3(%_ASM_AX),%edx
19735 +
19736 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19737 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
19738 + cmp %_ASM_DX,%_ASM_AX
19739 + jae 1234f
19740 + add %_ASM_DX,%_ASM_AX
19741 +1234:
19742 +#endif
19743 +
19744 +#endif
19745 +
19746 +3: __copyuser_seg mov -3(%_ASM_AX),%edx
19747 xor %eax,%eax
19748 + pax_force_retaddr
19749 ret
19750 CFI_ENDPROC
19751 ENDPROC(__get_user_4)
19752 @@ -80,8 +131,18 @@ ENTRY(__get_user_8)
19753 GET_THREAD_INFO(%_ASM_DX)
19754 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
19755 jae bad_get_user
19756 +
19757 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19758 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
19759 + cmp %_ASM_DX,%_ASM_AX
19760 + jae 1234f
19761 + add %_ASM_DX,%_ASM_AX
19762 +1234:
19763 +#endif
19764 +
19765 4: movq -7(%_ASM_AX),%_ASM_DX
19766 xor %eax,%eax
19767 + pax_force_retaddr
19768 ret
19769 CFI_ENDPROC
19770 ENDPROC(__get_user_8)
19771 @@ -91,6 +152,7 @@ bad_get_user:
19772 CFI_STARTPROC
19773 xor %edx,%edx
19774 mov $(-EFAULT),%_ASM_AX
19775 + pax_force_retaddr
19776 ret
19777 CFI_ENDPROC
19778 END(bad_get_user)
19779 diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c
19780 index 374562e..a75830b 100644
19781 --- a/arch/x86/lib/insn.c
19782 +++ b/arch/x86/lib/insn.c
19783 @@ -21,6 +21,11 @@
19784 #include <linux/string.h>
19785 #include <asm/inat.h>
19786 #include <asm/insn.h>
19787 +#ifdef __KERNEL__
19788 +#include <asm/pgtable_types.h>
19789 +#else
19790 +#define ktla_ktva(addr) addr
19791 +#endif
19792
19793 /* Verify next sizeof(t) bytes can be on the same instruction */
19794 #define validate_next(t, insn, n) \
19795 @@ -49,8 +54,8 @@
19796 void insn_init(struct insn *insn, const void *kaddr, int x86_64)
19797 {
19798 memset(insn, 0, sizeof(*insn));
19799 - insn->kaddr = kaddr;
19800 - insn->next_byte = kaddr;
19801 + insn->kaddr = ktla_ktva(kaddr);
19802 + insn->next_byte = ktla_ktva(kaddr);
19803 insn->x86_64 = x86_64 ? 1 : 0;
19804 insn->opnd_bytes = 4;
19805 if (x86_64)
19806 diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S
19807 index 05a95e7..326f2fa 100644
19808 --- a/arch/x86/lib/iomap_copy_64.S
19809 +++ b/arch/x86/lib/iomap_copy_64.S
19810 @@ -17,6 +17,7 @@
19811
19812 #include <linux/linkage.h>
19813 #include <asm/dwarf2.h>
19814 +#include <asm/alternative-asm.h>
19815
19816 /*
19817 * override generic version in lib/iomap_copy.c
19818 @@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy)
19819 CFI_STARTPROC
19820 movl %edx,%ecx
19821 rep movsd
19822 + pax_force_retaddr
19823 ret
19824 CFI_ENDPROC
19825 ENDPROC(__iowrite32_copy)
19826 diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
19827 index efbf2a0..8893637 100644
19828 --- a/arch/x86/lib/memcpy_64.S
19829 +++ b/arch/x86/lib/memcpy_64.S
19830 @@ -34,6 +34,7 @@
19831 rep movsq
19832 movl %edx, %ecx
19833 rep movsb
19834 + pax_force_retaddr
19835 ret
19836 .Lmemcpy_e:
19837 .previous
19838 @@ -51,6 +52,7 @@
19839
19840 movl %edx, %ecx
19841 rep movsb
19842 + pax_force_retaddr
19843 ret
19844 .Lmemcpy_e_e:
19845 .previous
19846 @@ -81,13 +83,13 @@ ENTRY(memcpy)
19847 */
19848 movq 0*8(%rsi), %r8
19849 movq 1*8(%rsi), %r9
19850 - movq 2*8(%rsi), %r10
19851 + movq 2*8(%rsi), %rcx
19852 movq 3*8(%rsi), %r11
19853 leaq 4*8(%rsi), %rsi
19854
19855 movq %r8, 0*8(%rdi)
19856 movq %r9, 1*8(%rdi)
19857 - movq %r10, 2*8(%rdi)
19858 + movq %rcx, 2*8(%rdi)
19859 movq %r11, 3*8(%rdi)
19860 leaq 4*8(%rdi), %rdi
19861 jae .Lcopy_forward_loop
19862 @@ -110,12 +112,12 @@ ENTRY(memcpy)
19863 subq $0x20, %rdx
19864 movq -1*8(%rsi), %r8
19865 movq -2*8(%rsi), %r9
19866 - movq -3*8(%rsi), %r10
19867 + movq -3*8(%rsi), %rcx
19868 movq -4*8(%rsi), %r11
19869 leaq -4*8(%rsi), %rsi
19870 movq %r8, -1*8(%rdi)
19871 movq %r9, -2*8(%rdi)
19872 - movq %r10, -3*8(%rdi)
19873 + movq %rcx, -3*8(%rdi)
19874 movq %r11, -4*8(%rdi)
19875 leaq -4*8(%rdi), %rdi
19876 jae .Lcopy_backward_loop
19877 @@ -135,12 +137,13 @@ ENTRY(memcpy)
19878 */
19879 movq 0*8(%rsi), %r8
19880 movq 1*8(%rsi), %r9
19881 - movq -2*8(%rsi, %rdx), %r10
19882 + movq -2*8(%rsi, %rdx), %rcx
19883 movq -1*8(%rsi, %rdx), %r11
19884 movq %r8, 0*8(%rdi)
19885 movq %r9, 1*8(%rdi)
19886 - movq %r10, -2*8(%rdi, %rdx)
19887 + movq %rcx, -2*8(%rdi, %rdx)
19888 movq %r11, -1*8(%rdi, %rdx)
19889 + pax_force_retaddr
19890 retq
19891 .p2align 4
19892 .Lless_16bytes:
19893 @@ -153,6 +156,7 @@ ENTRY(memcpy)
19894 movq -1*8(%rsi, %rdx), %r9
19895 movq %r8, 0*8(%rdi)
19896 movq %r9, -1*8(%rdi, %rdx)
19897 + pax_force_retaddr
19898 retq
19899 .p2align 4
19900 .Lless_8bytes:
19901 @@ -166,6 +170,7 @@ ENTRY(memcpy)
19902 movl -4(%rsi, %rdx), %r8d
19903 movl %ecx, (%rdi)
19904 movl %r8d, -4(%rdi, %rdx)
19905 + pax_force_retaddr
19906 retq
19907 .p2align 4
19908 .Lless_3bytes:
19909 @@ -183,6 +188,7 @@ ENTRY(memcpy)
19910 jnz .Lloop_1
19911
19912 .Lend:
19913 + pax_force_retaddr
19914 retq
19915 CFI_ENDPROC
19916 ENDPROC(memcpy)
19917 diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
19918 index ee16461..c39c199 100644
19919 --- a/arch/x86/lib/memmove_64.S
19920 +++ b/arch/x86/lib/memmove_64.S
19921 @@ -61,13 +61,13 @@ ENTRY(memmove)
19922 5:
19923 sub $0x20, %rdx
19924 movq 0*8(%rsi), %r11
19925 - movq 1*8(%rsi), %r10
19926 + movq 1*8(%rsi), %rcx
19927 movq 2*8(%rsi), %r9
19928 movq 3*8(%rsi), %r8
19929 leaq 4*8(%rsi), %rsi
19930
19931 movq %r11, 0*8(%rdi)
19932 - movq %r10, 1*8(%rdi)
19933 + movq %rcx, 1*8(%rdi)
19934 movq %r9, 2*8(%rdi)
19935 movq %r8, 3*8(%rdi)
19936 leaq 4*8(%rdi), %rdi
19937 @@ -81,10 +81,10 @@ ENTRY(memmove)
19938 4:
19939 movq %rdx, %rcx
19940 movq -8(%rsi, %rdx), %r11
19941 - lea -8(%rdi, %rdx), %r10
19942 + lea -8(%rdi, %rdx), %r9
19943 shrq $3, %rcx
19944 rep movsq
19945 - movq %r11, (%r10)
19946 + movq %r11, (%r9)
19947 jmp 13f
19948 .Lmemmove_end_forward:
19949
19950 @@ -95,14 +95,14 @@ ENTRY(memmove)
19951 7:
19952 movq %rdx, %rcx
19953 movq (%rsi), %r11
19954 - movq %rdi, %r10
19955 + movq %rdi, %r9
19956 leaq -8(%rsi, %rdx), %rsi
19957 leaq -8(%rdi, %rdx), %rdi
19958 shrq $3, %rcx
19959 std
19960 rep movsq
19961 cld
19962 - movq %r11, (%r10)
19963 + movq %r11, (%r9)
19964 jmp 13f
19965
19966 /*
19967 @@ -127,13 +127,13 @@ ENTRY(memmove)
19968 8:
19969 subq $0x20, %rdx
19970 movq -1*8(%rsi), %r11
19971 - movq -2*8(%rsi), %r10
19972 + movq -2*8(%rsi), %rcx
19973 movq -3*8(%rsi), %r9
19974 movq -4*8(%rsi), %r8
19975 leaq -4*8(%rsi), %rsi
19976
19977 movq %r11, -1*8(%rdi)
19978 - movq %r10, -2*8(%rdi)
19979 + movq %rcx, -2*8(%rdi)
19980 movq %r9, -3*8(%rdi)
19981 movq %r8, -4*8(%rdi)
19982 leaq -4*8(%rdi), %rdi
19983 @@ -151,11 +151,11 @@ ENTRY(memmove)
19984 * Move data from 16 bytes to 31 bytes.
19985 */
19986 movq 0*8(%rsi), %r11
19987 - movq 1*8(%rsi), %r10
19988 + movq 1*8(%rsi), %rcx
19989 movq -2*8(%rsi, %rdx), %r9
19990 movq -1*8(%rsi, %rdx), %r8
19991 movq %r11, 0*8(%rdi)
19992 - movq %r10, 1*8(%rdi)
19993 + movq %rcx, 1*8(%rdi)
19994 movq %r9, -2*8(%rdi, %rdx)
19995 movq %r8, -1*8(%rdi, %rdx)
19996 jmp 13f
19997 @@ -167,9 +167,9 @@ ENTRY(memmove)
19998 * Move data from 8 bytes to 15 bytes.
19999 */
20000 movq 0*8(%rsi), %r11
20001 - movq -1*8(%rsi, %rdx), %r10
20002 + movq -1*8(%rsi, %rdx), %r9
20003 movq %r11, 0*8(%rdi)
20004 - movq %r10, -1*8(%rdi, %rdx)
20005 + movq %r9, -1*8(%rdi, %rdx)
20006 jmp 13f
20007 10:
20008 cmpq $4, %rdx
20009 @@ -178,9 +178,9 @@ ENTRY(memmove)
20010 * Move data from 4 bytes to 7 bytes.
20011 */
20012 movl (%rsi), %r11d
20013 - movl -4(%rsi, %rdx), %r10d
20014 + movl -4(%rsi, %rdx), %r9d
20015 movl %r11d, (%rdi)
20016 - movl %r10d, -4(%rdi, %rdx)
20017 + movl %r9d, -4(%rdi, %rdx)
20018 jmp 13f
20019 11:
20020 cmp $2, %rdx
20021 @@ -189,9 +189,9 @@ ENTRY(memmove)
20022 * Move data from 2 bytes to 3 bytes.
20023 */
20024 movw (%rsi), %r11w
20025 - movw -2(%rsi, %rdx), %r10w
20026 + movw -2(%rsi, %rdx), %r9w
20027 movw %r11w, (%rdi)
20028 - movw %r10w, -2(%rdi, %rdx)
20029 + movw %r9w, -2(%rdi, %rdx)
20030 jmp 13f
20031 12:
20032 cmp $1, %rdx
20033 @@ -202,6 +202,7 @@ ENTRY(memmove)
20034 movb (%rsi), %r11b
20035 movb %r11b, (%rdi)
20036 13:
20037 + pax_force_retaddr
20038 retq
20039 CFI_ENDPROC
20040
20041 @@ -210,6 +211,7 @@ ENTRY(memmove)
20042 /* Forward moving data. */
20043 movq %rdx, %rcx
20044 rep movsb
20045 + pax_force_retaddr
20046 retq
20047 .Lmemmove_end_forward_efs:
20048 .previous
20049 diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
20050 index 79bd454..dff325a 100644
20051 --- a/arch/x86/lib/memset_64.S
20052 +++ b/arch/x86/lib/memset_64.S
20053 @@ -31,6 +31,7 @@
20054 movl %r8d,%ecx
20055 rep stosb
20056 movq %r9,%rax
20057 + pax_force_retaddr
20058 ret
20059 .Lmemset_e:
20060 .previous
20061 @@ -53,6 +54,7 @@
20062 movl %edx,%ecx
20063 rep stosb
20064 movq %r9,%rax
20065 + pax_force_retaddr
20066 ret
20067 .Lmemset_e_e:
20068 .previous
20069 @@ -60,13 +62,13 @@
20070 ENTRY(memset)
20071 ENTRY(__memset)
20072 CFI_STARTPROC
20073 - movq %rdi,%r10
20074 movq %rdx,%r11
20075
20076 /* expand byte value */
20077 movzbl %sil,%ecx
20078 movabs $0x0101010101010101,%rax
20079 mul %rcx /* with rax, clobbers rdx */
20080 + movq %rdi,%rdx
20081
20082 /* align dst */
20083 movl %edi,%r9d
20084 @@ -120,7 +122,8 @@ ENTRY(__memset)
20085 jnz .Lloop_1
20086
20087 .Lende:
20088 - movq %r10,%rax
20089 + movq %rdx,%rax
20090 + pax_force_retaddr
20091 ret
20092
20093 CFI_RESTORE_STATE
20094 diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c
20095 index c9f2d9b..e7fd2c0 100644
20096 --- a/arch/x86/lib/mmx_32.c
20097 +++ b/arch/x86/lib/mmx_32.c
20098 @@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
20099 {
20100 void *p;
20101 int i;
20102 + unsigned long cr0;
20103
20104 if (unlikely(in_interrupt()))
20105 return __memcpy(to, from, len);
20106 @@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
20107 kernel_fpu_begin();
20108
20109 __asm__ __volatile__ (
20110 - "1: prefetch (%0)\n" /* This set is 28 bytes */
20111 - " prefetch 64(%0)\n"
20112 - " prefetch 128(%0)\n"
20113 - " prefetch 192(%0)\n"
20114 - " prefetch 256(%0)\n"
20115 + "1: prefetch (%1)\n" /* This set is 28 bytes */
20116 + " prefetch 64(%1)\n"
20117 + " prefetch 128(%1)\n"
20118 + " prefetch 192(%1)\n"
20119 + " prefetch 256(%1)\n"
20120 "2: \n"
20121 ".section .fixup, \"ax\"\n"
20122 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
20123 + "3: \n"
20124 +
20125 +#ifdef CONFIG_PAX_KERNEXEC
20126 + " movl %%cr0, %0\n"
20127 + " movl %0, %%eax\n"
20128 + " andl $0xFFFEFFFF, %%eax\n"
20129 + " movl %%eax, %%cr0\n"
20130 +#endif
20131 +
20132 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
20133 +
20134 +#ifdef CONFIG_PAX_KERNEXEC
20135 + " movl %0, %%cr0\n"
20136 +#endif
20137 +
20138 " jmp 2b\n"
20139 ".previous\n"
20140 _ASM_EXTABLE(1b, 3b)
20141 - : : "r" (from));
20142 + : "=&r" (cr0) : "r" (from) : "ax");
20143
20144 for ( ; i > 5; i--) {
20145 __asm__ __volatile__ (
20146 - "1: prefetch 320(%0)\n"
20147 - "2: movq (%0), %%mm0\n"
20148 - " movq 8(%0), %%mm1\n"
20149 - " movq 16(%0), %%mm2\n"
20150 - " movq 24(%0), %%mm3\n"
20151 - " movq %%mm0, (%1)\n"
20152 - " movq %%mm1, 8(%1)\n"
20153 - " movq %%mm2, 16(%1)\n"
20154 - " movq %%mm3, 24(%1)\n"
20155 - " movq 32(%0), %%mm0\n"
20156 - " movq 40(%0), %%mm1\n"
20157 - " movq 48(%0), %%mm2\n"
20158 - " movq 56(%0), %%mm3\n"
20159 - " movq %%mm0, 32(%1)\n"
20160 - " movq %%mm1, 40(%1)\n"
20161 - " movq %%mm2, 48(%1)\n"
20162 - " movq %%mm3, 56(%1)\n"
20163 + "1: prefetch 320(%1)\n"
20164 + "2: movq (%1), %%mm0\n"
20165 + " movq 8(%1), %%mm1\n"
20166 + " movq 16(%1), %%mm2\n"
20167 + " movq 24(%1), %%mm3\n"
20168 + " movq %%mm0, (%2)\n"
20169 + " movq %%mm1, 8(%2)\n"
20170 + " movq %%mm2, 16(%2)\n"
20171 + " movq %%mm3, 24(%2)\n"
20172 + " movq 32(%1), %%mm0\n"
20173 + " movq 40(%1), %%mm1\n"
20174 + " movq 48(%1), %%mm2\n"
20175 + " movq 56(%1), %%mm3\n"
20176 + " movq %%mm0, 32(%2)\n"
20177 + " movq %%mm1, 40(%2)\n"
20178 + " movq %%mm2, 48(%2)\n"
20179 + " movq %%mm3, 56(%2)\n"
20180 ".section .fixup, \"ax\"\n"
20181 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
20182 + "3:\n"
20183 +
20184 +#ifdef CONFIG_PAX_KERNEXEC
20185 + " movl %%cr0, %0\n"
20186 + " movl %0, %%eax\n"
20187 + " andl $0xFFFEFFFF, %%eax\n"
20188 + " movl %%eax, %%cr0\n"
20189 +#endif
20190 +
20191 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
20192 +
20193 +#ifdef CONFIG_PAX_KERNEXEC
20194 + " movl %0, %%cr0\n"
20195 +#endif
20196 +
20197 " jmp 2b\n"
20198 ".previous\n"
20199 _ASM_EXTABLE(1b, 3b)
20200 - : : "r" (from), "r" (to) : "memory");
20201 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
20202
20203 from += 64;
20204 to += 64;
20205 @@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
20206 static void fast_copy_page(void *to, void *from)
20207 {
20208 int i;
20209 + unsigned long cr0;
20210
20211 kernel_fpu_begin();
20212
20213 @@ -166,42 +196,70 @@ static void fast_copy_page(void *to, void *from)
20214 * but that is for later. -AV
20215 */
20216 __asm__ __volatile__(
20217 - "1: prefetch (%0)\n"
20218 - " prefetch 64(%0)\n"
20219 - " prefetch 128(%0)\n"
20220 - " prefetch 192(%0)\n"
20221 - " prefetch 256(%0)\n"
20222 + "1: prefetch (%1)\n"
20223 + " prefetch 64(%1)\n"
20224 + " prefetch 128(%1)\n"
20225 + " prefetch 192(%1)\n"
20226 + " prefetch 256(%1)\n"
20227 "2: \n"
20228 ".section .fixup, \"ax\"\n"
20229 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
20230 + "3: \n"
20231 +
20232 +#ifdef CONFIG_PAX_KERNEXEC
20233 + " movl %%cr0, %0\n"
20234 + " movl %0, %%eax\n"
20235 + " andl $0xFFFEFFFF, %%eax\n"
20236 + " movl %%eax, %%cr0\n"
20237 +#endif
20238 +
20239 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
20240 +
20241 +#ifdef CONFIG_PAX_KERNEXEC
20242 + " movl %0, %%cr0\n"
20243 +#endif
20244 +
20245 " jmp 2b\n"
20246 ".previous\n"
20247 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
20248 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
20249
20250 for (i = 0; i < (4096-320)/64; i++) {
20251 __asm__ __volatile__ (
20252 - "1: prefetch 320(%0)\n"
20253 - "2: movq (%0), %%mm0\n"
20254 - " movntq %%mm0, (%1)\n"
20255 - " movq 8(%0), %%mm1\n"
20256 - " movntq %%mm1, 8(%1)\n"
20257 - " movq 16(%0), %%mm2\n"
20258 - " movntq %%mm2, 16(%1)\n"
20259 - " movq 24(%0), %%mm3\n"
20260 - " movntq %%mm3, 24(%1)\n"
20261 - " movq 32(%0), %%mm4\n"
20262 - " movntq %%mm4, 32(%1)\n"
20263 - " movq 40(%0), %%mm5\n"
20264 - " movntq %%mm5, 40(%1)\n"
20265 - " movq 48(%0), %%mm6\n"
20266 - " movntq %%mm6, 48(%1)\n"
20267 - " movq 56(%0), %%mm7\n"
20268 - " movntq %%mm7, 56(%1)\n"
20269 + "1: prefetch 320(%1)\n"
20270 + "2: movq (%1), %%mm0\n"
20271 + " movntq %%mm0, (%2)\n"
20272 + " movq 8(%1), %%mm1\n"
20273 + " movntq %%mm1, 8(%2)\n"
20274 + " movq 16(%1), %%mm2\n"
20275 + " movntq %%mm2, 16(%2)\n"
20276 + " movq 24(%1), %%mm3\n"
20277 + " movntq %%mm3, 24(%2)\n"
20278 + " movq 32(%1), %%mm4\n"
20279 + " movntq %%mm4, 32(%2)\n"
20280 + " movq 40(%1), %%mm5\n"
20281 + " movntq %%mm5, 40(%2)\n"
20282 + " movq 48(%1), %%mm6\n"
20283 + " movntq %%mm6, 48(%2)\n"
20284 + " movq 56(%1), %%mm7\n"
20285 + " movntq %%mm7, 56(%2)\n"
20286 ".section .fixup, \"ax\"\n"
20287 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
20288 + "3:\n"
20289 +
20290 +#ifdef CONFIG_PAX_KERNEXEC
20291 + " movl %%cr0, %0\n"
20292 + " movl %0, %%eax\n"
20293 + " andl $0xFFFEFFFF, %%eax\n"
20294 + " movl %%eax, %%cr0\n"
20295 +#endif
20296 +
20297 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
20298 +
20299 +#ifdef CONFIG_PAX_KERNEXEC
20300 + " movl %0, %%cr0\n"
20301 +#endif
20302 +
20303 " jmp 2b\n"
20304 ".previous\n"
20305 - _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
20306 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
20307
20308 from += 64;
20309 to += 64;
20310 @@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
20311 static void fast_copy_page(void *to, void *from)
20312 {
20313 int i;
20314 + unsigned long cr0;
20315
20316 kernel_fpu_begin();
20317
20318 __asm__ __volatile__ (
20319 - "1: prefetch (%0)\n"
20320 - " prefetch 64(%0)\n"
20321 - " prefetch 128(%0)\n"
20322 - " prefetch 192(%0)\n"
20323 - " prefetch 256(%0)\n"
20324 + "1: prefetch (%1)\n"
20325 + " prefetch 64(%1)\n"
20326 + " prefetch 128(%1)\n"
20327 + " prefetch 192(%1)\n"
20328 + " prefetch 256(%1)\n"
20329 "2: \n"
20330 ".section .fixup, \"ax\"\n"
20331 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
20332 + "3: \n"
20333 +
20334 +#ifdef CONFIG_PAX_KERNEXEC
20335 + " movl %%cr0, %0\n"
20336 + " movl %0, %%eax\n"
20337 + " andl $0xFFFEFFFF, %%eax\n"
20338 + " movl %%eax, %%cr0\n"
20339 +#endif
20340 +
20341 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
20342 +
20343 +#ifdef CONFIG_PAX_KERNEXEC
20344 + " movl %0, %%cr0\n"
20345 +#endif
20346 +
20347 " jmp 2b\n"
20348 ".previous\n"
20349 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
20350 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
20351
20352 for (i = 0; i < 4096/64; i++) {
20353 __asm__ __volatile__ (
20354 - "1: prefetch 320(%0)\n"
20355 - "2: movq (%0), %%mm0\n"
20356 - " movq 8(%0), %%mm1\n"
20357 - " movq 16(%0), %%mm2\n"
20358 - " movq 24(%0), %%mm3\n"
20359 - " movq %%mm0, (%1)\n"
20360 - " movq %%mm1, 8(%1)\n"
20361 - " movq %%mm2, 16(%1)\n"
20362 - " movq %%mm3, 24(%1)\n"
20363 - " movq 32(%0), %%mm0\n"
20364 - " movq 40(%0), %%mm1\n"
20365 - " movq 48(%0), %%mm2\n"
20366 - " movq 56(%0), %%mm3\n"
20367 - " movq %%mm0, 32(%1)\n"
20368 - " movq %%mm1, 40(%1)\n"
20369 - " movq %%mm2, 48(%1)\n"
20370 - " movq %%mm3, 56(%1)\n"
20371 + "1: prefetch 320(%1)\n"
20372 + "2: movq (%1), %%mm0\n"
20373 + " movq 8(%1), %%mm1\n"
20374 + " movq 16(%1), %%mm2\n"
20375 + " movq 24(%1), %%mm3\n"
20376 + " movq %%mm0, (%2)\n"
20377 + " movq %%mm1, 8(%2)\n"
20378 + " movq %%mm2, 16(%2)\n"
20379 + " movq %%mm3, 24(%2)\n"
20380 + " movq 32(%1), %%mm0\n"
20381 + " movq 40(%1), %%mm1\n"
20382 + " movq 48(%1), %%mm2\n"
20383 + " movq 56(%1), %%mm3\n"
20384 + " movq %%mm0, 32(%2)\n"
20385 + " movq %%mm1, 40(%2)\n"
20386 + " movq %%mm2, 48(%2)\n"
20387 + " movq %%mm3, 56(%2)\n"
20388 ".section .fixup, \"ax\"\n"
20389 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
20390 + "3:\n"
20391 +
20392 +#ifdef CONFIG_PAX_KERNEXEC
20393 + " movl %%cr0, %0\n"
20394 + " movl %0, %%eax\n"
20395 + " andl $0xFFFEFFFF, %%eax\n"
20396 + " movl %%eax, %%cr0\n"
20397 +#endif
20398 +
20399 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
20400 +
20401 +#ifdef CONFIG_PAX_KERNEXEC
20402 + " movl %0, %%cr0\n"
20403 +#endif
20404 +
20405 " jmp 2b\n"
20406 ".previous\n"
20407 _ASM_EXTABLE(1b, 3b)
20408 - : : "r" (from), "r" (to) : "memory");
20409 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
20410
20411 from += 64;
20412 to += 64;
20413 diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
20414 index 69fa106..adda88b 100644
20415 --- a/arch/x86/lib/msr-reg.S
20416 +++ b/arch/x86/lib/msr-reg.S
20417 @@ -3,6 +3,7 @@
20418 #include <asm/dwarf2.h>
20419 #include <asm/asm.h>
20420 #include <asm/msr.h>
20421 +#include <asm/alternative-asm.h>
20422
20423 #ifdef CONFIG_X86_64
20424 /*
20425 @@ -16,7 +17,7 @@ ENTRY(native_\op\()_safe_regs)
20426 CFI_STARTPROC
20427 pushq_cfi %rbx
20428 pushq_cfi %rbp
20429 - movq %rdi, %r10 /* Save pointer */
20430 + movq %rdi, %r9 /* Save pointer */
20431 xorl %r11d, %r11d /* Return value */
20432 movl (%rdi), %eax
20433 movl 4(%rdi), %ecx
20434 @@ -27,16 +28,17 @@ ENTRY(native_\op\()_safe_regs)
20435 movl 28(%rdi), %edi
20436 CFI_REMEMBER_STATE
20437 1: \op
20438 -2: movl %eax, (%r10)
20439 +2: movl %eax, (%r9)
20440 movl %r11d, %eax /* Return value */
20441 - movl %ecx, 4(%r10)
20442 - movl %edx, 8(%r10)
20443 - movl %ebx, 12(%r10)
20444 - movl %ebp, 20(%r10)
20445 - movl %esi, 24(%r10)
20446 - movl %edi, 28(%r10)
20447 + movl %ecx, 4(%r9)
20448 + movl %edx, 8(%r9)
20449 + movl %ebx, 12(%r9)
20450 + movl %ebp, 20(%r9)
20451 + movl %esi, 24(%r9)
20452 + movl %edi, 28(%r9)
20453 popq_cfi %rbp
20454 popq_cfi %rbx
20455 + pax_force_retaddr
20456 ret
20457 3:
20458 CFI_RESTORE_STATE
20459 diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
20460 index 36b0d15..d381858 100644
20461 --- a/arch/x86/lib/putuser.S
20462 +++ b/arch/x86/lib/putuser.S
20463 @@ -15,7 +15,9 @@
20464 #include <asm/thread_info.h>
20465 #include <asm/errno.h>
20466 #include <asm/asm.h>
20467 -
20468 +#include <asm/segment.h>
20469 +#include <asm/pgtable.h>
20470 +#include <asm/alternative-asm.h>
20471
20472 /*
20473 * __put_user_X
20474 @@ -29,52 +31,119 @@
20475 * as they get called from within inline assembly.
20476 */
20477
20478 -#define ENTER CFI_STARTPROC ; \
20479 - GET_THREAD_INFO(%_ASM_BX)
20480 -#define EXIT ret ; \
20481 +#define ENTER CFI_STARTPROC
20482 +#define EXIT pax_force_retaddr; ret ; \
20483 CFI_ENDPROC
20484
20485 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20486 +#define _DEST %_ASM_CX,%_ASM_BX
20487 +#else
20488 +#define _DEST %_ASM_CX
20489 +#endif
20490 +
20491 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
20492 +#define __copyuser_seg gs;
20493 +#else
20494 +#define __copyuser_seg
20495 +#endif
20496 +
20497 .text
20498 ENTRY(__put_user_1)
20499 ENTER
20500 +
20501 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
20502 + GET_THREAD_INFO(%_ASM_BX)
20503 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
20504 jae bad_put_user
20505 -1: movb %al,(%_ASM_CX)
20506 +
20507 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20508 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
20509 + cmp %_ASM_BX,%_ASM_CX
20510 + jb 1234f
20511 + xor %ebx,%ebx
20512 +1234:
20513 +#endif
20514 +
20515 +#endif
20516 +
20517 +1: __copyuser_seg movb %al,(_DEST)
20518 xor %eax,%eax
20519 EXIT
20520 ENDPROC(__put_user_1)
20521
20522 ENTRY(__put_user_2)
20523 ENTER
20524 +
20525 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
20526 + GET_THREAD_INFO(%_ASM_BX)
20527 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
20528 sub $1,%_ASM_BX
20529 cmp %_ASM_BX,%_ASM_CX
20530 jae bad_put_user
20531 -2: movw %ax,(%_ASM_CX)
20532 +
20533 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20534 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
20535 + cmp %_ASM_BX,%_ASM_CX
20536 + jb 1234f
20537 + xor %ebx,%ebx
20538 +1234:
20539 +#endif
20540 +
20541 +#endif
20542 +
20543 +2: __copyuser_seg movw %ax,(_DEST)
20544 xor %eax,%eax
20545 EXIT
20546 ENDPROC(__put_user_2)
20547
20548 ENTRY(__put_user_4)
20549 ENTER
20550 +
20551 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
20552 + GET_THREAD_INFO(%_ASM_BX)
20553 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
20554 sub $3,%_ASM_BX
20555 cmp %_ASM_BX,%_ASM_CX
20556 jae bad_put_user
20557 -3: movl %eax,(%_ASM_CX)
20558 +
20559 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20560 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
20561 + cmp %_ASM_BX,%_ASM_CX
20562 + jb 1234f
20563 + xor %ebx,%ebx
20564 +1234:
20565 +#endif
20566 +
20567 +#endif
20568 +
20569 +3: __copyuser_seg movl %eax,(_DEST)
20570 xor %eax,%eax
20571 EXIT
20572 ENDPROC(__put_user_4)
20573
20574 ENTRY(__put_user_8)
20575 ENTER
20576 +
20577 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
20578 + GET_THREAD_INFO(%_ASM_BX)
20579 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
20580 sub $7,%_ASM_BX
20581 cmp %_ASM_BX,%_ASM_CX
20582 jae bad_put_user
20583 -4: mov %_ASM_AX,(%_ASM_CX)
20584 +
20585 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20586 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
20587 + cmp %_ASM_BX,%_ASM_CX
20588 + jb 1234f
20589 + xor %ebx,%ebx
20590 +1234:
20591 +#endif
20592 +
20593 +#endif
20594 +
20595 +4: __copyuser_seg mov %_ASM_AX,(_DEST)
20596 #ifdef CONFIG_X86_32
20597 -5: movl %edx,4(%_ASM_CX)
20598 +5: __copyuser_seg movl %edx,4(_DEST)
20599 #endif
20600 xor %eax,%eax
20601 EXIT
20602 diff --git a/arch/x86/lib/rwlock.S b/arch/x86/lib/rwlock.S
20603 index 1cad221..de671ee 100644
20604 --- a/arch/x86/lib/rwlock.S
20605 +++ b/arch/x86/lib/rwlock.S
20606 @@ -16,13 +16,34 @@ ENTRY(__write_lock_failed)
20607 FRAME
20608 0: LOCK_PREFIX
20609 WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
20610 +
20611 +#ifdef CONFIG_PAX_REFCOUNT
20612 + jno 1234f
20613 + LOCK_PREFIX
20614 + WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
20615 + int $4
20616 +1234:
20617 + _ASM_EXTABLE(1234b, 1234b)
20618 +#endif
20619 +
20620 1: rep; nop
20621 cmpl $WRITE_LOCK_CMP, (%__lock_ptr)
20622 jne 1b
20623 LOCK_PREFIX
20624 WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
20625 +
20626 +#ifdef CONFIG_PAX_REFCOUNT
20627 + jno 1234f
20628 + LOCK_PREFIX
20629 + WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
20630 + int $4
20631 +1234:
20632 + _ASM_EXTABLE(1234b, 1234b)
20633 +#endif
20634 +
20635 jnz 0b
20636 ENDFRAME
20637 + pax_force_retaddr
20638 ret
20639 CFI_ENDPROC
20640 END(__write_lock_failed)
20641 @@ -32,13 +53,34 @@ ENTRY(__read_lock_failed)
20642 FRAME
20643 0: LOCK_PREFIX
20644 READ_LOCK_SIZE(inc) (%__lock_ptr)
20645 +
20646 +#ifdef CONFIG_PAX_REFCOUNT
20647 + jno 1234f
20648 + LOCK_PREFIX
20649 + READ_LOCK_SIZE(dec) (%__lock_ptr)
20650 + int $4
20651 +1234:
20652 + _ASM_EXTABLE(1234b, 1234b)
20653 +#endif
20654 +
20655 1: rep; nop
20656 READ_LOCK_SIZE(cmp) $1, (%__lock_ptr)
20657 js 1b
20658 LOCK_PREFIX
20659 READ_LOCK_SIZE(dec) (%__lock_ptr)
20660 +
20661 +#ifdef CONFIG_PAX_REFCOUNT
20662 + jno 1234f
20663 + LOCK_PREFIX
20664 + READ_LOCK_SIZE(inc) (%__lock_ptr)
20665 + int $4
20666 +1234:
20667 + _ASM_EXTABLE(1234b, 1234b)
20668 +#endif
20669 +
20670 js 0b
20671 ENDFRAME
20672 + pax_force_retaddr
20673 ret
20674 CFI_ENDPROC
20675 END(__read_lock_failed)
20676 diff --git a/arch/x86/lib/rwsem.S b/arch/x86/lib/rwsem.S
20677 index 5dff5f0..cadebf4 100644
20678 --- a/arch/x86/lib/rwsem.S
20679 +++ b/arch/x86/lib/rwsem.S
20680 @@ -94,6 +94,7 @@ ENTRY(call_rwsem_down_read_failed)
20681 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
20682 CFI_RESTORE __ASM_REG(dx)
20683 restore_common_regs
20684 + pax_force_retaddr
20685 ret
20686 CFI_ENDPROC
20687 ENDPROC(call_rwsem_down_read_failed)
20688 @@ -104,6 +105,7 @@ ENTRY(call_rwsem_down_write_failed)
20689 movq %rax,%rdi
20690 call rwsem_down_write_failed
20691 restore_common_regs
20692 + pax_force_retaddr
20693 ret
20694 CFI_ENDPROC
20695 ENDPROC(call_rwsem_down_write_failed)
20696 @@ -117,7 +119,8 @@ ENTRY(call_rwsem_wake)
20697 movq %rax,%rdi
20698 call rwsem_wake
20699 restore_common_regs
20700 -1: ret
20701 +1: pax_force_retaddr
20702 + ret
20703 CFI_ENDPROC
20704 ENDPROC(call_rwsem_wake)
20705
20706 @@ -131,6 +134,7 @@ ENTRY(call_rwsem_downgrade_wake)
20707 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
20708 CFI_RESTORE __ASM_REG(dx)
20709 restore_common_regs
20710 + pax_force_retaddr
20711 ret
20712 CFI_ENDPROC
20713 ENDPROC(call_rwsem_downgrade_wake)
20714 diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
20715 index a63efd6..ccecad8 100644
20716 --- a/arch/x86/lib/thunk_64.S
20717 +++ b/arch/x86/lib/thunk_64.S
20718 @@ -8,6 +8,7 @@
20719 #include <linux/linkage.h>
20720 #include <asm/dwarf2.h>
20721 #include <asm/calling.h>
20722 +#include <asm/alternative-asm.h>
20723
20724 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
20725 .macro THUNK name, func, put_ret_addr_in_rdi=0
20726 @@ -41,5 +42,6 @@
20727 SAVE_ARGS
20728 restore:
20729 RESTORE_ARGS
20730 + pax_force_retaddr
20731 ret
20732 CFI_ENDPROC
20733 diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
20734 index e218d5d..35679b4 100644
20735 --- a/arch/x86/lib/usercopy_32.c
20736 +++ b/arch/x86/lib/usercopy_32.c
20737 @@ -43,7 +43,7 @@ do { \
20738 __asm__ __volatile__( \
20739 " testl %1,%1\n" \
20740 " jz 2f\n" \
20741 - "0: lodsb\n" \
20742 + "0: "__copyuser_seg"lodsb\n" \
20743 " stosb\n" \
20744 " testb %%al,%%al\n" \
20745 " jz 1f\n" \
20746 @@ -128,10 +128,12 @@ do { \
20747 int __d0; \
20748 might_fault(); \
20749 __asm__ __volatile__( \
20750 + __COPYUSER_SET_ES \
20751 "0: rep; stosl\n" \
20752 " movl %2,%0\n" \
20753 "1: rep; stosb\n" \
20754 "2:\n" \
20755 + __COPYUSER_RESTORE_ES \
20756 ".section .fixup,\"ax\"\n" \
20757 "3: lea 0(%2,%0,4),%0\n" \
20758 " jmp 2b\n" \
20759 @@ -200,6 +202,7 @@ long strnlen_user(const char __user *s, long n)
20760 might_fault();
20761
20762 __asm__ __volatile__(
20763 + __COPYUSER_SET_ES
20764 " testl %0, %0\n"
20765 " jz 3f\n"
20766 " andl %0,%%ecx\n"
20767 @@ -208,6 +211,7 @@ long strnlen_user(const char __user *s, long n)
20768 " subl %%ecx,%0\n"
20769 " addl %0,%%eax\n"
20770 "1:\n"
20771 + __COPYUSER_RESTORE_ES
20772 ".section .fixup,\"ax\"\n"
20773 "2: xorl %%eax,%%eax\n"
20774 " jmp 1b\n"
20775 @@ -227,7 +231,7 @@ EXPORT_SYMBOL(strnlen_user);
20776
20777 #ifdef CONFIG_X86_INTEL_USERCOPY
20778 static unsigned long
20779 -__copy_user_intel(void __user *to, const void *from, unsigned long size)
20780 +__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
20781 {
20782 int d0, d1;
20783 __asm__ __volatile__(
20784 @@ -239,36 +243,36 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
20785 " .align 2,0x90\n"
20786 "3: movl 0(%4), %%eax\n"
20787 "4: movl 4(%4), %%edx\n"
20788 - "5: movl %%eax, 0(%3)\n"
20789 - "6: movl %%edx, 4(%3)\n"
20790 + "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
20791 + "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
20792 "7: movl 8(%4), %%eax\n"
20793 "8: movl 12(%4),%%edx\n"
20794 - "9: movl %%eax, 8(%3)\n"
20795 - "10: movl %%edx, 12(%3)\n"
20796 + "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
20797 + "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
20798 "11: movl 16(%4), %%eax\n"
20799 "12: movl 20(%4), %%edx\n"
20800 - "13: movl %%eax, 16(%3)\n"
20801 - "14: movl %%edx, 20(%3)\n"
20802 + "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
20803 + "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
20804 "15: movl 24(%4), %%eax\n"
20805 "16: movl 28(%4), %%edx\n"
20806 - "17: movl %%eax, 24(%3)\n"
20807 - "18: movl %%edx, 28(%3)\n"
20808 + "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
20809 + "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
20810 "19: movl 32(%4), %%eax\n"
20811 "20: movl 36(%4), %%edx\n"
20812 - "21: movl %%eax, 32(%3)\n"
20813 - "22: movl %%edx, 36(%3)\n"
20814 + "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
20815 + "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
20816 "23: movl 40(%4), %%eax\n"
20817 "24: movl 44(%4), %%edx\n"
20818 - "25: movl %%eax, 40(%3)\n"
20819 - "26: movl %%edx, 44(%3)\n"
20820 + "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
20821 + "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
20822 "27: movl 48(%4), %%eax\n"
20823 "28: movl 52(%4), %%edx\n"
20824 - "29: movl %%eax, 48(%3)\n"
20825 - "30: movl %%edx, 52(%3)\n"
20826 + "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
20827 + "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
20828 "31: movl 56(%4), %%eax\n"
20829 "32: movl 60(%4), %%edx\n"
20830 - "33: movl %%eax, 56(%3)\n"
20831 - "34: movl %%edx, 60(%3)\n"
20832 + "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
20833 + "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
20834 " addl $-64, %0\n"
20835 " addl $64, %4\n"
20836 " addl $64, %3\n"
20837 @@ -278,10 +282,119 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
20838 " shrl $2, %0\n"
20839 " andl $3, %%eax\n"
20840 " cld\n"
20841 + __COPYUSER_SET_ES
20842 "99: rep; movsl\n"
20843 "36: movl %%eax, %0\n"
20844 "37: rep; movsb\n"
20845 "100:\n"
20846 + __COPYUSER_RESTORE_ES
20847 + ".section .fixup,\"ax\"\n"
20848 + "101: lea 0(%%eax,%0,4),%0\n"
20849 + " jmp 100b\n"
20850 + ".previous\n"
20851 + ".section __ex_table,\"a\"\n"
20852 + " .align 4\n"
20853 + " .long 1b,100b\n"
20854 + " .long 2b,100b\n"
20855 + " .long 3b,100b\n"
20856 + " .long 4b,100b\n"
20857 + " .long 5b,100b\n"
20858 + " .long 6b,100b\n"
20859 + " .long 7b,100b\n"
20860 + " .long 8b,100b\n"
20861 + " .long 9b,100b\n"
20862 + " .long 10b,100b\n"
20863 + " .long 11b,100b\n"
20864 + " .long 12b,100b\n"
20865 + " .long 13b,100b\n"
20866 + " .long 14b,100b\n"
20867 + " .long 15b,100b\n"
20868 + " .long 16b,100b\n"
20869 + " .long 17b,100b\n"
20870 + " .long 18b,100b\n"
20871 + " .long 19b,100b\n"
20872 + " .long 20b,100b\n"
20873 + " .long 21b,100b\n"
20874 + " .long 22b,100b\n"
20875 + " .long 23b,100b\n"
20876 + " .long 24b,100b\n"
20877 + " .long 25b,100b\n"
20878 + " .long 26b,100b\n"
20879 + " .long 27b,100b\n"
20880 + " .long 28b,100b\n"
20881 + " .long 29b,100b\n"
20882 + " .long 30b,100b\n"
20883 + " .long 31b,100b\n"
20884 + " .long 32b,100b\n"
20885 + " .long 33b,100b\n"
20886 + " .long 34b,100b\n"
20887 + " .long 35b,100b\n"
20888 + " .long 36b,100b\n"
20889 + " .long 37b,100b\n"
20890 + " .long 99b,101b\n"
20891 + ".previous"
20892 + : "=&c"(size), "=&D" (d0), "=&S" (d1)
20893 + : "1"(to), "2"(from), "0"(size)
20894 + : "eax", "edx", "memory");
20895 + return size;
20896 +}
20897 +
20898 +static unsigned long
20899 +__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
20900 +{
20901 + int d0, d1;
20902 + __asm__ __volatile__(
20903 + " .align 2,0x90\n"
20904 + "1: "__copyuser_seg" movl 32(%4), %%eax\n"
20905 + " cmpl $67, %0\n"
20906 + " jbe 3f\n"
20907 + "2: "__copyuser_seg" movl 64(%4), %%eax\n"
20908 + " .align 2,0x90\n"
20909 + "3: "__copyuser_seg" movl 0(%4), %%eax\n"
20910 + "4: "__copyuser_seg" movl 4(%4), %%edx\n"
20911 + "5: movl %%eax, 0(%3)\n"
20912 + "6: movl %%edx, 4(%3)\n"
20913 + "7: "__copyuser_seg" movl 8(%4), %%eax\n"
20914 + "8: "__copyuser_seg" movl 12(%4),%%edx\n"
20915 + "9: movl %%eax, 8(%3)\n"
20916 + "10: movl %%edx, 12(%3)\n"
20917 + "11: "__copyuser_seg" movl 16(%4), %%eax\n"
20918 + "12: "__copyuser_seg" movl 20(%4), %%edx\n"
20919 + "13: movl %%eax, 16(%3)\n"
20920 + "14: movl %%edx, 20(%3)\n"
20921 + "15: "__copyuser_seg" movl 24(%4), %%eax\n"
20922 + "16: "__copyuser_seg" movl 28(%4), %%edx\n"
20923 + "17: movl %%eax, 24(%3)\n"
20924 + "18: movl %%edx, 28(%3)\n"
20925 + "19: "__copyuser_seg" movl 32(%4), %%eax\n"
20926 + "20: "__copyuser_seg" movl 36(%4), %%edx\n"
20927 + "21: movl %%eax, 32(%3)\n"
20928 + "22: movl %%edx, 36(%3)\n"
20929 + "23: "__copyuser_seg" movl 40(%4), %%eax\n"
20930 + "24: "__copyuser_seg" movl 44(%4), %%edx\n"
20931 + "25: movl %%eax, 40(%3)\n"
20932 + "26: movl %%edx, 44(%3)\n"
20933 + "27: "__copyuser_seg" movl 48(%4), %%eax\n"
20934 + "28: "__copyuser_seg" movl 52(%4), %%edx\n"
20935 + "29: movl %%eax, 48(%3)\n"
20936 + "30: movl %%edx, 52(%3)\n"
20937 + "31: "__copyuser_seg" movl 56(%4), %%eax\n"
20938 + "32: "__copyuser_seg" movl 60(%4), %%edx\n"
20939 + "33: movl %%eax, 56(%3)\n"
20940 + "34: movl %%edx, 60(%3)\n"
20941 + " addl $-64, %0\n"
20942 + " addl $64, %4\n"
20943 + " addl $64, %3\n"
20944 + " cmpl $63, %0\n"
20945 + " ja 1b\n"
20946 + "35: movl %0, %%eax\n"
20947 + " shrl $2, %0\n"
20948 + " andl $3, %%eax\n"
20949 + " cld\n"
20950 + "99: rep; "__copyuser_seg" movsl\n"
20951 + "36: movl %%eax, %0\n"
20952 + "37: rep; "__copyuser_seg" movsb\n"
20953 + "100:\n"
20954 ".section .fixup,\"ax\"\n"
20955 "101: lea 0(%%eax,%0,4),%0\n"
20956 " jmp 100b\n"
20957 @@ -339,41 +452,41 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
20958 int d0, d1;
20959 __asm__ __volatile__(
20960 " .align 2,0x90\n"
20961 - "0: movl 32(%4), %%eax\n"
20962 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
20963 " cmpl $67, %0\n"
20964 " jbe 2f\n"
20965 - "1: movl 64(%4), %%eax\n"
20966 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
20967 " .align 2,0x90\n"
20968 - "2: movl 0(%4), %%eax\n"
20969 - "21: movl 4(%4), %%edx\n"
20970 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
20971 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
20972 " movl %%eax, 0(%3)\n"
20973 " movl %%edx, 4(%3)\n"
20974 - "3: movl 8(%4), %%eax\n"
20975 - "31: movl 12(%4),%%edx\n"
20976 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
20977 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
20978 " movl %%eax, 8(%3)\n"
20979 " movl %%edx, 12(%3)\n"
20980 - "4: movl 16(%4), %%eax\n"
20981 - "41: movl 20(%4), %%edx\n"
20982 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
20983 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
20984 " movl %%eax, 16(%3)\n"
20985 " movl %%edx, 20(%3)\n"
20986 - "10: movl 24(%4), %%eax\n"
20987 - "51: movl 28(%4), %%edx\n"
20988 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
20989 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
20990 " movl %%eax, 24(%3)\n"
20991 " movl %%edx, 28(%3)\n"
20992 - "11: movl 32(%4), %%eax\n"
20993 - "61: movl 36(%4), %%edx\n"
20994 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
20995 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
20996 " movl %%eax, 32(%3)\n"
20997 " movl %%edx, 36(%3)\n"
20998 - "12: movl 40(%4), %%eax\n"
20999 - "71: movl 44(%4), %%edx\n"
21000 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
21001 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
21002 " movl %%eax, 40(%3)\n"
21003 " movl %%edx, 44(%3)\n"
21004 - "13: movl 48(%4), %%eax\n"
21005 - "81: movl 52(%4), %%edx\n"
21006 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
21007 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
21008 " movl %%eax, 48(%3)\n"
21009 " movl %%edx, 52(%3)\n"
21010 - "14: movl 56(%4), %%eax\n"
21011 - "91: movl 60(%4), %%edx\n"
21012 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
21013 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
21014 " movl %%eax, 56(%3)\n"
21015 " movl %%edx, 60(%3)\n"
21016 " addl $-64, %0\n"
21017 @@ -385,9 +498,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
21018 " shrl $2, %0\n"
21019 " andl $3, %%eax\n"
21020 " cld\n"
21021 - "6: rep; movsl\n"
21022 + "6: rep; "__copyuser_seg" movsl\n"
21023 " movl %%eax,%0\n"
21024 - "7: rep; movsb\n"
21025 + "7: rep; "__copyuser_seg" movsb\n"
21026 "8:\n"
21027 ".section .fixup,\"ax\"\n"
21028 "9: lea 0(%%eax,%0,4),%0\n"
21029 @@ -440,41 +553,41 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
21030
21031 __asm__ __volatile__(
21032 " .align 2,0x90\n"
21033 - "0: movl 32(%4), %%eax\n"
21034 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
21035 " cmpl $67, %0\n"
21036 " jbe 2f\n"
21037 - "1: movl 64(%4), %%eax\n"
21038 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
21039 " .align 2,0x90\n"
21040 - "2: movl 0(%4), %%eax\n"
21041 - "21: movl 4(%4), %%edx\n"
21042 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
21043 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
21044 " movnti %%eax, 0(%3)\n"
21045 " movnti %%edx, 4(%3)\n"
21046 - "3: movl 8(%4), %%eax\n"
21047 - "31: movl 12(%4),%%edx\n"
21048 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
21049 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
21050 " movnti %%eax, 8(%3)\n"
21051 " movnti %%edx, 12(%3)\n"
21052 - "4: movl 16(%4), %%eax\n"
21053 - "41: movl 20(%4), %%edx\n"
21054 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
21055 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
21056 " movnti %%eax, 16(%3)\n"
21057 " movnti %%edx, 20(%3)\n"
21058 - "10: movl 24(%4), %%eax\n"
21059 - "51: movl 28(%4), %%edx\n"
21060 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
21061 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
21062 " movnti %%eax, 24(%3)\n"
21063 " movnti %%edx, 28(%3)\n"
21064 - "11: movl 32(%4), %%eax\n"
21065 - "61: movl 36(%4), %%edx\n"
21066 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
21067 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
21068 " movnti %%eax, 32(%3)\n"
21069 " movnti %%edx, 36(%3)\n"
21070 - "12: movl 40(%4), %%eax\n"
21071 - "71: movl 44(%4), %%edx\n"
21072 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
21073 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
21074 " movnti %%eax, 40(%3)\n"
21075 " movnti %%edx, 44(%3)\n"
21076 - "13: movl 48(%4), %%eax\n"
21077 - "81: movl 52(%4), %%edx\n"
21078 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
21079 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
21080 " movnti %%eax, 48(%3)\n"
21081 " movnti %%edx, 52(%3)\n"
21082 - "14: movl 56(%4), %%eax\n"
21083 - "91: movl 60(%4), %%edx\n"
21084 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
21085 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
21086 " movnti %%eax, 56(%3)\n"
21087 " movnti %%edx, 60(%3)\n"
21088 " addl $-64, %0\n"
21089 @@ -487,9 +600,9 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
21090 " shrl $2, %0\n"
21091 " andl $3, %%eax\n"
21092 " cld\n"
21093 - "6: rep; movsl\n"
21094 + "6: rep; "__copyuser_seg" movsl\n"
21095 " movl %%eax,%0\n"
21096 - "7: rep; movsb\n"
21097 + "7: rep; "__copyuser_seg" movsb\n"
21098 "8:\n"
21099 ".section .fixup,\"ax\"\n"
21100 "9: lea 0(%%eax,%0,4),%0\n"
21101 @@ -537,41 +650,41 @@ static unsigned long __copy_user_intel_nocache(void *to,
21102
21103 __asm__ __volatile__(
21104 " .align 2,0x90\n"
21105 - "0: movl 32(%4), %%eax\n"
21106 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
21107 " cmpl $67, %0\n"
21108 " jbe 2f\n"
21109 - "1: movl 64(%4), %%eax\n"
21110 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
21111 " .align 2,0x90\n"
21112 - "2: movl 0(%4), %%eax\n"
21113 - "21: movl 4(%4), %%edx\n"
21114 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
21115 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
21116 " movnti %%eax, 0(%3)\n"
21117 " movnti %%edx, 4(%3)\n"
21118 - "3: movl 8(%4), %%eax\n"
21119 - "31: movl 12(%4),%%edx\n"
21120 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
21121 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
21122 " movnti %%eax, 8(%3)\n"
21123 " movnti %%edx, 12(%3)\n"
21124 - "4: movl 16(%4), %%eax\n"
21125 - "41: movl 20(%4), %%edx\n"
21126 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
21127 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
21128 " movnti %%eax, 16(%3)\n"
21129 " movnti %%edx, 20(%3)\n"
21130 - "10: movl 24(%4), %%eax\n"
21131 - "51: movl 28(%4), %%edx\n"
21132 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
21133 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
21134 " movnti %%eax, 24(%3)\n"
21135 " movnti %%edx, 28(%3)\n"
21136 - "11: movl 32(%4), %%eax\n"
21137 - "61: movl 36(%4), %%edx\n"
21138 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
21139 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
21140 " movnti %%eax, 32(%3)\n"
21141 " movnti %%edx, 36(%3)\n"
21142 - "12: movl 40(%4), %%eax\n"
21143 - "71: movl 44(%4), %%edx\n"
21144 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
21145 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
21146 " movnti %%eax, 40(%3)\n"
21147 " movnti %%edx, 44(%3)\n"
21148 - "13: movl 48(%4), %%eax\n"
21149 - "81: movl 52(%4), %%edx\n"
21150 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
21151 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
21152 " movnti %%eax, 48(%3)\n"
21153 " movnti %%edx, 52(%3)\n"
21154 - "14: movl 56(%4), %%eax\n"
21155 - "91: movl 60(%4), %%edx\n"
21156 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
21157 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
21158 " movnti %%eax, 56(%3)\n"
21159 " movnti %%edx, 60(%3)\n"
21160 " addl $-64, %0\n"
21161 @@ -584,9 +697,9 @@ static unsigned long __copy_user_intel_nocache(void *to,
21162 " shrl $2, %0\n"
21163 " andl $3, %%eax\n"
21164 " cld\n"
21165 - "6: rep; movsl\n"
21166 + "6: rep; "__copyuser_seg" movsl\n"
21167 " movl %%eax,%0\n"
21168 - "7: rep; movsb\n"
21169 + "7: rep; "__copyuser_seg" movsb\n"
21170 "8:\n"
21171 ".section .fixup,\"ax\"\n"
21172 "9: lea 0(%%eax,%0,4),%0\n"
21173 @@ -629,32 +742,36 @@ static unsigned long __copy_user_intel_nocache(void *to,
21174 */
21175 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
21176 unsigned long size);
21177 -unsigned long __copy_user_intel(void __user *to, const void *from,
21178 +unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
21179 + unsigned long size);
21180 +unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
21181 unsigned long size);
21182 unsigned long __copy_user_zeroing_intel_nocache(void *to,
21183 const void __user *from, unsigned long size);
21184 #endif /* CONFIG_X86_INTEL_USERCOPY */
21185
21186 /* Generic arbitrary sized copy. */
21187 -#define __copy_user(to, from, size) \
21188 +#define __copy_user(to, from, size, prefix, set, restore) \
21189 do { \
21190 int __d0, __d1, __d2; \
21191 __asm__ __volatile__( \
21192 + set \
21193 " cmp $7,%0\n" \
21194 " jbe 1f\n" \
21195 " movl %1,%0\n" \
21196 " negl %0\n" \
21197 " andl $7,%0\n" \
21198 " subl %0,%3\n" \
21199 - "4: rep; movsb\n" \
21200 + "4: rep; "prefix"movsb\n" \
21201 " movl %3,%0\n" \
21202 " shrl $2,%0\n" \
21203 " andl $3,%3\n" \
21204 " .align 2,0x90\n" \
21205 - "0: rep; movsl\n" \
21206 + "0: rep; "prefix"movsl\n" \
21207 " movl %3,%0\n" \
21208 - "1: rep; movsb\n" \
21209 + "1: rep; "prefix"movsb\n" \
21210 "2:\n" \
21211 + restore \
21212 ".section .fixup,\"ax\"\n" \
21213 "5: addl %3,%0\n" \
21214 " jmp 2b\n" \
21215 @@ -682,14 +799,14 @@ do { \
21216 " negl %0\n" \
21217 " andl $7,%0\n" \
21218 " subl %0,%3\n" \
21219 - "4: rep; movsb\n" \
21220 + "4: rep; "__copyuser_seg"movsb\n" \
21221 " movl %3,%0\n" \
21222 " shrl $2,%0\n" \
21223 " andl $3,%3\n" \
21224 " .align 2,0x90\n" \
21225 - "0: rep; movsl\n" \
21226 + "0: rep; "__copyuser_seg"movsl\n" \
21227 " movl %3,%0\n" \
21228 - "1: rep; movsb\n" \
21229 + "1: rep; "__copyuser_seg"movsb\n" \
21230 "2:\n" \
21231 ".section .fixup,\"ax\"\n" \
21232 "5: addl %3,%0\n" \
21233 @@ -775,9 +892,9 @@ survive:
21234 }
21235 #endif
21236 if (movsl_is_ok(to, from, n))
21237 - __copy_user(to, from, n);
21238 + __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
21239 else
21240 - n = __copy_user_intel(to, from, n);
21241 + n = __generic_copy_to_user_intel(to, from, n);
21242 return n;
21243 }
21244 EXPORT_SYMBOL(__copy_to_user_ll);
21245 @@ -797,10 +914,9 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
21246 unsigned long n)
21247 {
21248 if (movsl_is_ok(to, from, n))
21249 - __copy_user(to, from, n);
21250 + __copy_user(to, from, n, __copyuser_seg, "", "");
21251 else
21252 - n = __copy_user_intel((void __user *)to,
21253 - (const void *)from, n);
21254 + n = __generic_copy_from_user_intel(to, from, n);
21255 return n;
21256 }
21257 EXPORT_SYMBOL(__copy_from_user_ll_nozero);
21258 @@ -827,65 +943,50 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
21259 if (n > 64 && cpu_has_xmm2)
21260 n = __copy_user_intel_nocache(to, from, n);
21261 else
21262 - __copy_user(to, from, n);
21263 + __copy_user(to, from, n, __copyuser_seg, "", "");
21264 #else
21265 - __copy_user(to, from, n);
21266 + __copy_user(to, from, n, __copyuser_seg, "", "");
21267 #endif
21268 return n;
21269 }
21270 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
21271
21272 -/**
21273 - * copy_to_user: - Copy a block of data into user space.
21274 - * @to: Destination address, in user space.
21275 - * @from: Source address, in kernel space.
21276 - * @n: Number of bytes to copy.
21277 - *
21278 - * Context: User context only. This function may sleep.
21279 - *
21280 - * Copy data from kernel space to user space.
21281 - *
21282 - * Returns number of bytes that could not be copied.
21283 - * On success, this will be zero.
21284 - */
21285 -unsigned long
21286 -copy_to_user(void __user *to, const void *from, unsigned long n)
21287 -{
21288 - if (access_ok(VERIFY_WRITE, to, n))
21289 - n = __copy_to_user(to, from, n);
21290 - return n;
21291 -}
21292 -EXPORT_SYMBOL(copy_to_user);
21293 -
21294 -/**
21295 - * copy_from_user: - Copy a block of data from user space.
21296 - * @to: Destination address, in kernel space.
21297 - * @from: Source address, in user space.
21298 - * @n: Number of bytes to copy.
21299 - *
21300 - * Context: User context only. This function may sleep.
21301 - *
21302 - * Copy data from user space to kernel space.
21303 - *
21304 - * Returns number of bytes that could not be copied.
21305 - * On success, this will be zero.
21306 - *
21307 - * If some data could not be copied, this function will pad the copied
21308 - * data to the requested size using zero bytes.
21309 - */
21310 -unsigned long
21311 -_copy_from_user(void *to, const void __user *from, unsigned long n)
21312 -{
21313 - if (access_ok(VERIFY_READ, from, n))
21314 - n = __copy_from_user(to, from, n);
21315 - else
21316 - memset(to, 0, n);
21317 - return n;
21318 -}
21319 -EXPORT_SYMBOL(_copy_from_user);
21320 -
21321 void copy_from_user_overflow(void)
21322 {
21323 WARN(1, "Buffer overflow detected!\n");
21324 }
21325 EXPORT_SYMBOL(copy_from_user_overflow);
21326 +
21327 +void copy_to_user_overflow(void)
21328 +{
21329 + WARN(1, "Buffer overflow detected!\n");
21330 +}
21331 +EXPORT_SYMBOL(copy_to_user_overflow);
21332 +
21333 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21334 +void __set_fs(mm_segment_t x)
21335 +{
21336 + switch (x.seg) {
21337 + case 0:
21338 + loadsegment(gs, 0);
21339 + break;
21340 + case TASK_SIZE_MAX:
21341 + loadsegment(gs, __USER_DS);
21342 + break;
21343 + case -1UL:
21344 + loadsegment(gs, __KERNEL_DS);
21345 + break;
21346 + default:
21347 + BUG();
21348 + }
21349 + return;
21350 +}
21351 +EXPORT_SYMBOL(__set_fs);
21352 +
21353 +void set_fs(mm_segment_t x)
21354 +{
21355 + current_thread_info()->addr_limit = x;
21356 + __set_fs(x);
21357 +}
21358 +EXPORT_SYMBOL(set_fs);
21359 +#endif
21360 diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
21361 index b7c2849..8633ad8 100644
21362 --- a/arch/x86/lib/usercopy_64.c
21363 +++ b/arch/x86/lib/usercopy_64.c
21364 @@ -42,6 +42,12 @@ long
21365 __strncpy_from_user(char *dst, const char __user *src, long count)
21366 {
21367 long res;
21368 +
21369 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21370 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
21371 + src += PAX_USER_SHADOW_BASE;
21372 +#endif
21373 +
21374 __do_strncpy_from_user(dst, src, count, res);
21375 return res;
21376 }
21377 @@ -65,6 +71,12 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
21378 {
21379 long __d0;
21380 might_fault();
21381 +
21382 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21383 + if ((unsigned long)addr < PAX_USER_SHADOW_BASE)
21384 + addr += PAX_USER_SHADOW_BASE;
21385 +#endif
21386 +
21387 /* no memory constraint because it doesn't change any memory gcc knows
21388 about */
21389 asm volatile(
21390 @@ -149,12 +161,20 @@ long strlen_user(const char __user *s)
21391 }
21392 EXPORT_SYMBOL(strlen_user);
21393
21394 -unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
21395 +unsigned long copy_in_user(void __user *to, const void __user *from, unsigned long len)
21396 {
21397 - if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
21398 - return copy_user_generic((__force void *)to, (__force void *)from, len);
21399 - }
21400 - return len;
21401 + if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
21402 +
21403 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21404 + if ((unsigned long)to < PAX_USER_SHADOW_BASE)
21405 + to += PAX_USER_SHADOW_BASE;
21406 + if ((unsigned long)from < PAX_USER_SHADOW_BASE)
21407 + from += PAX_USER_SHADOW_BASE;
21408 +#endif
21409 +
21410 + return copy_user_generic((void __force_kernel *)to, (void __force_kernel *)from, len);
21411 + }
21412 + return len;
21413 }
21414 EXPORT_SYMBOL(copy_in_user);
21415
21416 @@ -164,7 +184,7 @@ EXPORT_SYMBOL(copy_in_user);
21417 * it is not necessary to optimize tail handling.
21418 */
21419 unsigned long
21420 -copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
21421 +copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest)
21422 {
21423 char c;
21424 unsigned zero_len;
21425 diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
21426 index d0474ad..36e9257 100644
21427 --- a/arch/x86/mm/extable.c
21428 +++ b/arch/x86/mm/extable.c
21429 @@ -8,7 +8,7 @@ int fixup_exception(struct pt_regs *regs)
21430 const struct exception_table_entry *fixup;
21431
21432 #ifdef CONFIG_PNPBIOS
21433 - if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
21434 + if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
21435 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
21436 extern u32 pnp_bios_is_utter_crap;
21437 pnp_bios_is_utter_crap = 1;
21438 diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
21439 index 5db0490..2ddce45 100644
21440 --- a/arch/x86/mm/fault.c
21441 +++ b/arch/x86/mm/fault.c
21442 @@ -13,11 +13,18 @@
21443 #include <linux/perf_event.h> /* perf_sw_event */
21444 #include <linux/hugetlb.h> /* hstate_index_to_shift */
21445 #include <linux/prefetch.h> /* prefetchw */
21446 +#include <linux/unistd.h>
21447 +#include <linux/compiler.h>
21448
21449 #include <asm/traps.h> /* dotraplinkage, ... */
21450 #include <asm/pgalloc.h> /* pgd_*(), ... */
21451 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
21452 #include <asm/fixmap.h> /* VSYSCALL_START */
21453 +#include <asm/tlbflush.h>
21454 +
21455 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21456 +#include <asm/stacktrace.h>
21457 +#endif
21458
21459 /*
21460 * Page fault error code bits:
21461 @@ -55,7 +62,7 @@ static inline int __kprobes notify_page_fault(struct pt_regs *regs)
21462 int ret = 0;
21463
21464 /* kprobe_running() needs smp_processor_id() */
21465 - if (kprobes_built_in() && !user_mode_vm(regs)) {
21466 + if (kprobes_built_in() && !user_mode(regs)) {
21467 preempt_disable();
21468 if (kprobe_running() && kprobe_fault_handler(regs, 14))
21469 ret = 1;
21470 @@ -116,7 +123,10 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
21471 return !instr_lo || (instr_lo>>1) == 1;
21472 case 0x00:
21473 /* Prefetch instruction is 0x0F0D or 0x0F18 */
21474 - if (probe_kernel_address(instr, opcode))
21475 + if (user_mode(regs)) {
21476 + if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
21477 + return 0;
21478 + } else if (probe_kernel_address(instr, opcode))
21479 return 0;
21480
21481 *prefetch = (instr_lo == 0xF) &&
21482 @@ -150,7 +160,10 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
21483 while (instr < max_instr) {
21484 unsigned char opcode;
21485
21486 - if (probe_kernel_address(instr, opcode))
21487 + if (user_mode(regs)) {
21488 + if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
21489 + break;
21490 + } else if (probe_kernel_address(instr, opcode))
21491 break;
21492
21493 instr++;
21494 @@ -181,6 +194,34 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
21495 force_sig_info(si_signo, &info, tsk);
21496 }
21497
21498 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
21499 +static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
21500 +#endif
21501 +
21502 +#ifdef CONFIG_PAX_EMUTRAMP
21503 +static int pax_handle_fetch_fault(struct pt_regs *regs);
21504 +#endif
21505 +
21506 +#ifdef CONFIG_PAX_PAGEEXEC
21507 +static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
21508 +{
21509 + pgd_t *pgd;
21510 + pud_t *pud;
21511 + pmd_t *pmd;
21512 +
21513 + pgd = pgd_offset(mm, address);
21514 + if (!pgd_present(*pgd))
21515 + return NULL;
21516 + pud = pud_offset(pgd, address);
21517 + if (!pud_present(*pud))
21518 + return NULL;
21519 + pmd = pmd_offset(pud, address);
21520 + if (!pmd_present(*pmd))
21521 + return NULL;
21522 + return pmd;
21523 +}
21524 +#endif
21525 +
21526 DEFINE_SPINLOCK(pgd_lock);
21527 LIST_HEAD(pgd_list);
21528
21529 @@ -231,10 +272,22 @@ void vmalloc_sync_all(void)
21530 for (address = VMALLOC_START & PMD_MASK;
21531 address >= TASK_SIZE && address < FIXADDR_TOP;
21532 address += PMD_SIZE) {
21533 +
21534 +#ifdef CONFIG_PAX_PER_CPU_PGD
21535 + unsigned long cpu;
21536 +#else
21537 struct page *page;
21538 +#endif
21539
21540 spin_lock(&pgd_lock);
21541 +
21542 +#ifdef CONFIG_PAX_PER_CPU_PGD
21543 + for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
21544 + pgd_t *pgd = get_cpu_pgd(cpu);
21545 + pmd_t *ret;
21546 +#else
21547 list_for_each_entry(page, &pgd_list, lru) {
21548 + pgd_t *pgd = page_address(page);
21549 spinlock_t *pgt_lock;
21550 pmd_t *ret;
21551
21552 @@ -242,8 +295,13 @@ void vmalloc_sync_all(void)
21553 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
21554
21555 spin_lock(pgt_lock);
21556 - ret = vmalloc_sync_one(page_address(page), address);
21557 +#endif
21558 +
21559 + ret = vmalloc_sync_one(pgd, address);
21560 +
21561 +#ifndef CONFIG_PAX_PER_CPU_PGD
21562 spin_unlock(pgt_lock);
21563 +#endif
21564
21565 if (!ret)
21566 break;
21567 @@ -277,6 +335,11 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
21568 * an interrupt in the middle of a task switch..
21569 */
21570 pgd_paddr = read_cr3();
21571 +
21572 +#ifdef CONFIG_PAX_PER_CPU_PGD
21573 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
21574 +#endif
21575 +
21576 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
21577 if (!pmd_k)
21578 return -1;
21579 @@ -372,7 +435,14 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
21580 * happen within a race in page table update. In the later
21581 * case just flush:
21582 */
21583 +
21584 +#ifdef CONFIG_PAX_PER_CPU_PGD
21585 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
21586 + pgd = pgd_offset_cpu(smp_processor_id(), address);
21587 +#else
21588 pgd = pgd_offset(current->active_mm, address);
21589 +#endif
21590 +
21591 pgd_ref = pgd_offset_k(address);
21592 if (pgd_none(*pgd_ref))
21593 return -1;
21594 @@ -540,7 +610,7 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
21595 static int is_errata100(struct pt_regs *regs, unsigned long address)
21596 {
21597 #ifdef CONFIG_X86_64
21598 - if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
21599 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
21600 return 1;
21601 #endif
21602 return 0;
21603 @@ -567,7 +637,7 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
21604 }
21605
21606 static const char nx_warning[] = KERN_CRIT
21607 -"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
21608 +"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
21609
21610 static void
21611 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
21612 @@ -576,15 +646,26 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
21613 if (!oops_may_print())
21614 return;
21615
21616 - if (error_code & PF_INSTR) {
21617 + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
21618 unsigned int level;
21619
21620 pte_t *pte = lookup_address(address, &level);
21621
21622 if (pte && pte_present(*pte) && !pte_exec(*pte))
21623 - printk(nx_warning, current_uid());
21624 + printk(nx_warning, current_uid(), current->comm, task_pid_nr(current));
21625 }
21626
21627 +#ifdef CONFIG_PAX_KERNEXEC
21628 + if (init_mm.start_code <= address && address < init_mm.end_code) {
21629 + if (current->signal->curr_ip)
21630 + printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
21631 + &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
21632 + else
21633 + printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
21634 + current->comm, task_pid_nr(current), current_uid(), current_euid());
21635 + }
21636 +#endif
21637 +
21638 printk(KERN_ALERT "BUG: unable to handle kernel ");
21639 if (address < PAGE_SIZE)
21640 printk(KERN_CONT "NULL pointer dereference");
21641 @@ -739,6 +820,21 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
21642 }
21643 #endif
21644
21645 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
21646 + if (pax_is_fetch_fault(regs, error_code, address)) {
21647 +
21648 +#ifdef CONFIG_PAX_EMUTRAMP
21649 + switch (pax_handle_fetch_fault(regs)) {
21650 + case 2:
21651 + return;
21652 + }
21653 +#endif
21654 +
21655 + pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
21656 + do_group_exit(SIGKILL);
21657 + }
21658 +#endif
21659 +
21660 if (unlikely(show_unhandled_signals))
21661 show_signal_msg(regs, error_code, address, tsk);
21662
21663 @@ -835,7 +931,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
21664 if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
21665 printk(KERN_ERR
21666 "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
21667 - tsk->comm, tsk->pid, address);
21668 + tsk->comm, task_pid_nr(tsk), address);
21669 code = BUS_MCEERR_AR;
21670 }
21671 #endif
21672 @@ -890,6 +986,99 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
21673 return 1;
21674 }
21675
21676 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
21677 +static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
21678 +{
21679 + pte_t *pte;
21680 + pmd_t *pmd;
21681 + spinlock_t *ptl;
21682 + unsigned char pte_mask;
21683 +
21684 + if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
21685 + !(mm->pax_flags & MF_PAX_PAGEEXEC))
21686 + return 0;
21687 +
21688 + /* PaX: it's our fault, let's handle it if we can */
21689 +
21690 + /* PaX: take a look at read faults before acquiring any locks */
21691 + if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
21692 + /* instruction fetch attempt from a protected page in user mode */
21693 + up_read(&mm->mmap_sem);
21694 +
21695 +#ifdef CONFIG_PAX_EMUTRAMP
21696 + switch (pax_handle_fetch_fault(regs)) {
21697 + case 2:
21698 + return 1;
21699 + }
21700 +#endif
21701 +
21702 + pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
21703 + do_group_exit(SIGKILL);
21704 + }
21705 +
21706 + pmd = pax_get_pmd(mm, address);
21707 + if (unlikely(!pmd))
21708 + return 0;
21709 +
21710 + pte = pte_offset_map_lock(mm, pmd, address, &ptl);
21711 + if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
21712 + pte_unmap_unlock(pte, ptl);
21713 + return 0;
21714 + }
21715 +
21716 + if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
21717 + /* write attempt to a protected page in user mode */
21718 + pte_unmap_unlock(pte, ptl);
21719 + return 0;
21720 + }
21721 +
21722 +#ifdef CONFIG_SMP
21723 + if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
21724 +#else
21725 + if (likely(address > get_limit(regs->cs)))
21726 +#endif
21727 + {
21728 + set_pte(pte, pte_mkread(*pte));
21729 + __flush_tlb_one(address);
21730 + pte_unmap_unlock(pte, ptl);
21731 + up_read(&mm->mmap_sem);
21732 + return 1;
21733 + }
21734 +
21735 + pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
21736 +
21737 + /*
21738 + * PaX: fill DTLB with user rights and retry
21739 + */
21740 + __asm__ __volatile__ (
21741 + "orb %2,(%1)\n"
21742 +#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
21743 +/*
21744 + * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
21745 + * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
21746 + * page fault when examined during a TLB load attempt. this is true not only
21747 + * for PTEs holding a non-present entry but also present entries that will
21748 + * raise a page fault (such as those set up by PaX, or the copy-on-write
21749 + * mechanism). in effect it means that we do *not* need to flush the TLBs
21750 + * for our target pages since their PTEs are simply not in the TLBs at all.
21751 +
21752 + * the best thing in omitting it is that we gain around 15-20% speed in the
21753 + * fast path of the page fault handler and can get rid of tracing since we
21754 + * can no longer flush unintended entries.
21755 + */
21756 + "invlpg (%0)\n"
21757 +#endif
21758 + __copyuser_seg"testb $0,(%0)\n"
21759 + "xorb %3,(%1)\n"
21760 + :
21761 + : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
21762 + : "memory", "cc");
21763 + pte_unmap_unlock(pte, ptl);
21764 + up_read(&mm->mmap_sem);
21765 + return 1;
21766 +}
21767 +#endif
21768 +
21769 /*
21770 * Handle a spurious fault caused by a stale TLB entry.
21771 *
21772 @@ -962,6 +1151,9 @@ int show_unhandled_signals = 1;
21773 static inline int
21774 access_error(unsigned long error_code, struct vm_area_struct *vma)
21775 {
21776 + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
21777 + return 1;
21778 +
21779 if (error_code & PF_WRITE) {
21780 /* write, present and write, not present: */
21781 if (unlikely(!(vma->vm_flags & VM_WRITE)))
21782 @@ -995,18 +1187,32 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
21783 {
21784 struct vm_area_struct *vma;
21785 struct task_struct *tsk;
21786 - unsigned long address;
21787 struct mm_struct *mm;
21788 int fault;
21789 int write = error_code & PF_WRITE;
21790 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
21791 (write ? FAULT_FLAG_WRITE : 0);
21792
21793 - tsk = current;
21794 - mm = tsk->mm;
21795 -
21796 /* Get the faulting address: */
21797 - address = read_cr2();
21798 + unsigned long address = read_cr2();
21799 +
21800 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21801 + if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) {
21802 + if (!search_exception_tables(regs->ip)) {
21803 + bad_area_nosemaphore(regs, error_code, address);
21804 + return;
21805 + }
21806 + if (address < PAX_USER_SHADOW_BASE) {
21807 + printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
21808 + printk(KERN_ERR "PAX: faulting IP: %pS\n", (void *)regs->ip);
21809 + show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
21810 + } else
21811 + address -= PAX_USER_SHADOW_BASE;
21812 + }
21813 +#endif
21814 +
21815 + tsk = current;
21816 + mm = tsk->mm;
21817
21818 /*
21819 * Detect and handle instructions that would cause a page fault for
21820 @@ -1067,7 +1273,7 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
21821 * User-mode registers count as a user access even for any
21822 * potential system fault or CPU buglet:
21823 */
21824 - if (user_mode_vm(regs)) {
21825 + if (user_mode(regs)) {
21826 local_irq_enable();
21827 error_code |= PF_USER;
21828 } else {
21829 @@ -1122,6 +1328,11 @@ retry:
21830 might_sleep();
21831 }
21832
21833 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
21834 + if (pax_handle_pageexec_fault(regs, mm, address, error_code))
21835 + return;
21836 +#endif
21837 +
21838 vma = find_vma(mm, address);
21839 if (unlikely(!vma)) {
21840 bad_area(regs, error_code, address);
21841 @@ -1133,18 +1344,24 @@ retry:
21842 bad_area(regs, error_code, address);
21843 return;
21844 }
21845 - if (error_code & PF_USER) {
21846 - /*
21847 - * Accessing the stack below %sp is always a bug.
21848 - * The large cushion allows instructions like enter
21849 - * and pusha to work. ("enter $65535, $31" pushes
21850 - * 32 pointers and then decrements %sp by 65535.)
21851 - */
21852 - if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
21853 - bad_area(regs, error_code, address);
21854 - return;
21855 - }
21856 + /*
21857 + * Accessing the stack below %sp is always a bug.
21858 + * The large cushion allows instructions like enter
21859 + * and pusha to work. ("enter $65535, $31" pushes
21860 + * 32 pointers and then decrements %sp by 65535.)
21861 + */
21862 + if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
21863 + bad_area(regs, error_code, address);
21864 + return;
21865 }
21866 +
21867 +#ifdef CONFIG_PAX_SEGMEXEC
21868 + if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
21869 + bad_area(regs, error_code, address);
21870 + return;
21871 + }
21872 +#endif
21873 +
21874 if (unlikely(expand_stack(vma, address))) {
21875 bad_area(regs, error_code, address);
21876 return;
21877 @@ -1199,3 +1416,292 @@ good_area:
21878
21879 up_read(&mm->mmap_sem);
21880 }
21881 +
21882 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
21883 +static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
21884 +{
21885 + struct mm_struct *mm = current->mm;
21886 + unsigned long ip = regs->ip;
21887 +
21888 + if (v8086_mode(regs))
21889 + ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
21890 +
21891 +#ifdef CONFIG_PAX_PAGEEXEC
21892 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
21893 + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR))
21894 + return true;
21895 + if (!(error_code & (PF_PROT | PF_WRITE)) && ip == address)
21896 + return true;
21897 + return false;
21898 + }
21899 +#endif
21900 +
21901 +#ifdef CONFIG_PAX_SEGMEXEC
21902 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
21903 + if (!(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address))
21904 + return true;
21905 + return false;
21906 + }
21907 +#endif
21908 +
21909 + return false;
21910 +}
21911 +#endif
21912 +
21913 +#ifdef CONFIG_PAX_EMUTRAMP
21914 +static int pax_handle_fetch_fault_32(struct pt_regs *regs)
21915 +{
21916 + int err;
21917 +
21918 + do { /* PaX: libffi trampoline emulation */
21919 + unsigned char mov, jmp;
21920 + unsigned int addr1, addr2;
21921 +
21922 +#ifdef CONFIG_X86_64
21923 + if ((regs->ip + 9) >> 32)
21924 + break;
21925 +#endif
21926 +
21927 + err = get_user(mov, (unsigned char __user *)regs->ip);
21928 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
21929 + err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
21930 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
21931 +
21932 + if (err)
21933 + break;
21934 +
21935 + if (mov == 0xB8 && jmp == 0xE9) {
21936 + regs->ax = addr1;
21937 + regs->ip = (unsigned int)(regs->ip + addr2 + 10);
21938 + return 2;
21939 + }
21940 + } while (0);
21941 +
21942 + do { /* PaX: gcc trampoline emulation #1 */
21943 + unsigned char mov1, mov2;
21944 + unsigned short jmp;
21945 + unsigned int addr1, addr2;
21946 +
21947 +#ifdef CONFIG_X86_64
21948 + if ((regs->ip + 11) >> 32)
21949 + break;
21950 +#endif
21951 +
21952 + err = get_user(mov1, (unsigned char __user *)regs->ip);
21953 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
21954 + err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
21955 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
21956 + err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
21957 +
21958 + if (err)
21959 + break;
21960 +
21961 + if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
21962 + regs->cx = addr1;
21963 + regs->ax = addr2;
21964 + regs->ip = addr2;
21965 + return 2;
21966 + }
21967 + } while (0);
21968 +
21969 + do { /* PaX: gcc trampoline emulation #2 */
21970 + unsigned char mov, jmp;
21971 + unsigned int addr1, addr2;
21972 +
21973 +#ifdef CONFIG_X86_64
21974 + if ((regs->ip + 9) >> 32)
21975 + break;
21976 +#endif
21977 +
21978 + err = get_user(mov, (unsigned char __user *)regs->ip);
21979 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
21980 + err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
21981 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
21982 +
21983 + if (err)
21984 + break;
21985 +
21986 + if (mov == 0xB9 && jmp == 0xE9) {
21987 + regs->cx = addr1;
21988 + regs->ip = (unsigned int)(regs->ip + addr2 + 10);
21989 + return 2;
21990 + }
21991 + } while (0);
21992 +
21993 + return 1; /* PaX in action */
21994 +}
21995 +
21996 +#ifdef CONFIG_X86_64
21997 +static int pax_handle_fetch_fault_64(struct pt_regs *regs)
21998 +{
21999 + int err;
22000 +
22001 + do { /* PaX: libffi trampoline emulation */
22002 + unsigned short mov1, mov2, jmp1;
22003 + unsigned char stcclc, jmp2;
22004 + unsigned long addr1, addr2;
22005 +
22006 + err = get_user(mov1, (unsigned short __user *)regs->ip);
22007 + err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
22008 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
22009 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
22010 + err |= get_user(stcclc, (unsigned char __user *)(regs->ip + 20));
22011 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 21));
22012 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 23));
22013 +
22014 + if (err)
22015 + break;
22016 +
22017 + if (mov1 == 0xBB49 && mov2 == 0xBA49 && (stcclc == 0xF8 || stcclc == 0xF9) && jmp1 == 0xFF49 && jmp2 == 0xE3) {
22018 + regs->r11 = addr1;
22019 + regs->r10 = addr2;
22020 + if (stcclc == 0xF8)
22021 + regs->flags &= ~X86_EFLAGS_CF;
22022 + else
22023 + regs->flags |= X86_EFLAGS_CF;
22024 + regs->ip = addr1;
22025 + return 2;
22026 + }
22027 + } while (0);
22028 +
22029 + do { /* PaX: gcc trampoline emulation #1 */
22030 + unsigned short mov1, mov2, jmp1;
22031 + unsigned char jmp2;
22032 + unsigned int addr1;
22033 + unsigned long addr2;
22034 +
22035 + err = get_user(mov1, (unsigned short __user *)regs->ip);
22036 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
22037 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
22038 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
22039 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
22040 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
22041 +
22042 + if (err)
22043 + break;
22044 +
22045 + if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
22046 + regs->r11 = addr1;
22047 + regs->r10 = addr2;
22048 + regs->ip = addr1;
22049 + return 2;
22050 + }
22051 + } while (0);
22052 +
22053 + do { /* PaX: gcc trampoline emulation #2 */
22054 + unsigned short mov1, mov2, jmp1;
22055 + unsigned char jmp2;
22056 + unsigned long addr1, addr2;
22057 +
22058 + err = get_user(mov1, (unsigned short __user *)regs->ip);
22059 + err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
22060 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
22061 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
22062 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
22063 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
22064 +
22065 + if (err)
22066 + break;
22067 +
22068 + if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
22069 + regs->r11 = addr1;
22070 + regs->r10 = addr2;
22071 + regs->ip = addr1;
22072 + return 2;
22073 + }
22074 + } while (0);
22075 +
22076 + return 1; /* PaX in action */
22077 +}
22078 +#endif
22079 +
22080 +/*
22081 + * PaX: decide what to do with offenders (regs->ip = fault address)
22082 + *
22083 + * returns 1 when task should be killed
22084 + * 2 when gcc trampoline was detected
22085 + */
22086 +static int pax_handle_fetch_fault(struct pt_regs *regs)
22087 +{
22088 + if (v8086_mode(regs))
22089 + return 1;
22090 +
22091 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
22092 + return 1;
22093 +
22094 +#ifdef CONFIG_X86_32
22095 + return pax_handle_fetch_fault_32(regs);
22096 +#else
22097 + if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
22098 + return pax_handle_fetch_fault_32(regs);
22099 + else
22100 + return pax_handle_fetch_fault_64(regs);
22101 +#endif
22102 +}
22103 +#endif
22104 +
22105 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
22106 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
22107 +{
22108 + long i;
22109 +
22110 + printk(KERN_ERR "PAX: bytes at PC: ");
22111 + for (i = 0; i < 20; i++) {
22112 + unsigned char c;
22113 + if (get_user(c, (unsigned char __force_user *)pc+i))
22114 + printk(KERN_CONT "?? ");
22115 + else
22116 + printk(KERN_CONT "%02x ", c);
22117 + }
22118 + printk("\n");
22119 +
22120 + printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
22121 + for (i = -1; i < 80 / (long)sizeof(long); i++) {
22122 + unsigned long c;
22123 + if (get_user(c, (unsigned long __force_user *)sp+i)) {
22124 +#ifdef CONFIG_X86_32
22125 + printk(KERN_CONT "???????? ");
22126 +#else
22127 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)))
22128 + printk(KERN_CONT "???????? ???????? ");
22129 + else
22130 + printk(KERN_CONT "???????????????? ");
22131 +#endif
22132 + } else {
22133 +#ifdef CONFIG_X86_64
22134 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))) {
22135 + printk(KERN_CONT "%08x ", (unsigned int)c);
22136 + printk(KERN_CONT "%08x ", (unsigned int)(c >> 32));
22137 + } else
22138 +#endif
22139 + printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
22140 + }
22141 + }
22142 + printk("\n");
22143 +}
22144 +#endif
22145 +
22146 +/**
22147 + * probe_kernel_write(): safely attempt to write to a location
22148 + * @dst: address to write to
22149 + * @src: pointer to the data that shall be written
22150 + * @size: size of the data chunk
22151 + *
22152 + * Safely write to address @dst from the buffer at @src. If a kernel fault
22153 + * happens, handle that and return -EFAULT.
22154 + */
22155 +long notrace probe_kernel_write(void *dst, const void *src, size_t size)
22156 +{
22157 + long ret;
22158 + mm_segment_t old_fs = get_fs();
22159 +
22160 + set_fs(KERNEL_DS);
22161 + pagefault_disable();
22162 + pax_open_kernel();
22163 + ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
22164 + pax_close_kernel();
22165 + pagefault_enable();
22166 + set_fs(old_fs);
22167 +
22168 + return ret ? -EFAULT : 0;
22169 +}
22170 diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
22171 index dd74e46..7d26398 100644
22172 --- a/arch/x86/mm/gup.c
22173 +++ b/arch/x86/mm/gup.c
22174 @@ -255,7 +255,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
22175 addr = start;
22176 len = (unsigned long) nr_pages << PAGE_SHIFT;
22177 end = start + len;
22178 - if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
22179 + if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
22180 (void __user *)start, len)))
22181 return 0;
22182
22183 diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
22184 index f4f29b1..5cac4fb 100644
22185 --- a/arch/x86/mm/highmem_32.c
22186 +++ b/arch/x86/mm/highmem_32.c
22187 @@ -44,7 +44,11 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
22188 idx = type + KM_TYPE_NR*smp_processor_id();
22189 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
22190 BUG_ON(!pte_none(*(kmap_pte-idx)));
22191 +
22192 + pax_open_kernel();
22193 set_pte(kmap_pte-idx, mk_pte(page, prot));
22194 + pax_close_kernel();
22195 +
22196 arch_flush_lazy_mmu_mode();
22197
22198 return (void *)vaddr;
22199 diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
22200 index f581a18..29efd37 100644
22201 --- a/arch/x86/mm/hugetlbpage.c
22202 +++ b/arch/x86/mm/hugetlbpage.c
22203 @@ -266,13 +266,20 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
22204 struct hstate *h = hstate_file(file);
22205 struct mm_struct *mm = current->mm;
22206 struct vm_area_struct *vma;
22207 - unsigned long start_addr;
22208 + unsigned long start_addr, pax_task_size = TASK_SIZE;
22209 +
22210 +#ifdef CONFIG_PAX_SEGMEXEC
22211 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
22212 + pax_task_size = SEGMEXEC_TASK_SIZE;
22213 +#endif
22214 +
22215 + pax_task_size -= PAGE_SIZE;
22216
22217 if (len > mm->cached_hole_size) {
22218 - start_addr = mm->free_area_cache;
22219 + start_addr = mm->free_area_cache;
22220 } else {
22221 - start_addr = TASK_UNMAPPED_BASE;
22222 - mm->cached_hole_size = 0;
22223 + start_addr = mm->mmap_base;
22224 + mm->cached_hole_size = 0;
22225 }
22226
22227 full_search:
22228 @@ -280,26 +287,27 @@ full_search:
22229
22230 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
22231 /* At this point: (!vma || addr < vma->vm_end). */
22232 - if (TASK_SIZE - len < addr) {
22233 + if (pax_task_size - len < addr) {
22234 /*
22235 * Start a new search - just in case we missed
22236 * some holes.
22237 */
22238 - if (start_addr != TASK_UNMAPPED_BASE) {
22239 - start_addr = TASK_UNMAPPED_BASE;
22240 + if (start_addr != mm->mmap_base) {
22241 + start_addr = mm->mmap_base;
22242 mm->cached_hole_size = 0;
22243 goto full_search;
22244 }
22245 return -ENOMEM;
22246 }
22247 - if (!vma || addr + len <= vma->vm_start) {
22248 - mm->free_area_cache = addr + len;
22249 - return addr;
22250 - }
22251 + if (check_heap_stack_gap(vma, addr, len))
22252 + break;
22253 if (addr + mm->cached_hole_size < vma->vm_start)
22254 mm->cached_hole_size = vma->vm_start - addr;
22255 addr = ALIGN(vma->vm_end, huge_page_size(h));
22256 }
22257 +
22258 + mm->free_area_cache = addr + len;
22259 + return addr;
22260 }
22261
22262 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
22263 @@ -308,10 +316,9 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
22264 {
22265 struct hstate *h = hstate_file(file);
22266 struct mm_struct *mm = current->mm;
22267 - struct vm_area_struct *vma, *prev_vma;
22268 - unsigned long base = mm->mmap_base, addr = addr0;
22269 + struct vm_area_struct *vma;
22270 + unsigned long base = mm->mmap_base, addr;
22271 unsigned long largest_hole = mm->cached_hole_size;
22272 - int first_time = 1;
22273
22274 /* don't allow allocations above current base */
22275 if (mm->free_area_cache > base)
22276 @@ -321,64 +328,63 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
22277 largest_hole = 0;
22278 mm->free_area_cache = base;
22279 }
22280 -try_again:
22281 +
22282 /* make sure it can fit in the remaining address space */
22283 if (mm->free_area_cache < len)
22284 goto fail;
22285
22286 /* either no address requested or can't fit in requested address hole */
22287 - addr = (mm->free_area_cache - len) & huge_page_mask(h);
22288 + addr = (mm->free_area_cache - len);
22289 do {
22290 + addr &= huge_page_mask(h);
22291 + vma = find_vma(mm, addr);
22292 /*
22293 * Lookup failure means no vma is above this address,
22294 * i.e. return with success:
22295 - */
22296 - if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
22297 - return addr;
22298 -
22299 - /*
22300 * new region fits between prev_vma->vm_end and
22301 * vma->vm_start, use it:
22302 */
22303 - if (addr + len <= vma->vm_start &&
22304 - (!prev_vma || (addr >= prev_vma->vm_end))) {
22305 + if (check_heap_stack_gap(vma, addr, len)) {
22306 /* remember the address as a hint for next time */
22307 - mm->cached_hole_size = largest_hole;
22308 - return (mm->free_area_cache = addr);
22309 - } else {
22310 - /* pull free_area_cache down to the first hole */
22311 - if (mm->free_area_cache == vma->vm_end) {
22312 - mm->free_area_cache = vma->vm_start;
22313 - mm->cached_hole_size = largest_hole;
22314 - }
22315 + mm->cached_hole_size = largest_hole;
22316 + return (mm->free_area_cache = addr);
22317 + }
22318 + /* pull free_area_cache down to the first hole */
22319 + if (mm->free_area_cache == vma->vm_end) {
22320 + mm->free_area_cache = vma->vm_start;
22321 + mm->cached_hole_size = largest_hole;
22322 }
22323
22324 /* remember the largest hole we saw so far */
22325 if (addr + largest_hole < vma->vm_start)
22326 - largest_hole = vma->vm_start - addr;
22327 + largest_hole = vma->vm_start - addr;
22328
22329 /* try just below the current vma->vm_start */
22330 - addr = (vma->vm_start - len) & huge_page_mask(h);
22331 - } while (len <= vma->vm_start);
22332 + addr = skip_heap_stack_gap(vma, len);
22333 + } while (!IS_ERR_VALUE(addr));
22334
22335 fail:
22336 /*
22337 - * if hint left us with no space for the requested
22338 - * mapping then try again:
22339 - */
22340 - if (first_time) {
22341 - mm->free_area_cache = base;
22342 - largest_hole = 0;
22343 - first_time = 0;
22344 - goto try_again;
22345 - }
22346 - /*
22347 * A failed mmap() very likely causes application failure,
22348 * so fall back to the bottom-up function here. This scenario
22349 * can happen with large stack limits and large mmap()
22350 * allocations.
22351 */
22352 - mm->free_area_cache = TASK_UNMAPPED_BASE;
22353 +
22354 +#ifdef CONFIG_PAX_SEGMEXEC
22355 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
22356 + mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
22357 + else
22358 +#endif
22359 +
22360 + mm->mmap_base = TASK_UNMAPPED_BASE;
22361 +
22362 +#ifdef CONFIG_PAX_RANDMMAP
22363 + if (mm->pax_flags & MF_PAX_RANDMMAP)
22364 + mm->mmap_base += mm->delta_mmap;
22365 +#endif
22366 +
22367 + mm->free_area_cache = mm->mmap_base;
22368 mm->cached_hole_size = ~0UL;
22369 addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
22370 len, pgoff, flags);
22371 @@ -386,6 +392,7 @@ fail:
22372 /*
22373 * Restore the topdown base:
22374 */
22375 + mm->mmap_base = base;
22376 mm->free_area_cache = base;
22377 mm->cached_hole_size = ~0UL;
22378
22379 @@ -399,10 +406,19 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
22380 struct hstate *h = hstate_file(file);
22381 struct mm_struct *mm = current->mm;
22382 struct vm_area_struct *vma;
22383 + unsigned long pax_task_size = TASK_SIZE;
22384
22385 if (len & ~huge_page_mask(h))
22386 return -EINVAL;
22387 - if (len > TASK_SIZE)
22388 +
22389 +#ifdef CONFIG_PAX_SEGMEXEC
22390 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
22391 + pax_task_size = SEGMEXEC_TASK_SIZE;
22392 +#endif
22393 +
22394 + pax_task_size -= PAGE_SIZE;
22395 +
22396 + if (len > pax_task_size)
22397 return -ENOMEM;
22398
22399 if (flags & MAP_FIXED) {
22400 @@ -414,8 +430,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
22401 if (addr) {
22402 addr = ALIGN(addr, huge_page_size(h));
22403 vma = find_vma(mm, addr);
22404 - if (TASK_SIZE - len >= addr &&
22405 - (!vma || addr + len <= vma->vm_start))
22406 + if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
22407 return addr;
22408 }
22409 if (mm->get_unmapped_area == arch_get_unmapped_area)
22410 diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
22411 index 87488b9..399f416 100644
22412 --- a/arch/x86/mm/init.c
22413 +++ b/arch/x86/mm/init.c
22414 @@ -15,6 +15,7 @@
22415 #include <asm/tlbflush.h>
22416 #include <asm/tlb.h>
22417 #include <asm/proto.h>
22418 +#include <asm/desc.h>
22419
22420 unsigned long __initdata pgt_buf_start;
22421 unsigned long __meminitdata pgt_buf_end;
22422 @@ -31,7 +32,7 @@ int direct_gbpages
22423 static void __init find_early_table_space(unsigned long end, int use_pse,
22424 int use_gbpages)
22425 {
22426 - unsigned long puds, pmds, ptes, tables, start = 0, good_end = end;
22427 + unsigned long puds, pmds, ptes, tables, start = 0x100000, good_end = end;
22428 phys_addr_t base;
22429
22430 puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
22431 @@ -312,8 +313,29 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
22432 */
22433 int devmem_is_allowed(unsigned long pagenr)
22434 {
22435 +#ifdef CONFIG_GRKERNSEC_KMEM
22436 + /* allow BDA */
22437 + if (!pagenr)
22438 + return 1;
22439 + /* allow EBDA */
22440 + if ((0x9f000 >> PAGE_SHIFT) == pagenr)
22441 + return 1;
22442 +#else
22443 + if (!pagenr)
22444 + return 1;
22445 +#ifdef CONFIG_VM86
22446 + if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
22447 + return 1;
22448 +#endif
22449 +#endif
22450 +
22451 + if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
22452 + return 1;
22453 +#ifdef CONFIG_GRKERNSEC_KMEM
22454 + /* throw out everything else below 1MB */
22455 if (pagenr <= 256)
22456 - return 1;
22457 + return 0;
22458 +#endif
22459 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
22460 return 0;
22461 if (!page_is_ram(pagenr))
22462 @@ -372,6 +394,86 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
22463
22464 void free_initmem(void)
22465 {
22466 +
22467 +#ifdef CONFIG_PAX_KERNEXEC
22468 +#ifdef CONFIG_X86_32
22469 + /* PaX: limit KERNEL_CS to actual size */
22470 + unsigned long addr, limit;
22471 + struct desc_struct d;
22472 + int cpu;
22473 +
22474 + limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
22475 + limit = (limit - 1UL) >> PAGE_SHIFT;
22476 +
22477 + memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
22478 + for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
22479 + pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
22480 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
22481 + }
22482 +
22483 + /* PaX: make KERNEL_CS read-only */
22484 + addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
22485 + if (!paravirt_enabled())
22486 + set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
22487 +/*
22488 + for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
22489 + pgd = pgd_offset_k(addr);
22490 + pud = pud_offset(pgd, addr);
22491 + pmd = pmd_offset(pud, addr);
22492 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
22493 + }
22494 +*/
22495 +#ifdef CONFIG_X86_PAE
22496 + set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
22497 +/*
22498 + for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
22499 + pgd = pgd_offset_k(addr);
22500 + pud = pud_offset(pgd, addr);
22501 + pmd = pmd_offset(pud, addr);
22502 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
22503 + }
22504 +*/
22505 +#endif
22506 +
22507 +#ifdef CONFIG_MODULES
22508 + set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
22509 +#endif
22510 +
22511 +#else
22512 + pgd_t *pgd;
22513 + pud_t *pud;
22514 + pmd_t *pmd;
22515 + unsigned long addr, end;
22516 +
22517 + /* PaX: make kernel code/rodata read-only, rest non-executable */
22518 + for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
22519 + pgd = pgd_offset_k(addr);
22520 + pud = pud_offset(pgd, addr);
22521 + pmd = pmd_offset(pud, addr);
22522 + if (!pmd_present(*pmd))
22523 + continue;
22524 + if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
22525 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
22526 + else
22527 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
22528 + }
22529 +
22530 + addr = (unsigned long)__va(__pa(__START_KERNEL_map));
22531 + end = addr + KERNEL_IMAGE_SIZE;
22532 + for (; addr < end; addr += PMD_SIZE) {
22533 + pgd = pgd_offset_k(addr);
22534 + pud = pud_offset(pgd, addr);
22535 + pmd = pmd_offset(pud, addr);
22536 + if (!pmd_present(*pmd))
22537 + continue;
22538 + if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
22539 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
22540 + }
22541 +#endif
22542 +
22543 + flush_tlb_all();
22544 +#endif
22545 +
22546 free_init_pages("unused kernel memory",
22547 (unsigned long)(&__init_begin),
22548 (unsigned long)(&__init_end));
22549 diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
22550 index 29f7c6d..b46b35b 100644
22551 --- a/arch/x86/mm/init_32.c
22552 +++ b/arch/x86/mm/init_32.c
22553 @@ -74,36 +74,6 @@ static __init void *alloc_low_page(void)
22554 }
22555
22556 /*
22557 - * Creates a middle page table and puts a pointer to it in the
22558 - * given global directory entry. This only returns the gd entry
22559 - * in non-PAE compilation mode, since the middle layer is folded.
22560 - */
22561 -static pmd_t * __init one_md_table_init(pgd_t *pgd)
22562 -{
22563 - pud_t *pud;
22564 - pmd_t *pmd_table;
22565 -
22566 -#ifdef CONFIG_X86_PAE
22567 - if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
22568 - if (after_bootmem)
22569 - pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
22570 - else
22571 - pmd_table = (pmd_t *)alloc_low_page();
22572 - paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
22573 - set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
22574 - pud = pud_offset(pgd, 0);
22575 - BUG_ON(pmd_table != pmd_offset(pud, 0));
22576 -
22577 - return pmd_table;
22578 - }
22579 -#endif
22580 - pud = pud_offset(pgd, 0);
22581 - pmd_table = pmd_offset(pud, 0);
22582 -
22583 - return pmd_table;
22584 -}
22585 -
22586 -/*
22587 * Create a page table and place a pointer to it in a middle page
22588 * directory entry:
22589 */
22590 @@ -123,13 +93,28 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
22591 page_table = (pte_t *)alloc_low_page();
22592
22593 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
22594 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
22595 + set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
22596 +#else
22597 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
22598 +#endif
22599 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
22600 }
22601
22602 return pte_offset_kernel(pmd, 0);
22603 }
22604
22605 +static pmd_t * __init one_md_table_init(pgd_t *pgd)
22606 +{
22607 + pud_t *pud;
22608 + pmd_t *pmd_table;
22609 +
22610 + pud = pud_offset(pgd, 0);
22611 + pmd_table = pmd_offset(pud, 0);
22612 +
22613 + return pmd_table;
22614 +}
22615 +
22616 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
22617 {
22618 int pgd_idx = pgd_index(vaddr);
22619 @@ -203,6 +188,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
22620 int pgd_idx, pmd_idx;
22621 unsigned long vaddr;
22622 pgd_t *pgd;
22623 + pud_t *pud;
22624 pmd_t *pmd;
22625 pte_t *pte = NULL;
22626
22627 @@ -212,8 +198,13 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
22628 pgd = pgd_base + pgd_idx;
22629
22630 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
22631 - pmd = one_md_table_init(pgd);
22632 - pmd = pmd + pmd_index(vaddr);
22633 + pud = pud_offset(pgd, vaddr);
22634 + pmd = pmd_offset(pud, vaddr);
22635 +
22636 +#ifdef CONFIG_X86_PAE
22637 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
22638 +#endif
22639 +
22640 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
22641 pmd++, pmd_idx++) {
22642 pte = page_table_kmap_check(one_page_table_init(pmd),
22643 @@ -225,11 +216,20 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
22644 }
22645 }
22646
22647 -static inline int is_kernel_text(unsigned long addr)
22648 +static inline int is_kernel_text(unsigned long start, unsigned long end)
22649 {
22650 - if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
22651 - return 1;
22652 - return 0;
22653 + if ((start > ktla_ktva((unsigned long)_etext) ||
22654 + end <= ktla_ktva((unsigned long)_stext)) &&
22655 + (start > ktla_ktva((unsigned long)_einittext) ||
22656 + end <= ktla_ktva((unsigned long)_sinittext)) &&
22657 +
22658 +#ifdef CONFIG_ACPI_SLEEP
22659 + (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
22660 +#endif
22661 +
22662 + (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
22663 + return 0;
22664 + return 1;
22665 }
22666
22667 /*
22668 @@ -246,9 +246,10 @@ kernel_physical_mapping_init(unsigned long start,
22669 unsigned long last_map_addr = end;
22670 unsigned long start_pfn, end_pfn;
22671 pgd_t *pgd_base = swapper_pg_dir;
22672 - int pgd_idx, pmd_idx, pte_ofs;
22673 + unsigned int pgd_idx, pmd_idx, pte_ofs;
22674 unsigned long pfn;
22675 pgd_t *pgd;
22676 + pud_t *pud;
22677 pmd_t *pmd;
22678 pte_t *pte;
22679 unsigned pages_2m, pages_4k;
22680 @@ -281,8 +282,13 @@ repeat:
22681 pfn = start_pfn;
22682 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
22683 pgd = pgd_base + pgd_idx;
22684 - for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
22685 - pmd = one_md_table_init(pgd);
22686 + for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
22687 + pud = pud_offset(pgd, 0);
22688 + pmd = pmd_offset(pud, 0);
22689 +
22690 +#ifdef CONFIG_X86_PAE
22691 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
22692 +#endif
22693
22694 if (pfn >= end_pfn)
22695 continue;
22696 @@ -294,14 +300,13 @@ repeat:
22697 #endif
22698 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
22699 pmd++, pmd_idx++) {
22700 - unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
22701 + unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
22702
22703 /*
22704 * Map with big pages if possible, otherwise
22705 * create normal page tables:
22706 */
22707 if (use_pse) {
22708 - unsigned int addr2;
22709 pgprot_t prot = PAGE_KERNEL_LARGE;
22710 /*
22711 * first pass will use the same initial
22712 @@ -311,11 +316,7 @@ repeat:
22713 __pgprot(PTE_IDENT_ATTR |
22714 _PAGE_PSE);
22715
22716 - addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
22717 - PAGE_OFFSET + PAGE_SIZE-1;
22718 -
22719 - if (is_kernel_text(addr) ||
22720 - is_kernel_text(addr2))
22721 + if (is_kernel_text(address, address + PMD_SIZE))
22722 prot = PAGE_KERNEL_LARGE_EXEC;
22723
22724 pages_2m++;
22725 @@ -332,7 +333,7 @@ repeat:
22726 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
22727 pte += pte_ofs;
22728 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
22729 - pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
22730 + pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
22731 pgprot_t prot = PAGE_KERNEL;
22732 /*
22733 * first pass will use the same initial
22734 @@ -340,7 +341,7 @@ repeat:
22735 */
22736 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
22737
22738 - if (is_kernel_text(addr))
22739 + if (is_kernel_text(address, address + PAGE_SIZE))
22740 prot = PAGE_KERNEL_EXEC;
22741
22742 pages_4k++;
22743 @@ -472,7 +473,7 @@ void __init native_pagetable_setup_start(pgd_t *base)
22744
22745 pud = pud_offset(pgd, va);
22746 pmd = pmd_offset(pud, va);
22747 - if (!pmd_present(*pmd))
22748 + if (!pmd_present(*pmd) || pmd_huge(*pmd))
22749 break;
22750
22751 pte = pte_offset_kernel(pmd, va);
22752 @@ -524,12 +525,10 @@ void __init early_ioremap_page_table_range_init(void)
22753
22754 static void __init pagetable_init(void)
22755 {
22756 - pgd_t *pgd_base = swapper_pg_dir;
22757 -
22758 - permanent_kmaps_init(pgd_base);
22759 + permanent_kmaps_init(swapper_pg_dir);
22760 }
22761
22762 -pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
22763 +pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
22764 EXPORT_SYMBOL_GPL(__supported_pte_mask);
22765
22766 /* user-defined highmem size */
22767 @@ -757,6 +756,12 @@ void __init mem_init(void)
22768
22769 pci_iommu_alloc();
22770
22771 +#ifdef CONFIG_PAX_PER_CPU_PGD
22772 + clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
22773 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
22774 + KERNEL_PGD_PTRS);
22775 +#endif
22776 +
22777 #ifdef CONFIG_FLATMEM
22778 BUG_ON(!mem_map);
22779 #endif
22780 @@ -774,7 +779,7 @@ void __init mem_init(void)
22781 set_highmem_pages_init();
22782
22783 codesize = (unsigned long) &_etext - (unsigned long) &_text;
22784 - datasize = (unsigned long) &_edata - (unsigned long) &_etext;
22785 + datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
22786 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
22787
22788 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
22789 @@ -815,10 +820,10 @@ void __init mem_init(void)
22790 ((unsigned long)&__init_end -
22791 (unsigned long)&__init_begin) >> 10,
22792
22793 - (unsigned long)&_etext, (unsigned long)&_edata,
22794 - ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
22795 + (unsigned long)&_sdata, (unsigned long)&_edata,
22796 + ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
22797
22798 - (unsigned long)&_text, (unsigned long)&_etext,
22799 + ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
22800 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
22801
22802 /*
22803 @@ -896,6 +901,7 @@ void set_kernel_text_rw(void)
22804 if (!kernel_set_to_readonly)
22805 return;
22806
22807 + start = ktla_ktva(start);
22808 pr_debug("Set kernel text: %lx - %lx for read write\n",
22809 start, start+size);
22810
22811 @@ -910,6 +916,7 @@ void set_kernel_text_ro(void)
22812 if (!kernel_set_to_readonly)
22813 return;
22814
22815 + start = ktla_ktva(start);
22816 pr_debug("Set kernel text: %lx - %lx for read only\n",
22817 start, start+size);
22818
22819 @@ -938,6 +945,7 @@ void mark_rodata_ro(void)
22820 unsigned long start = PFN_ALIGN(_text);
22821 unsigned long size = PFN_ALIGN(_etext) - start;
22822
22823 + start = ktla_ktva(start);
22824 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
22825 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
22826 size >> 10);
22827 diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
22828 index bbaaa00..796fa65 100644
22829 --- a/arch/x86/mm/init_64.c
22830 +++ b/arch/x86/mm/init_64.c
22831 @@ -75,7 +75,7 @@ early_param("gbpages", parse_direct_gbpages_on);
22832 * around without checking the pgd every time.
22833 */
22834
22835 -pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
22836 +pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_IOMAP);
22837 EXPORT_SYMBOL_GPL(__supported_pte_mask);
22838
22839 int force_personality32;
22840 @@ -108,12 +108,22 @@ void sync_global_pgds(unsigned long start, unsigned long end)
22841
22842 for (address = start; address <= end; address += PGDIR_SIZE) {
22843 const pgd_t *pgd_ref = pgd_offset_k(address);
22844 +
22845 +#ifdef CONFIG_PAX_PER_CPU_PGD
22846 + unsigned long cpu;
22847 +#else
22848 struct page *page;
22849 +#endif
22850
22851 if (pgd_none(*pgd_ref))
22852 continue;
22853
22854 spin_lock(&pgd_lock);
22855 +
22856 +#ifdef CONFIG_PAX_PER_CPU_PGD
22857 + for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
22858 + pgd_t *pgd = pgd_offset_cpu(cpu, address);
22859 +#else
22860 list_for_each_entry(page, &pgd_list, lru) {
22861 pgd_t *pgd;
22862 spinlock_t *pgt_lock;
22863 @@ -122,6 +132,7 @@ void sync_global_pgds(unsigned long start, unsigned long end)
22864 /* the pgt_lock only for Xen */
22865 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
22866 spin_lock(pgt_lock);
22867 +#endif
22868
22869 if (pgd_none(*pgd))
22870 set_pgd(pgd, *pgd_ref);
22871 @@ -129,7 +140,10 @@ void sync_global_pgds(unsigned long start, unsigned long end)
22872 BUG_ON(pgd_page_vaddr(*pgd)
22873 != pgd_page_vaddr(*pgd_ref));
22874
22875 +#ifndef CONFIG_PAX_PER_CPU_PGD
22876 spin_unlock(pgt_lock);
22877 +#endif
22878 +
22879 }
22880 spin_unlock(&pgd_lock);
22881 }
22882 @@ -203,7 +217,9 @@ void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
22883 pmd = fill_pmd(pud, vaddr);
22884 pte = fill_pte(pmd, vaddr);
22885
22886 + pax_open_kernel();
22887 set_pte(pte, new_pte);
22888 + pax_close_kernel();
22889
22890 /*
22891 * It's enough to flush this one mapping.
22892 @@ -262,14 +278,12 @@ static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
22893 pgd = pgd_offset_k((unsigned long)__va(phys));
22894 if (pgd_none(*pgd)) {
22895 pud = (pud_t *) spp_getpage();
22896 - set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
22897 - _PAGE_USER));
22898 + set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
22899 }
22900 pud = pud_offset(pgd, (unsigned long)__va(phys));
22901 if (pud_none(*pud)) {
22902 pmd = (pmd_t *) spp_getpage();
22903 - set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
22904 - _PAGE_USER));
22905 + set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
22906 }
22907 pmd = pmd_offset(pud, phys);
22908 BUG_ON(!pmd_none(*pmd));
22909 @@ -330,7 +344,7 @@ static __ref void *alloc_low_page(unsigned long *phys)
22910 if (pfn >= pgt_buf_top)
22911 panic("alloc_low_page: ran out of memory");
22912
22913 - adr = early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
22914 + adr = (void __force_kernel *)early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
22915 clear_page(adr);
22916 *phys = pfn * PAGE_SIZE;
22917 return adr;
22918 @@ -346,7 +360,7 @@ static __ref void *map_low_page(void *virt)
22919
22920 phys = __pa(virt);
22921 left = phys & (PAGE_SIZE - 1);
22922 - adr = early_memremap(phys & PAGE_MASK, PAGE_SIZE);
22923 + adr = (void __force_kernel *)early_memremap(phys & PAGE_MASK, PAGE_SIZE);
22924 adr = (void *)(((unsigned long)adr) | left);
22925
22926 return adr;
22927 @@ -693,6 +707,12 @@ void __init mem_init(void)
22928
22929 pci_iommu_alloc();
22930
22931 +#ifdef CONFIG_PAX_PER_CPU_PGD
22932 + clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
22933 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
22934 + KERNEL_PGD_PTRS);
22935 +#endif
22936 +
22937 /* clear_bss() already clear the empty_zero_page */
22938
22939 reservedpages = 0;
22940 @@ -853,8 +873,8 @@ int kern_addr_valid(unsigned long addr)
22941 static struct vm_area_struct gate_vma = {
22942 .vm_start = VSYSCALL_START,
22943 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
22944 - .vm_page_prot = PAGE_READONLY_EXEC,
22945 - .vm_flags = VM_READ | VM_EXEC
22946 + .vm_page_prot = PAGE_READONLY,
22947 + .vm_flags = VM_READ
22948 };
22949
22950 struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
22951 @@ -888,7 +908,7 @@ int in_gate_area_no_mm(unsigned long addr)
22952
22953 const char *arch_vma_name(struct vm_area_struct *vma)
22954 {
22955 - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
22956 + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
22957 return "[vdso]";
22958 if (vma == &gate_vma)
22959 return "[vsyscall]";
22960 diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
22961 index 7b179b4..6bd1777 100644
22962 --- a/arch/x86/mm/iomap_32.c
22963 +++ b/arch/x86/mm/iomap_32.c
22964 @@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
22965 type = kmap_atomic_idx_push();
22966 idx = type + KM_TYPE_NR * smp_processor_id();
22967 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
22968 +
22969 + pax_open_kernel();
22970 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
22971 + pax_close_kernel();
22972 +
22973 arch_flush_lazy_mmu_mode();
22974
22975 return (void *)vaddr;
22976 diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
22977 index be1ef57..55f0160 100644
22978 --- a/arch/x86/mm/ioremap.c
22979 +++ b/arch/x86/mm/ioremap.c
22980 @@ -97,7 +97,7 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
22981 for (pfn = phys_addr >> PAGE_SHIFT; pfn <= last_pfn; pfn++) {
22982 int is_ram = page_is_ram(pfn);
22983
22984 - if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
22985 + if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
22986 return NULL;
22987 WARN_ON_ONCE(is_ram);
22988 }
22989 @@ -315,6 +315,9 @@ void *xlate_dev_mem_ptr(unsigned long phys)
22990
22991 /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
22992 if (page_is_ram(start >> PAGE_SHIFT))
22993 +#ifdef CONFIG_HIGHMEM
22994 + if ((start >> PAGE_SHIFT) < max_low_pfn)
22995 +#endif
22996 return __va(phys);
22997
22998 addr = (void __force *)ioremap_cache(start, PAGE_SIZE);
22999 @@ -344,7 +347,7 @@ static int __init early_ioremap_debug_setup(char *str)
23000 early_param("early_ioremap_debug", early_ioremap_debug_setup);
23001
23002 static __initdata int after_paging_init;
23003 -static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
23004 +static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
23005
23006 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
23007 {
23008 @@ -381,8 +384,7 @@ void __init early_ioremap_init(void)
23009 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
23010
23011 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
23012 - memset(bm_pte, 0, sizeof(bm_pte));
23013 - pmd_populate_kernel(&init_mm, pmd, bm_pte);
23014 + pmd_populate_user(&init_mm, pmd, bm_pte);
23015
23016 /*
23017 * The boot-ioremap range spans multiple pmds, for which
23018 diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
23019 index d87dd6d..bf3fa66 100644
23020 --- a/arch/x86/mm/kmemcheck/kmemcheck.c
23021 +++ b/arch/x86/mm/kmemcheck/kmemcheck.c
23022 @@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *regs, unsigned long address,
23023 * memory (e.g. tracked pages)? For now, we need this to avoid
23024 * invoking kmemcheck for PnP BIOS calls.
23025 */
23026 - if (regs->flags & X86_VM_MASK)
23027 + if (v8086_mode(regs))
23028 return false;
23029 - if (regs->cs != __KERNEL_CS)
23030 + if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
23031 return false;
23032
23033 pte = kmemcheck_pte_lookup(address);
23034 diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
23035 index 845df68..1d8d29f 100644
23036 --- a/arch/x86/mm/mmap.c
23037 +++ b/arch/x86/mm/mmap.c
23038 @@ -52,7 +52,7 @@ static unsigned int stack_maxrandom_size(void)
23039 * Leave an at least ~128 MB hole with possible stack randomization.
23040 */
23041 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
23042 -#define MAX_GAP (TASK_SIZE/6*5)
23043 +#define MAX_GAP (pax_task_size/6*5)
23044
23045 static int mmap_is_legacy(void)
23046 {
23047 @@ -82,27 +82,40 @@ static unsigned long mmap_rnd(void)
23048 return rnd << PAGE_SHIFT;
23049 }
23050
23051 -static unsigned long mmap_base(void)
23052 +static unsigned long mmap_base(struct mm_struct *mm)
23053 {
23054 unsigned long gap = rlimit(RLIMIT_STACK);
23055 + unsigned long pax_task_size = TASK_SIZE;
23056 +
23057 +#ifdef CONFIG_PAX_SEGMEXEC
23058 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
23059 + pax_task_size = SEGMEXEC_TASK_SIZE;
23060 +#endif
23061
23062 if (gap < MIN_GAP)
23063 gap = MIN_GAP;
23064 else if (gap > MAX_GAP)
23065 gap = MAX_GAP;
23066
23067 - return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
23068 + return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
23069 }
23070
23071 /*
23072 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
23073 * does, but not when emulating X86_32
23074 */
23075 -static unsigned long mmap_legacy_base(void)
23076 +static unsigned long mmap_legacy_base(struct mm_struct *mm)
23077 {
23078 - if (mmap_is_ia32())
23079 + if (mmap_is_ia32()) {
23080 +
23081 +#ifdef CONFIG_PAX_SEGMEXEC
23082 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
23083 + return SEGMEXEC_TASK_UNMAPPED_BASE;
23084 + else
23085 +#endif
23086 +
23087 return TASK_UNMAPPED_BASE;
23088 - else
23089 + } else
23090 return TASK_UNMAPPED_BASE + mmap_rnd();
23091 }
23092
23093 @@ -113,11 +126,23 @@ static unsigned long mmap_legacy_base(void)
23094 void arch_pick_mmap_layout(struct mm_struct *mm)
23095 {
23096 if (mmap_is_legacy()) {
23097 - mm->mmap_base = mmap_legacy_base();
23098 + mm->mmap_base = mmap_legacy_base(mm);
23099 +
23100 +#ifdef CONFIG_PAX_RANDMMAP
23101 + if (mm->pax_flags & MF_PAX_RANDMMAP)
23102 + mm->mmap_base += mm->delta_mmap;
23103 +#endif
23104 +
23105 mm->get_unmapped_area = arch_get_unmapped_area;
23106 mm->unmap_area = arch_unmap_area;
23107 } else {
23108 - mm->mmap_base = mmap_base();
23109 + mm->mmap_base = mmap_base(mm);
23110 +
23111 +#ifdef CONFIG_PAX_RANDMMAP
23112 + if (mm->pax_flags & MF_PAX_RANDMMAP)
23113 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
23114 +#endif
23115 +
23116 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
23117 mm->unmap_area = arch_unmap_area_topdown;
23118 }
23119 diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
23120 index de54b9b..799051e 100644
23121 --- a/arch/x86/mm/mmio-mod.c
23122 +++ b/arch/x86/mm/mmio-mod.c
23123 @@ -194,7 +194,7 @@ static void pre(struct kmmio_probe *p, struct pt_regs *regs,
23124 break;
23125 default:
23126 {
23127 - unsigned char *ip = (unsigned char *)instptr;
23128 + unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
23129 my_trace->opcode = MMIO_UNKNOWN_OP;
23130 my_trace->width = 0;
23131 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
23132 @@ -234,7 +234,7 @@ static void post(struct kmmio_probe *p, unsigned long condition,
23133 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
23134 void __iomem *addr)
23135 {
23136 - static atomic_t next_id;
23137 + static atomic_unchecked_t next_id;
23138 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
23139 /* These are page-unaligned. */
23140 struct mmiotrace_map map = {
23141 @@ -258,7 +258,7 @@ static void ioremap_trace_core(resource_size_t offset, unsigned long size,
23142 .private = trace
23143 },
23144 .phys = offset,
23145 - .id = atomic_inc_return(&next_id)
23146 + .id = atomic_inc_return_unchecked(&next_id)
23147 };
23148 map.map_id = trace->id;
23149
23150 diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c
23151 index b008656..773eac2 100644
23152 --- a/arch/x86/mm/pageattr-test.c
23153 +++ b/arch/x86/mm/pageattr-test.c
23154 @@ -36,7 +36,7 @@ enum {
23155
23156 static int pte_testbit(pte_t pte)
23157 {
23158 - return pte_flags(pte) & _PAGE_UNUSED1;
23159 + return pte_flags(pte) & _PAGE_CPA_TEST;
23160 }
23161
23162 struct split_state {
23163 diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
23164 index f9e5267..77b1a40 100644
23165 --- a/arch/x86/mm/pageattr.c
23166 +++ b/arch/x86/mm/pageattr.c
23167 @@ -261,7 +261,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
23168 */
23169 #ifdef CONFIG_PCI_BIOS
23170 if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
23171 - pgprot_val(forbidden) |= _PAGE_NX;
23172 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
23173 #endif
23174
23175 /*
23176 @@ -269,9 +269,10 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
23177 * Does not cover __inittext since that is gone later on. On
23178 * 64bit we do not enforce !NX on the low mapping
23179 */
23180 - if (within(address, (unsigned long)_text, (unsigned long)_etext))
23181 - pgprot_val(forbidden) |= _PAGE_NX;
23182 + if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
23183 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
23184
23185 +#ifdef CONFIG_DEBUG_RODATA
23186 /*
23187 * The .rodata section needs to be read-only. Using the pfn
23188 * catches all aliases.
23189 @@ -279,6 +280,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
23190 if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
23191 __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
23192 pgprot_val(forbidden) |= _PAGE_RW;
23193 +#endif
23194
23195 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
23196 /*
23197 @@ -317,6 +319,13 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
23198 }
23199 #endif
23200
23201 +#ifdef CONFIG_PAX_KERNEXEC
23202 + if (within(pfn, __pa((unsigned long)&_text), __pa((unsigned long)&_sdata))) {
23203 + pgprot_val(forbidden) |= _PAGE_RW;
23204 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
23205 + }
23206 +#endif
23207 +
23208 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
23209
23210 return prot;
23211 @@ -369,23 +378,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
23212 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
23213 {
23214 /* change init_mm */
23215 + pax_open_kernel();
23216 set_pte_atomic(kpte, pte);
23217 +
23218 #ifdef CONFIG_X86_32
23219 if (!SHARED_KERNEL_PMD) {
23220 +
23221 +#ifdef CONFIG_PAX_PER_CPU_PGD
23222 + unsigned long cpu;
23223 +#else
23224 struct page *page;
23225 +#endif
23226
23227 +#ifdef CONFIG_PAX_PER_CPU_PGD
23228 + for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
23229 + pgd_t *pgd = get_cpu_pgd(cpu);
23230 +#else
23231 list_for_each_entry(page, &pgd_list, lru) {
23232 - pgd_t *pgd;
23233 + pgd_t *pgd = (pgd_t *)page_address(page);
23234 +#endif
23235 +
23236 pud_t *pud;
23237 pmd_t *pmd;
23238
23239 - pgd = (pgd_t *)page_address(page) + pgd_index(address);
23240 + pgd += pgd_index(address);
23241 pud = pud_offset(pgd, address);
23242 pmd = pmd_offset(pud, address);
23243 set_pte_atomic((pte_t *)pmd, pte);
23244 }
23245 }
23246 #endif
23247 + pax_close_kernel();
23248 }
23249
23250 static int
23251 diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
23252 index f6ff57b..481690f 100644
23253 --- a/arch/x86/mm/pat.c
23254 +++ b/arch/x86/mm/pat.c
23255 @@ -361,7 +361,7 @@ int free_memtype(u64 start, u64 end)
23256
23257 if (!entry) {
23258 printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
23259 - current->comm, current->pid, start, end);
23260 + current->comm, task_pid_nr(current), start, end);
23261 return -EINVAL;
23262 }
23263
23264 @@ -492,8 +492,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
23265 while (cursor < to) {
23266 if (!devmem_is_allowed(pfn)) {
23267 printk(KERN_INFO
23268 - "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
23269 - current->comm, from, to);
23270 + "Program %s tried to access /dev/mem between %Lx->%Lx (%Lx).\n",
23271 + current->comm, from, to, cursor);
23272 return 0;
23273 }
23274 cursor += PAGE_SIZE;
23275 @@ -557,7 +557,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
23276 printk(KERN_INFO
23277 "%s:%d ioremap_change_attr failed %s "
23278 "for %Lx-%Lx\n",
23279 - current->comm, current->pid,
23280 + current->comm, task_pid_nr(current),
23281 cattr_name(flags),
23282 base, (unsigned long long)(base + size));
23283 return -EINVAL;
23284 @@ -593,7 +593,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
23285 if (want_flags != flags) {
23286 printk(KERN_WARNING
23287 "%s:%d map pfn RAM range req %s for %Lx-%Lx, got %s\n",
23288 - current->comm, current->pid,
23289 + current->comm, task_pid_nr(current),
23290 cattr_name(want_flags),
23291 (unsigned long long)paddr,
23292 (unsigned long long)(paddr + size),
23293 @@ -615,7 +615,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
23294 free_memtype(paddr, paddr + size);
23295 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
23296 " for %Lx-%Lx, got %s\n",
23297 - current->comm, current->pid,
23298 + current->comm, task_pid_nr(current),
23299 cattr_name(want_flags),
23300 (unsigned long long)paddr,
23301 (unsigned long long)(paddr + size),
23302 diff --git a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c
23303 index 9f0614d..92ae64a 100644
23304 --- a/arch/x86/mm/pf_in.c
23305 +++ b/arch/x86/mm/pf_in.c
23306 @@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned long ins_addr)
23307 int i;
23308 enum reason_type rv = OTHERS;
23309
23310 - p = (unsigned char *)ins_addr;
23311 + p = (unsigned char *)ktla_ktva(ins_addr);
23312 p += skip_prefix(p, &prf);
23313 p += get_opcode(p, &opcode);
23314
23315 @@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(unsigned long ins_addr)
23316 struct prefix_bits prf;
23317 int i;
23318
23319 - p = (unsigned char *)ins_addr;
23320 + p = (unsigned char *)ktla_ktva(ins_addr);
23321 p += skip_prefix(p, &prf);
23322 p += get_opcode(p, &opcode);
23323
23324 @@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned long ins_addr)
23325 struct prefix_bits prf;
23326 int i;
23327
23328 - p = (unsigned char *)ins_addr;
23329 + p = (unsigned char *)ktla_ktva(ins_addr);
23330 p += skip_prefix(p, &prf);
23331 p += get_opcode(p, &opcode);
23332
23333 @@ -415,7 +415,7 @@ unsigned long get_ins_reg_val(unsigned long ins_addr, struct pt_regs *regs)
23334 struct prefix_bits prf;
23335 int i;
23336
23337 - p = (unsigned char *)ins_addr;
23338 + p = (unsigned char *)ktla_ktva(ins_addr);
23339 p += skip_prefix(p, &prf);
23340 p += get_opcode(p, &opcode);
23341 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
23342 @@ -470,7 +470,7 @@ unsigned long get_ins_imm_val(unsigned long ins_addr)
23343 struct prefix_bits prf;
23344 int i;
23345
23346 - p = (unsigned char *)ins_addr;
23347 + p = (unsigned char *)ktla_ktva(ins_addr);
23348 p += skip_prefix(p, &prf);
23349 p += get_opcode(p, &opcode);
23350 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
23351 diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
23352 index 8573b83..c3b1a30 100644
23353 --- a/arch/x86/mm/pgtable.c
23354 +++ b/arch/x86/mm/pgtable.c
23355 @@ -84,10 +84,52 @@ static inline void pgd_list_del(pgd_t *pgd)
23356 list_del(&page->lru);
23357 }
23358
23359 -#define UNSHARED_PTRS_PER_PGD \
23360 - (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
23361 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23362 +pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
23363
23364 +void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count)
23365 +{
23366 + while (count--)
23367 + *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
23368 +}
23369 +#endif
23370
23371 +#ifdef CONFIG_PAX_PER_CPU_PGD
23372 +void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count)
23373 +{
23374 + while (count--)
23375 +
23376 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23377 + *dst++ = __pgd(pgd_val(*src++) & clone_pgd_mask);
23378 +#else
23379 + *dst++ = *src++;
23380 +#endif
23381 +
23382 +}
23383 +#endif
23384 +
23385 +#ifdef CONFIG_X86_64
23386 +#define pxd_t pud_t
23387 +#define pyd_t pgd_t
23388 +#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
23389 +#define pxd_free(mm, pud) pud_free((mm), (pud))
23390 +#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
23391 +#define pyd_offset(mm, address) pgd_offset((mm), (address))
23392 +#define PYD_SIZE PGDIR_SIZE
23393 +#else
23394 +#define pxd_t pmd_t
23395 +#define pyd_t pud_t
23396 +#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
23397 +#define pxd_free(mm, pud) pmd_free((mm), (pud))
23398 +#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
23399 +#define pyd_offset(mm, address) pud_offset((mm), (address))
23400 +#define PYD_SIZE PUD_SIZE
23401 +#endif
23402 +
23403 +#ifdef CONFIG_PAX_PER_CPU_PGD
23404 +static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {}
23405 +static inline void pgd_dtor(pgd_t *pgd) {}
23406 +#else
23407 static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
23408 {
23409 BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
23410 @@ -128,6 +170,7 @@ static void pgd_dtor(pgd_t *pgd)
23411 pgd_list_del(pgd);
23412 spin_unlock(&pgd_lock);
23413 }
23414 +#endif
23415
23416 /*
23417 * List of all pgd's needed for non-PAE so it can invalidate entries
23418 @@ -140,7 +183,7 @@ static void pgd_dtor(pgd_t *pgd)
23419 * -- wli
23420 */
23421
23422 -#ifdef CONFIG_X86_PAE
23423 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
23424 /*
23425 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
23426 * updating the top-level pagetable entries to guarantee the
23427 @@ -152,7 +195,7 @@ static void pgd_dtor(pgd_t *pgd)
23428 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
23429 * and initialize the kernel pmds here.
23430 */
23431 -#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
23432 +#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
23433
23434 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
23435 {
23436 @@ -170,36 +213,38 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
23437 */
23438 flush_tlb_mm(mm);
23439 }
23440 +#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
23441 +#define PREALLOCATED_PXDS USER_PGD_PTRS
23442 #else /* !CONFIG_X86_PAE */
23443
23444 /* No need to prepopulate any pagetable entries in non-PAE modes. */
23445 -#define PREALLOCATED_PMDS 0
23446 +#define PREALLOCATED_PXDS 0
23447
23448 #endif /* CONFIG_X86_PAE */
23449
23450 -static void free_pmds(pmd_t *pmds[])
23451 +static void free_pxds(pxd_t *pxds[])
23452 {
23453 int i;
23454
23455 - for(i = 0; i < PREALLOCATED_PMDS; i++)
23456 - if (pmds[i])
23457 - free_page((unsigned long)pmds[i]);
23458 + for(i = 0; i < PREALLOCATED_PXDS; i++)
23459 + if (pxds[i])
23460 + free_page((unsigned long)pxds[i]);
23461 }
23462
23463 -static int preallocate_pmds(pmd_t *pmds[])
23464 +static int preallocate_pxds(pxd_t *pxds[])
23465 {
23466 int i;
23467 bool failed = false;
23468
23469 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
23470 - pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
23471 - if (pmd == NULL)
23472 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
23473 + pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
23474 + if (pxd == NULL)
23475 failed = true;
23476 - pmds[i] = pmd;
23477 + pxds[i] = pxd;
23478 }
23479
23480 if (failed) {
23481 - free_pmds(pmds);
23482 + free_pxds(pxds);
23483 return -ENOMEM;
23484 }
23485
23486 @@ -212,51 +257,55 @@ static int preallocate_pmds(pmd_t *pmds[])
23487 * preallocate which never got a corresponding vma will need to be
23488 * freed manually.
23489 */
23490 -static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
23491 +static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
23492 {
23493 int i;
23494
23495 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
23496 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
23497 pgd_t pgd = pgdp[i];
23498
23499 if (pgd_val(pgd) != 0) {
23500 - pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
23501 + pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
23502
23503 - pgdp[i] = native_make_pgd(0);
23504 + set_pgd(pgdp + i, native_make_pgd(0));
23505
23506 - paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
23507 - pmd_free(mm, pmd);
23508 + paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
23509 + pxd_free(mm, pxd);
23510 }
23511 }
23512 }
23513
23514 -static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
23515 +static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
23516 {
23517 - pud_t *pud;
23518 + pyd_t *pyd;
23519 unsigned long addr;
23520 int i;
23521
23522 - if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
23523 + if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
23524 return;
23525
23526 - pud = pud_offset(pgd, 0);
23527 +#ifdef CONFIG_X86_64
23528 + pyd = pyd_offset(mm, 0L);
23529 +#else
23530 + pyd = pyd_offset(pgd, 0L);
23531 +#endif
23532
23533 - for (addr = i = 0; i < PREALLOCATED_PMDS;
23534 - i++, pud++, addr += PUD_SIZE) {
23535 - pmd_t *pmd = pmds[i];
23536 + for (addr = i = 0; i < PREALLOCATED_PXDS;
23537 + i++, pyd++, addr += PYD_SIZE) {
23538 + pxd_t *pxd = pxds[i];
23539
23540 if (i >= KERNEL_PGD_BOUNDARY)
23541 - memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
23542 - sizeof(pmd_t) * PTRS_PER_PMD);
23543 + memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
23544 + sizeof(pxd_t) * PTRS_PER_PMD);
23545
23546 - pud_populate(mm, pud, pmd);
23547 + pyd_populate(mm, pyd, pxd);
23548 }
23549 }
23550
23551 pgd_t *pgd_alloc(struct mm_struct *mm)
23552 {
23553 pgd_t *pgd;
23554 - pmd_t *pmds[PREALLOCATED_PMDS];
23555 + pxd_t *pxds[PREALLOCATED_PXDS];
23556
23557 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
23558
23559 @@ -265,11 +314,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
23560
23561 mm->pgd = pgd;
23562
23563 - if (preallocate_pmds(pmds) != 0)
23564 + if (preallocate_pxds(pxds) != 0)
23565 goto out_free_pgd;
23566
23567 if (paravirt_pgd_alloc(mm) != 0)
23568 - goto out_free_pmds;
23569 + goto out_free_pxds;
23570
23571 /*
23572 * Make sure that pre-populating the pmds is atomic with
23573 @@ -279,14 +328,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
23574 spin_lock(&pgd_lock);
23575
23576 pgd_ctor(mm, pgd);
23577 - pgd_prepopulate_pmd(mm, pgd, pmds);
23578 + pgd_prepopulate_pxd(mm, pgd, pxds);
23579
23580 spin_unlock(&pgd_lock);
23581
23582 return pgd;
23583
23584 -out_free_pmds:
23585 - free_pmds(pmds);
23586 +out_free_pxds:
23587 + free_pxds(pxds);
23588 out_free_pgd:
23589 free_page((unsigned long)pgd);
23590 out:
23591 @@ -295,7 +344,7 @@ out:
23592
23593 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
23594 {
23595 - pgd_mop_up_pmds(mm, pgd);
23596 + pgd_mop_up_pxds(mm, pgd);
23597 pgd_dtor(pgd);
23598 paravirt_pgd_free(mm, pgd);
23599 free_page((unsigned long)pgd);
23600 diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
23601 index cac7184..09a39fa 100644
23602 --- a/arch/x86/mm/pgtable_32.c
23603 +++ b/arch/x86/mm/pgtable_32.c
23604 @@ -48,10 +48,13 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
23605 return;
23606 }
23607 pte = pte_offset_kernel(pmd, vaddr);
23608 +
23609 + pax_open_kernel();
23610 if (pte_val(pteval))
23611 set_pte_at(&init_mm, vaddr, pte, pteval);
23612 else
23613 pte_clear(&init_mm, vaddr, pte);
23614 + pax_close_kernel();
23615
23616 /*
23617 * It's enough to flush this one mapping.
23618 diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
23619 index 410531d..0f16030 100644
23620 --- a/arch/x86/mm/setup_nx.c
23621 +++ b/arch/x86/mm/setup_nx.c
23622 @@ -5,8 +5,10 @@
23623 #include <asm/pgtable.h>
23624 #include <asm/proto.h>
23625
23626 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
23627 static int disable_nx __cpuinitdata;
23628
23629 +#ifndef CONFIG_PAX_PAGEEXEC
23630 /*
23631 * noexec = on|off
23632 *
23633 @@ -28,12 +30,17 @@ static int __init noexec_setup(char *str)
23634 return 0;
23635 }
23636 early_param("noexec", noexec_setup);
23637 +#endif
23638 +
23639 +#endif
23640
23641 void __cpuinit x86_configure_nx(void)
23642 {
23643 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
23644 if (cpu_has_nx && !disable_nx)
23645 __supported_pte_mask |= _PAGE_NX;
23646 else
23647 +#endif
23648 __supported_pte_mask &= ~_PAGE_NX;
23649 }
23650
23651 diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
23652 index d6c0418..06a0ad5 100644
23653 --- a/arch/x86/mm/tlb.c
23654 +++ b/arch/x86/mm/tlb.c
23655 @@ -65,7 +65,11 @@ void leave_mm(int cpu)
23656 BUG();
23657 cpumask_clear_cpu(cpu,
23658 mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
23659 +
23660 +#ifndef CONFIG_PAX_PER_CPU_PGD
23661 load_cr3(swapper_pg_dir);
23662 +#endif
23663 +
23664 }
23665 EXPORT_SYMBOL_GPL(leave_mm);
23666
23667 diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S
23668 index 6687022..ceabcfa 100644
23669 --- a/arch/x86/net/bpf_jit.S
23670 +++ b/arch/x86/net/bpf_jit.S
23671 @@ -9,6 +9,7 @@
23672 */
23673 #include <linux/linkage.h>
23674 #include <asm/dwarf2.h>
23675 +#include <asm/alternative-asm.h>
23676
23677 /*
23678 * Calling convention :
23679 @@ -35,6 +36,7 @@ sk_load_word:
23680 jle bpf_slow_path_word
23681 mov (SKBDATA,%rsi),%eax
23682 bswap %eax /* ntohl() */
23683 + pax_force_retaddr
23684 ret
23685
23686
23687 @@ -53,6 +55,7 @@ sk_load_half:
23688 jle bpf_slow_path_half
23689 movzwl (SKBDATA,%rsi),%eax
23690 rol $8,%ax # ntohs()
23691 + pax_force_retaddr
23692 ret
23693
23694 sk_load_byte_ind:
23695 @@ -66,6 +69,7 @@ sk_load_byte:
23696 cmp %esi,%r9d /* if (offset >= hlen) goto bpf_slow_path_byte */
23697 jle bpf_slow_path_byte
23698 movzbl (SKBDATA,%rsi),%eax
23699 + pax_force_retaddr
23700 ret
23701
23702 /**
23703 @@ -82,6 +86,7 @@ ENTRY(sk_load_byte_msh)
23704 movzbl (SKBDATA,%rsi),%ebx
23705 and $15,%bl
23706 shl $2,%bl
23707 + pax_force_retaddr
23708 ret
23709 CFI_ENDPROC
23710 ENDPROC(sk_load_byte_msh)
23711 @@ -91,6 +96,7 @@ bpf_error:
23712 xor %eax,%eax
23713 mov -8(%rbp),%rbx
23714 leaveq
23715 + pax_force_retaddr
23716 ret
23717
23718 /* rsi contains offset and can be scratched */
23719 @@ -113,6 +119,7 @@ bpf_slow_path_word:
23720 js bpf_error
23721 mov -12(%rbp),%eax
23722 bswap %eax
23723 + pax_force_retaddr
23724 ret
23725
23726 bpf_slow_path_half:
23727 @@ -121,12 +128,14 @@ bpf_slow_path_half:
23728 mov -12(%rbp),%ax
23729 rol $8,%ax
23730 movzwl %ax,%eax
23731 + pax_force_retaddr
23732 ret
23733
23734 bpf_slow_path_byte:
23735 bpf_slow_path_common(1)
23736 js bpf_error
23737 movzbl -12(%rbp),%eax
23738 + pax_force_retaddr
23739 ret
23740
23741 bpf_slow_path_byte_msh:
23742 @@ -137,4 +146,5 @@ bpf_slow_path_byte_msh:
23743 and $15,%al
23744 shl $2,%al
23745 xchg %eax,%ebx
23746 + pax_force_retaddr
23747 ret
23748 diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
23749 index 7c1b765..8c072c6 100644
23750 --- a/arch/x86/net/bpf_jit_comp.c
23751 +++ b/arch/x86/net/bpf_jit_comp.c
23752 @@ -117,6 +117,10 @@ static inline void bpf_flush_icache(void *start, void *end)
23753 set_fs(old_fs);
23754 }
23755
23756 +struct bpf_jit_work {
23757 + struct work_struct work;
23758 + void *image;
23759 +};
23760
23761 void bpf_jit_compile(struct sk_filter *fp)
23762 {
23763 @@ -141,6 +145,10 @@ void bpf_jit_compile(struct sk_filter *fp)
23764 if (addrs == NULL)
23765 return;
23766
23767 + fp->work = kmalloc(sizeof(*fp->work), GFP_KERNEL);
23768 + if (!fp->work)
23769 + goto out;
23770 +
23771 /* Before first pass, make a rough estimation of addrs[]
23772 * each bpf instruction is translated to less than 64 bytes
23773 */
23774 @@ -476,7 +484,7 @@ void bpf_jit_compile(struct sk_filter *fp)
23775 func = sk_load_word;
23776 common_load: seen |= SEEN_DATAREF;
23777 if ((int)K < 0)
23778 - goto out;
23779 + goto error;
23780 t_offset = func - (image + addrs[i]);
23781 EMIT1_off32(0xbe, K); /* mov imm32,%esi */
23782 EMIT1_off32(0xe8, t_offset); /* call */
23783 @@ -586,17 +594,18 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
23784 break;
23785 default:
23786 /* hmm, too complex filter, give up with jit compiler */
23787 - goto out;
23788 + goto error;
23789 }
23790 ilen = prog - temp;
23791 if (image) {
23792 if (unlikely(proglen + ilen > oldproglen)) {
23793 pr_err("bpb_jit_compile fatal error\n");
23794 - kfree(addrs);
23795 - module_free(NULL, image);
23796 - return;
23797 + module_free_exec(NULL, image);
23798 + goto error;
23799 }
23800 + pax_open_kernel();
23801 memcpy(image + proglen, temp, ilen);
23802 + pax_close_kernel();
23803 }
23804 proglen += ilen;
23805 addrs[i] = proglen;
23806 @@ -617,11 +626,9 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
23807 break;
23808 }
23809 if (proglen == oldproglen) {
23810 - image = module_alloc(max_t(unsigned int,
23811 - proglen,
23812 - sizeof(struct work_struct)));
23813 + image = module_alloc_exec(proglen);
23814 if (!image)
23815 - goto out;
23816 + goto error;
23817 }
23818 oldproglen = proglen;
23819 }
23820 @@ -637,7 +644,10 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
23821 bpf_flush_icache(image, image + proglen);
23822
23823 fp->bpf_func = (void *)image;
23824 - }
23825 + } else
23826 +error:
23827 + kfree(fp->work);
23828 +
23829 out:
23830 kfree(addrs);
23831 return;
23832 @@ -645,18 +655,20 @@ out:
23833
23834 static void jit_free_defer(struct work_struct *arg)
23835 {
23836 - module_free(NULL, arg);
23837 + module_free_exec(NULL, ((struct bpf_jit_work *)arg)->image);
23838 + kfree(arg);
23839 }
23840
23841 /* run from softirq, we must use a work_struct to call
23842 - * module_free() from process context
23843 + * module_free_exec() from process context
23844 */
23845 void bpf_jit_free(struct sk_filter *fp)
23846 {
23847 if (fp->bpf_func != sk_run_filter) {
23848 - struct work_struct *work = (struct work_struct *)fp->bpf_func;
23849 + struct work_struct *work = &fp->work->work;
23850
23851 INIT_WORK(work, jit_free_defer);
23852 + fp->work->image = fp->bpf_func;
23853 schedule_work(work);
23854 }
23855 }
23856 diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
23857 index bff89df..377758a 100644
23858 --- a/arch/x86/oprofile/backtrace.c
23859 +++ b/arch/x86/oprofile/backtrace.c
23860 @@ -46,11 +46,11 @@ dump_user_backtrace_32(struct stack_frame_ia32 *head)
23861 struct stack_frame_ia32 *fp;
23862 unsigned long bytes;
23863
23864 - bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
23865 + bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
23866 if (bytes != sizeof(bufhead))
23867 return NULL;
23868
23869 - fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame);
23870 + fp = (struct stack_frame_ia32 __force_kernel *) compat_ptr(bufhead[0].next_frame);
23871
23872 oprofile_add_trace(bufhead[0].return_address);
23873
23874 @@ -92,7 +92,7 @@ static struct stack_frame *dump_user_backtrace(struct stack_frame *head)
23875 struct stack_frame bufhead[2];
23876 unsigned long bytes;
23877
23878 - bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
23879 + bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
23880 if (bytes != sizeof(bufhead))
23881 return NULL;
23882
23883 @@ -111,7 +111,7 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth)
23884 {
23885 struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
23886
23887 - if (!user_mode_vm(regs)) {
23888 + if (!user_mode(regs)) {
23889 unsigned long stack = kernel_stack_pointer(regs);
23890 if (depth)
23891 dump_trace(NULL, regs, (unsigned long *)stack, 0,
23892 diff --git a/arch/x86/pci/mrst.c b/arch/x86/pci/mrst.c
23893 index cb29191..036766d 100644
23894 --- a/arch/x86/pci/mrst.c
23895 +++ b/arch/x86/pci/mrst.c
23896 @@ -234,7 +234,9 @@ int __init pci_mrst_init(void)
23897 printk(KERN_INFO "Moorestown platform detected, using MRST PCI ops\n");
23898 pci_mmcfg_late_init();
23899 pcibios_enable_irq = mrst_pci_irq_enable;
23900 - pci_root_ops = pci_mrst_ops;
23901 + pax_open_kernel();
23902 + memcpy((void *)&pci_root_ops, &pci_mrst_ops, sizeof(pci_mrst_ops));
23903 + pax_close_kernel();
23904 /* Continue with standard init */
23905 return 1;
23906 }
23907 diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
23908 index db0e9a5..0372c14 100644
23909 --- a/arch/x86/pci/pcbios.c
23910 +++ b/arch/x86/pci/pcbios.c
23911 @@ -79,50 +79,93 @@ union bios32 {
23912 static struct {
23913 unsigned long address;
23914 unsigned short segment;
23915 -} bios32_indirect = { 0, __KERNEL_CS };
23916 +} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
23917
23918 /*
23919 * Returns the entry point for the given service, NULL on error
23920 */
23921
23922 -static unsigned long bios32_service(unsigned long service)
23923 +static unsigned long __devinit bios32_service(unsigned long service)
23924 {
23925 unsigned char return_code; /* %al */
23926 unsigned long address; /* %ebx */
23927 unsigned long length; /* %ecx */
23928 unsigned long entry; /* %edx */
23929 unsigned long flags;
23930 + struct desc_struct d, *gdt;
23931
23932 local_irq_save(flags);
23933 - __asm__("lcall *(%%edi); cld"
23934 +
23935 + gdt = get_cpu_gdt_table(smp_processor_id());
23936 +
23937 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
23938 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
23939 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
23940 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
23941 +
23942 + __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
23943 : "=a" (return_code),
23944 "=b" (address),
23945 "=c" (length),
23946 "=d" (entry)
23947 : "0" (service),
23948 "1" (0),
23949 - "D" (&bios32_indirect));
23950 + "D" (&bios32_indirect),
23951 + "r"(__PCIBIOS_DS)
23952 + : "memory");
23953 +
23954 + pax_open_kernel();
23955 + gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
23956 + gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
23957 + gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
23958 + gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
23959 + pax_close_kernel();
23960 +
23961 local_irq_restore(flags);
23962
23963 switch (return_code) {
23964 - case 0:
23965 - return address + entry;
23966 - case 0x80: /* Not present */
23967 - printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
23968 - return 0;
23969 - default: /* Shouldn't happen */
23970 - printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
23971 - service, return_code);
23972 + case 0: {
23973 + int cpu;
23974 + unsigned char flags;
23975 +
23976 + printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
23977 + if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
23978 + printk(KERN_WARNING "bios32_service: not valid\n");
23979 return 0;
23980 + }
23981 + address = address + PAGE_OFFSET;
23982 + length += 16UL; /* some BIOSs underreport this... */
23983 + flags = 4;
23984 + if (length >= 64*1024*1024) {
23985 + length >>= PAGE_SHIFT;
23986 + flags |= 8;
23987 + }
23988 +
23989 + for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
23990 + gdt = get_cpu_gdt_table(cpu);
23991 + pack_descriptor(&d, address, length, 0x9b, flags);
23992 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
23993 + pack_descriptor(&d, address, length, 0x93, flags);
23994 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
23995 + }
23996 + return entry;
23997 + }
23998 + case 0x80: /* Not present */
23999 + printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
24000 + return 0;
24001 + default: /* Shouldn't happen */
24002 + printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
24003 + service, return_code);
24004 + return 0;
24005 }
24006 }
24007
24008 static struct {
24009 unsigned long address;
24010 unsigned short segment;
24011 -} pci_indirect = { 0, __KERNEL_CS };
24012 +} pci_indirect __read_only = { 0, __PCIBIOS_CS };
24013
24014 -static int pci_bios_present;
24015 +static int pci_bios_present __read_only;
24016
24017 static int __devinit check_pcibios(void)
24018 {
24019 @@ -131,11 +174,13 @@ static int __devinit check_pcibios(void)
24020 unsigned long flags, pcibios_entry;
24021
24022 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
24023 - pci_indirect.address = pcibios_entry + PAGE_OFFSET;
24024 + pci_indirect.address = pcibios_entry;
24025
24026 local_irq_save(flags);
24027 - __asm__(
24028 - "lcall *(%%edi); cld\n\t"
24029 + __asm__("movw %w6, %%ds\n\t"
24030 + "lcall *%%ss:(%%edi); cld\n\t"
24031 + "push %%ss\n\t"
24032 + "pop %%ds\n\t"
24033 "jc 1f\n\t"
24034 "xor %%ah, %%ah\n"
24035 "1:"
24036 @@ -144,7 +189,8 @@ static int __devinit check_pcibios(void)
24037 "=b" (ebx),
24038 "=c" (ecx)
24039 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
24040 - "D" (&pci_indirect)
24041 + "D" (&pci_indirect),
24042 + "r" (__PCIBIOS_DS)
24043 : "memory");
24044 local_irq_restore(flags);
24045
24046 @@ -189,7 +235,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
24047
24048 switch (len) {
24049 case 1:
24050 - __asm__("lcall *(%%esi); cld\n\t"
24051 + __asm__("movw %w6, %%ds\n\t"
24052 + "lcall *%%ss:(%%esi); cld\n\t"
24053 + "push %%ss\n\t"
24054 + "pop %%ds\n\t"
24055 "jc 1f\n\t"
24056 "xor %%ah, %%ah\n"
24057 "1:"
24058 @@ -198,7 +247,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
24059 : "1" (PCIBIOS_READ_CONFIG_BYTE),
24060 "b" (bx),
24061 "D" ((long)reg),
24062 - "S" (&pci_indirect));
24063 + "S" (&pci_indirect),
24064 + "r" (__PCIBIOS_DS));
24065 /*
24066 * Zero-extend the result beyond 8 bits, do not trust the
24067 * BIOS having done it:
24068 @@ -206,7 +256,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
24069 *value &= 0xff;
24070 break;
24071 case 2:
24072 - __asm__("lcall *(%%esi); cld\n\t"
24073 + __asm__("movw %w6, %%ds\n\t"
24074 + "lcall *%%ss:(%%esi); cld\n\t"
24075 + "push %%ss\n\t"
24076 + "pop %%ds\n\t"
24077 "jc 1f\n\t"
24078 "xor %%ah, %%ah\n"
24079 "1:"
24080 @@ -215,7 +268,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
24081 : "1" (PCIBIOS_READ_CONFIG_WORD),
24082 "b" (bx),
24083 "D" ((long)reg),
24084 - "S" (&pci_indirect));
24085 + "S" (&pci_indirect),
24086 + "r" (__PCIBIOS_DS));
24087 /*
24088 * Zero-extend the result beyond 16 bits, do not trust the
24089 * BIOS having done it:
24090 @@ -223,7 +277,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
24091 *value &= 0xffff;
24092 break;
24093 case 4:
24094 - __asm__("lcall *(%%esi); cld\n\t"
24095 + __asm__("movw %w6, %%ds\n\t"
24096 + "lcall *%%ss:(%%esi); cld\n\t"
24097 + "push %%ss\n\t"
24098 + "pop %%ds\n\t"
24099 "jc 1f\n\t"
24100 "xor %%ah, %%ah\n"
24101 "1:"
24102 @@ -232,7 +289,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
24103 : "1" (PCIBIOS_READ_CONFIG_DWORD),
24104 "b" (bx),
24105 "D" ((long)reg),
24106 - "S" (&pci_indirect));
24107 + "S" (&pci_indirect),
24108 + "r" (__PCIBIOS_DS));
24109 break;
24110 }
24111
24112 @@ -256,7 +314,10 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
24113
24114 switch (len) {
24115 case 1:
24116 - __asm__("lcall *(%%esi); cld\n\t"
24117 + __asm__("movw %w6, %%ds\n\t"
24118 + "lcall *%%ss:(%%esi); cld\n\t"
24119 + "push %%ss\n\t"
24120 + "pop %%ds\n\t"
24121 "jc 1f\n\t"
24122 "xor %%ah, %%ah\n"
24123 "1:"
24124 @@ -265,10 +326,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
24125 "c" (value),
24126 "b" (bx),
24127 "D" ((long)reg),
24128 - "S" (&pci_indirect));
24129 + "S" (&pci_indirect),
24130 + "r" (__PCIBIOS_DS));
24131 break;
24132 case 2:
24133 - __asm__("lcall *(%%esi); cld\n\t"
24134 + __asm__("movw %w6, %%ds\n\t"
24135 + "lcall *%%ss:(%%esi); cld\n\t"
24136 + "push %%ss\n\t"
24137 + "pop %%ds\n\t"
24138 "jc 1f\n\t"
24139 "xor %%ah, %%ah\n"
24140 "1:"
24141 @@ -277,10 +342,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
24142 "c" (value),
24143 "b" (bx),
24144 "D" ((long)reg),
24145 - "S" (&pci_indirect));
24146 + "S" (&pci_indirect),
24147 + "r" (__PCIBIOS_DS));
24148 break;
24149 case 4:
24150 - __asm__("lcall *(%%esi); cld\n\t"
24151 + __asm__("movw %w6, %%ds\n\t"
24152 + "lcall *%%ss:(%%esi); cld\n\t"
24153 + "push %%ss\n\t"
24154 + "pop %%ds\n\t"
24155 "jc 1f\n\t"
24156 "xor %%ah, %%ah\n"
24157 "1:"
24158 @@ -289,7 +358,8 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
24159 "c" (value),
24160 "b" (bx),
24161 "D" ((long)reg),
24162 - "S" (&pci_indirect));
24163 + "S" (&pci_indirect),
24164 + "r" (__PCIBIOS_DS));
24165 break;
24166 }
24167
24168 @@ -394,10 +464,13 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
24169
24170 DBG("PCI: Fetching IRQ routing table... ");
24171 __asm__("push %%es\n\t"
24172 + "movw %w8, %%ds\n\t"
24173 "push %%ds\n\t"
24174 "pop %%es\n\t"
24175 - "lcall *(%%esi); cld\n\t"
24176 + "lcall *%%ss:(%%esi); cld\n\t"
24177 "pop %%es\n\t"
24178 + "push %%ss\n\t"
24179 + "pop %%ds\n"
24180 "jc 1f\n\t"
24181 "xor %%ah, %%ah\n"
24182 "1:"
24183 @@ -408,7 +481,8 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
24184 "1" (0),
24185 "D" ((long) &opt),
24186 "S" (&pci_indirect),
24187 - "m" (opt)
24188 + "m" (opt),
24189 + "r" (__PCIBIOS_DS)
24190 : "memory");
24191 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
24192 if (ret & 0xff00)
24193 @@ -432,7 +506,10 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
24194 {
24195 int ret;
24196
24197 - __asm__("lcall *(%%esi); cld\n\t"
24198 + __asm__("movw %w5, %%ds\n\t"
24199 + "lcall *%%ss:(%%esi); cld\n\t"
24200 + "push %%ss\n\t"
24201 + "pop %%ds\n"
24202 "jc 1f\n\t"
24203 "xor %%ah, %%ah\n"
24204 "1:"
24205 @@ -440,7 +517,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
24206 : "0" (PCIBIOS_SET_PCI_HW_INT),
24207 "b" ((dev->bus->number << 8) | dev->devfn),
24208 "c" ((irq << 8) | (pin + 10)),
24209 - "S" (&pci_indirect));
24210 + "S" (&pci_indirect),
24211 + "r" (__PCIBIOS_DS));
24212 return !(ret & 0xff00);
24213 }
24214 EXPORT_SYMBOL(pcibios_set_irq_routing);
24215 diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c
24216 index 40e4469..1ab536e 100644
24217 --- a/arch/x86/platform/efi/efi_32.c
24218 +++ b/arch/x86/platform/efi/efi_32.c
24219 @@ -44,11 +44,22 @@ void efi_call_phys_prelog(void)
24220 {
24221 struct desc_ptr gdt_descr;
24222
24223 +#ifdef CONFIG_PAX_KERNEXEC
24224 + struct desc_struct d;
24225 +#endif
24226 +
24227 local_irq_save(efi_rt_eflags);
24228
24229 load_cr3(initial_page_table);
24230 __flush_tlb_all();
24231
24232 +#ifdef CONFIG_PAX_KERNEXEC
24233 + pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
24234 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
24235 + pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
24236 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
24237 +#endif
24238 +
24239 gdt_descr.address = __pa(get_cpu_gdt_table(0));
24240 gdt_descr.size = GDT_SIZE - 1;
24241 load_gdt(&gdt_descr);
24242 @@ -58,6 +69,14 @@ void efi_call_phys_epilog(void)
24243 {
24244 struct desc_ptr gdt_descr;
24245
24246 +#ifdef CONFIG_PAX_KERNEXEC
24247 + struct desc_struct d;
24248 +
24249 + memset(&d, 0, sizeof d);
24250 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
24251 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
24252 +#endif
24253 +
24254 gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
24255 gdt_descr.size = GDT_SIZE - 1;
24256 load_gdt(&gdt_descr);
24257 diff --git a/arch/x86/platform/efi/efi_stub_32.S b/arch/x86/platform/efi/efi_stub_32.S
24258 index fbe66e6..c5c0dd2 100644
24259 --- a/arch/x86/platform/efi/efi_stub_32.S
24260 +++ b/arch/x86/platform/efi/efi_stub_32.S
24261 @@ -6,7 +6,9 @@
24262 */
24263
24264 #include <linux/linkage.h>
24265 +#include <linux/init.h>
24266 #include <asm/page_types.h>
24267 +#include <asm/segment.h>
24268
24269 /*
24270 * efi_call_phys(void *, ...) is a function with variable parameters.
24271 @@ -20,7 +22,7 @@
24272 * service functions will comply with gcc calling convention, too.
24273 */
24274
24275 -.text
24276 +__INIT
24277 ENTRY(efi_call_phys)
24278 /*
24279 * 0. The function can only be called in Linux kernel. So CS has been
24280 @@ -36,9 +38,11 @@ ENTRY(efi_call_phys)
24281 * The mapping of lower virtual memory has been created in prelog and
24282 * epilog.
24283 */
24284 - movl $1f, %edx
24285 - subl $__PAGE_OFFSET, %edx
24286 - jmp *%edx
24287 + movl $(__KERNEXEC_EFI_DS), %edx
24288 + mov %edx, %ds
24289 + mov %edx, %es
24290 + mov %edx, %ss
24291 + ljmp $(__KERNEXEC_EFI_CS),$1f-__PAGE_OFFSET
24292 1:
24293
24294 /*
24295 @@ -47,14 +51,8 @@ ENTRY(efi_call_phys)
24296 * parameter 2, ..., param n. To make things easy, we save the return
24297 * address of efi_call_phys in a global variable.
24298 */
24299 - popl %edx
24300 - movl %edx, saved_return_addr
24301 - /* get the function pointer into ECX*/
24302 - popl %ecx
24303 - movl %ecx, efi_rt_function_ptr
24304 - movl $2f, %edx
24305 - subl $__PAGE_OFFSET, %edx
24306 - pushl %edx
24307 + popl (saved_return_addr)
24308 + popl (efi_rt_function_ptr)
24309
24310 /*
24311 * 3. Clear PG bit in %CR0.
24312 @@ -73,9 +71,8 @@ ENTRY(efi_call_phys)
24313 /*
24314 * 5. Call the physical function.
24315 */
24316 - jmp *%ecx
24317 + call *(efi_rt_function_ptr-__PAGE_OFFSET)
24318
24319 -2:
24320 /*
24321 * 6. After EFI runtime service returns, control will return to
24322 * following instruction. We'd better readjust stack pointer first.
24323 @@ -88,35 +85,32 @@ ENTRY(efi_call_phys)
24324 movl %cr0, %edx
24325 orl $0x80000000, %edx
24326 movl %edx, %cr0
24327 - jmp 1f
24328 -1:
24329 +
24330 /*
24331 * 8. Now restore the virtual mode from flat mode by
24332 * adding EIP with PAGE_OFFSET.
24333 */
24334 - movl $1f, %edx
24335 - jmp *%edx
24336 + ljmp $(__KERNEL_CS),$1f+__PAGE_OFFSET
24337 1:
24338 + movl $(__KERNEL_DS), %edx
24339 + mov %edx, %ds
24340 + mov %edx, %es
24341 + mov %edx, %ss
24342
24343 /*
24344 * 9. Balance the stack. And because EAX contain the return value,
24345 * we'd better not clobber it.
24346 */
24347 - leal efi_rt_function_ptr, %edx
24348 - movl (%edx), %ecx
24349 - pushl %ecx
24350 + pushl (efi_rt_function_ptr)
24351
24352 /*
24353 - * 10. Push the saved return address onto the stack and return.
24354 + * 10. Return to the saved return address.
24355 */
24356 - leal saved_return_addr, %edx
24357 - movl (%edx), %ecx
24358 - pushl %ecx
24359 - ret
24360 + jmpl *(saved_return_addr)
24361 ENDPROC(efi_call_phys)
24362 .previous
24363
24364 -.data
24365 +__INITDATA
24366 saved_return_addr:
24367 .long 0
24368 efi_rt_function_ptr:
24369 diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S
24370 index 4c07cca..2c8427d 100644
24371 --- a/arch/x86/platform/efi/efi_stub_64.S
24372 +++ b/arch/x86/platform/efi/efi_stub_64.S
24373 @@ -7,6 +7,7 @@
24374 */
24375
24376 #include <linux/linkage.h>
24377 +#include <asm/alternative-asm.h>
24378
24379 #define SAVE_XMM \
24380 mov %rsp, %rax; \
24381 @@ -40,6 +41,7 @@ ENTRY(efi_call0)
24382 call *%rdi
24383 addq $32, %rsp
24384 RESTORE_XMM
24385 + pax_force_retaddr 0, 1
24386 ret
24387 ENDPROC(efi_call0)
24388
24389 @@ -50,6 +52,7 @@ ENTRY(efi_call1)
24390 call *%rdi
24391 addq $32, %rsp
24392 RESTORE_XMM
24393 + pax_force_retaddr 0, 1
24394 ret
24395 ENDPROC(efi_call1)
24396
24397 @@ -60,6 +63,7 @@ ENTRY(efi_call2)
24398 call *%rdi
24399 addq $32, %rsp
24400 RESTORE_XMM
24401 + pax_force_retaddr 0, 1
24402 ret
24403 ENDPROC(efi_call2)
24404
24405 @@ -71,6 +75,7 @@ ENTRY(efi_call3)
24406 call *%rdi
24407 addq $32, %rsp
24408 RESTORE_XMM
24409 + pax_force_retaddr 0, 1
24410 ret
24411 ENDPROC(efi_call3)
24412
24413 @@ -83,6 +88,7 @@ ENTRY(efi_call4)
24414 call *%rdi
24415 addq $32, %rsp
24416 RESTORE_XMM
24417 + pax_force_retaddr 0, 1
24418 ret
24419 ENDPROC(efi_call4)
24420
24421 @@ -96,6 +102,7 @@ ENTRY(efi_call5)
24422 call *%rdi
24423 addq $48, %rsp
24424 RESTORE_XMM
24425 + pax_force_retaddr 0, 1
24426 ret
24427 ENDPROC(efi_call5)
24428
24429 @@ -112,5 +119,6 @@ ENTRY(efi_call6)
24430 call *%rdi
24431 addq $48, %rsp
24432 RESTORE_XMM
24433 + pax_force_retaddr 0, 1
24434 ret
24435 ENDPROC(efi_call6)
24436 diff --git a/arch/x86/platform/mrst/mrst.c b/arch/x86/platform/mrst/mrst.c
24437 index ad4ec1c..686479e 100644
24438 --- a/arch/x86/platform/mrst/mrst.c
24439 +++ b/arch/x86/platform/mrst/mrst.c
24440 @@ -76,18 +76,20 @@ struct sfi_rtc_table_entry sfi_mrtc_array[SFI_MRTC_MAX];
24441 EXPORT_SYMBOL_GPL(sfi_mrtc_array);
24442 int sfi_mrtc_num;
24443
24444 -static void mrst_power_off(void)
24445 +static __noreturn void mrst_power_off(void)
24446 {
24447 if (__mrst_cpu_chip == MRST_CPU_CHIP_LINCROFT)
24448 intel_scu_ipc_simple_command(IPCMSG_COLD_RESET, 1);
24449 + BUG();
24450 }
24451
24452 -static void mrst_reboot(void)
24453 +static __noreturn void mrst_reboot(void)
24454 {
24455 if (__mrst_cpu_chip == MRST_CPU_CHIP_LINCROFT)
24456 intel_scu_ipc_simple_command(IPCMSG_COLD_RESET, 0);
24457 else
24458 intel_scu_ipc_simple_command(IPCMSG_COLD_BOOT, 0);
24459 + BUG();
24460 }
24461
24462 /* parse all the mtimer info to a static mtimer array */
24463 diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
24464 index f10c0af..3ec1f95 100644
24465 --- a/arch/x86/power/cpu.c
24466 +++ b/arch/x86/power/cpu.c
24467 @@ -131,7 +131,7 @@ static void do_fpu_end(void)
24468 static void fix_processor_context(void)
24469 {
24470 int cpu = smp_processor_id();
24471 - struct tss_struct *t = &per_cpu(init_tss, cpu);
24472 + struct tss_struct *t = init_tss + cpu;
24473
24474 set_tss_desc(cpu, t); /*
24475 * This just modifies memory; should not be
24476 @@ -141,7 +141,9 @@ static void fix_processor_context(void)
24477 */
24478
24479 #ifdef CONFIG_X86_64
24480 + pax_open_kernel();
24481 get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
24482 + pax_close_kernel();
24483
24484 syscall_init(); /* This sets MSR_*STAR and related */
24485 #endif
24486 diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
24487 index 5d17950..2253fc9 100644
24488 --- a/arch/x86/vdso/Makefile
24489 +++ b/arch/x86/vdso/Makefile
24490 @@ -137,7 +137,7 @@ quiet_cmd_vdso = VDSO $@
24491 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
24492 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
24493
24494 -VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
24495 +VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
24496 GCOV_PROFILE := n
24497
24498 #
24499 diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
24500 index 468d591..8e80a0a 100644
24501 --- a/arch/x86/vdso/vdso32-setup.c
24502 +++ b/arch/x86/vdso/vdso32-setup.c
24503 @@ -25,6 +25,7 @@
24504 #include <asm/tlbflush.h>
24505 #include <asm/vdso.h>
24506 #include <asm/proto.h>
24507 +#include <asm/mman.h>
24508
24509 enum {
24510 VDSO_DISABLED = 0,
24511 @@ -226,7 +227,7 @@ static inline void map_compat_vdso(int map)
24512 void enable_sep_cpu(void)
24513 {
24514 int cpu = get_cpu();
24515 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
24516 + struct tss_struct *tss = init_tss + cpu;
24517
24518 if (!boot_cpu_has(X86_FEATURE_SEP)) {
24519 put_cpu();
24520 @@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
24521 gate_vma.vm_start = FIXADDR_USER_START;
24522 gate_vma.vm_end = FIXADDR_USER_END;
24523 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
24524 - gate_vma.vm_page_prot = __P101;
24525 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
24526 /*
24527 * Make sure the vDSO gets into every core dump.
24528 * Dumping its contents makes post-mortem fully interpretable later
24529 @@ -331,14 +332,14 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
24530 if (compat)
24531 addr = VDSO_HIGH_BASE;
24532 else {
24533 - addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
24534 + addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
24535 if (IS_ERR_VALUE(addr)) {
24536 ret = addr;
24537 goto up_fail;
24538 }
24539 }
24540
24541 - current->mm->context.vdso = (void *)addr;
24542 + current->mm->context.vdso = addr;
24543
24544 if (compat_uses_vma || !compat) {
24545 /*
24546 @@ -361,11 +362,11 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
24547 }
24548
24549 current_thread_info()->sysenter_return =
24550 - VDSO32_SYMBOL(addr, SYSENTER_RETURN);
24551 + (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
24552
24553 up_fail:
24554 if (ret)
24555 - current->mm->context.vdso = NULL;
24556 + current->mm->context.vdso = 0;
24557
24558 up_write(&mm->mmap_sem);
24559
24560 @@ -412,8 +413,14 @@ __initcall(ia32_binfmt_init);
24561
24562 const char *arch_vma_name(struct vm_area_struct *vma)
24563 {
24564 - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
24565 + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
24566 return "[vdso]";
24567 +
24568 +#ifdef CONFIG_PAX_SEGMEXEC
24569 + if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
24570 + return "[vdso]";
24571 +#endif
24572 +
24573 return NULL;
24574 }
24575
24576 @@ -423,7 +430,7 @@ struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
24577 * Check to see if the corresponding task was created in compat vdso
24578 * mode.
24579 */
24580 - if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
24581 + if (mm && mm->context.vdso == VDSO_HIGH_BASE)
24582 return &gate_vma;
24583 return NULL;
24584 }
24585 diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
24586 index 153407c..611cba9 100644
24587 --- a/arch/x86/vdso/vma.c
24588 +++ b/arch/x86/vdso/vma.c
24589 @@ -16,8 +16,6 @@
24590 #include <asm/vdso.h>
24591 #include <asm/page.h>
24592
24593 -unsigned int __read_mostly vdso_enabled = 1;
24594 -
24595 extern char vdso_start[], vdso_end[];
24596 extern unsigned short vdso_sync_cpuid;
24597
24598 @@ -96,7 +94,6 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
24599 * unaligned here as a result of stack start randomization.
24600 */
24601 addr = PAGE_ALIGN(addr);
24602 - addr = align_addr(addr, NULL, ALIGN_VDSO);
24603
24604 return addr;
24605 }
24606 @@ -106,40 +103,35 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
24607 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
24608 {
24609 struct mm_struct *mm = current->mm;
24610 - unsigned long addr;
24611 + unsigned long addr = 0;
24612 int ret;
24613
24614 - if (!vdso_enabled)
24615 - return 0;
24616 -
24617 down_write(&mm->mmap_sem);
24618 +
24619 +#ifdef CONFIG_PAX_RANDMMAP
24620 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
24621 +#endif
24622 +
24623 addr = vdso_addr(mm->start_stack, vdso_size);
24624 + addr = align_addr(addr, NULL, ALIGN_VDSO);
24625 addr = get_unmapped_area(NULL, addr, vdso_size, 0, 0);
24626 if (IS_ERR_VALUE(addr)) {
24627 ret = addr;
24628 goto up_fail;
24629 }
24630
24631 - current->mm->context.vdso = (void *)addr;
24632 + mm->context.vdso = addr;
24633
24634 ret = install_special_mapping(mm, addr, vdso_size,
24635 VM_READ|VM_EXEC|
24636 VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
24637 VM_ALWAYSDUMP,
24638 vdso_pages);
24639 - if (ret) {
24640 - current->mm->context.vdso = NULL;
24641 - goto up_fail;
24642 - }
24643 +
24644 + if (ret)
24645 + mm->context.vdso = 0;
24646
24647 up_fail:
24648 up_write(&mm->mmap_sem);
24649 return ret;
24650 }
24651 -
24652 -static __init int vdso_setup(char *s)
24653 -{
24654 - vdso_enabled = simple_strtoul(s, NULL, 0);
24655 - return 0;
24656 -}
24657 -__setup("vdso=", vdso_setup);
24658 diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
24659 index 1f92865..c843b20 100644
24660 --- a/arch/x86/xen/enlighten.c
24661 +++ b/arch/x86/xen/enlighten.c
24662 @@ -85,8 +85,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
24663
24664 struct shared_info xen_dummy_shared_info;
24665
24666 -void *xen_initial_gdt;
24667 -
24668 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
24669 __read_mostly int xen_have_vector_callback;
24670 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
24671 @@ -1029,7 +1027,7 @@ static const struct pv_apic_ops xen_apic_ops __initconst = {
24672 #endif
24673 };
24674
24675 -static void xen_reboot(int reason)
24676 +static __noreturn void xen_reboot(int reason)
24677 {
24678 struct sched_shutdown r = { .reason = reason };
24679
24680 @@ -1037,17 +1035,17 @@ static void xen_reboot(int reason)
24681 BUG();
24682 }
24683
24684 -static void xen_restart(char *msg)
24685 +static __noreturn void xen_restart(char *msg)
24686 {
24687 xen_reboot(SHUTDOWN_reboot);
24688 }
24689
24690 -static void xen_emergency_restart(void)
24691 +static __noreturn void xen_emergency_restart(void)
24692 {
24693 xen_reboot(SHUTDOWN_reboot);
24694 }
24695
24696 -static void xen_machine_halt(void)
24697 +static __noreturn void xen_machine_halt(void)
24698 {
24699 xen_reboot(SHUTDOWN_poweroff);
24700 }
24701 @@ -1153,7 +1151,17 @@ asmlinkage void __init xen_start_kernel(void)
24702 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
24703
24704 /* Work out if we support NX */
24705 - x86_configure_nx();
24706 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
24707 + if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
24708 + (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
24709 + unsigned l, h;
24710 +
24711 + __supported_pte_mask |= _PAGE_NX;
24712 + rdmsr(MSR_EFER, l, h);
24713 + l |= EFER_NX;
24714 + wrmsr(MSR_EFER, l, h);
24715 + }
24716 +#endif
24717
24718 xen_setup_features();
24719
24720 @@ -1184,13 +1192,6 @@ asmlinkage void __init xen_start_kernel(void)
24721
24722 machine_ops = xen_machine_ops;
24723
24724 - /*
24725 - * The only reliable way to retain the initial address of the
24726 - * percpu gdt_page is to remember it here, so we can go and
24727 - * mark it RW later, when the initial percpu area is freed.
24728 - */
24729 - xen_initial_gdt = &per_cpu(gdt_page, 0);
24730 -
24731 xen_smp_init();
24732
24733 #ifdef CONFIG_ACPI_NUMA
24734 diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
24735 index 87f6673..e2555a6 100644
24736 --- a/arch/x86/xen/mmu.c
24737 +++ b/arch/x86/xen/mmu.c
24738 @@ -1733,6 +1733,9 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
24739 convert_pfn_mfn(init_level4_pgt);
24740 convert_pfn_mfn(level3_ident_pgt);
24741 convert_pfn_mfn(level3_kernel_pgt);
24742 + convert_pfn_mfn(level3_vmalloc_start_pgt);
24743 + convert_pfn_mfn(level3_vmalloc_end_pgt);
24744 + convert_pfn_mfn(level3_vmemmap_pgt);
24745
24746 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
24747 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
24748 @@ -1751,7 +1754,11 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
24749 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
24750 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
24751 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
24752 + set_page_prot(level3_vmalloc_start_pgt, PAGE_KERNEL_RO);
24753 + set_page_prot(level3_vmalloc_end_pgt, PAGE_KERNEL_RO);
24754 + set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
24755 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
24756 + set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
24757 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
24758 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
24759
24760 @@ -1962,6 +1969,7 @@ static void __init xen_post_allocator_init(void)
24761 pv_mmu_ops.set_pud = xen_set_pud;
24762 #if PAGETABLE_LEVELS == 4
24763 pv_mmu_ops.set_pgd = xen_set_pgd;
24764 + pv_mmu_ops.set_pgd_batched = xen_set_pgd;
24765 #endif
24766
24767 /* This will work as long as patching hasn't happened yet
24768 @@ -2043,6 +2051,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
24769 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
24770 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
24771 .set_pgd = xen_set_pgd_hyper,
24772 + .set_pgd_batched = xen_set_pgd_hyper,
24773
24774 .alloc_pud = xen_alloc_pmd_init,
24775 .release_pud = xen_release_pmd_init,
24776 diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
24777 index 041d4fe..7666b7e 100644
24778 --- a/arch/x86/xen/smp.c
24779 +++ b/arch/x86/xen/smp.c
24780 @@ -194,11 +194,6 @@ static void __init xen_smp_prepare_boot_cpu(void)
24781 {
24782 BUG_ON(smp_processor_id() != 0);
24783 native_smp_prepare_boot_cpu();
24784 -
24785 - /* We've switched to the "real" per-cpu gdt, so make sure the
24786 - old memory can be recycled */
24787 - make_lowmem_page_readwrite(xen_initial_gdt);
24788 -
24789 xen_filter_cpu_maps();
24790 xen_setup_vcpu_info_placement();
24791 }
24792 @@ -275,12 +270,12 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
24793 gdt = get_cpu_gdt_table(cpu);
24794
24795 ctxt->flags = VGCF_IN_KERNEL;
24796 - ctxt->user_regs.ds = __USER_DS;
24797 - ctxt->user_regs.es = __USER_DS;
24798 + ctxt->user_regs.ds = __KERNEL_DS;
24799 + ctxt->user_regs.es = __KERNEL_DS;
24800 ctxt->user_regs.ss = __KERNEL_DS;
24801 #ifdef CONFIG_X86_32
24802 ctxt->user_regs.fs = __KERNEL_PERCPU;
24803 - ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
24804 + savesegment(gs, ctxt->user_regs.gs);
24805 #else
24806 ctxt->gs_base_kernel = per_cpu_offset(cpu);
24807 #endif
24808 @@ -331,13 +326,12 @@ static int __cpuinit xen_cpu_up(unsigned int cpu)
24809 int rc;
24810
24811 per_cpu(current_task, cpu) = idle;
24812 + per_cpu(current_tinfo, cpu) = &idle->tinfo;
24813 #ifdef CONFIG_X86_32
24814 irq_ctx_init(cpu);
24815 #else
24816 clear_tsk_thread_flag(idle, TIF_FORK);
24817 - per_cpu(kernel_stack, cpu) =
24818 - (unsigned long)task_stack_page(idle) -
24819 - KERNEL_STACK_OFFSET + THREAD_SIZE;
24820 + per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
24821 #endif
24822 xen_setup_runstate_info(cpu);
24823 xen_setup_timer(cpu);
24824 diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
24825 index b040b0e..8cc4fe0 100644
24826 --- a/arch/x86/xen/xen-asm_32.S
24827 +++ b/arch/x86/xen/xen-asm_32.S
24828 @@ -83,14 +83,14 @@ ENTRY(xen_iret)
24829 ESP_OFFSET=4 # bytes pushed onto stack
24830
24831 /*
24832 - * Store vcpu_info pointer for easy access. Do it this way to
24833 - * avoid having to reload %fs
24834 + * Store vcpu_info pointer for easy access.
24835 */
24836 #ifdef CONFIG_SMP
24837 - GET_THREAD_INFO(%eax)
24838 - movl TI_cpu(%eax), %eax
24839 - movl __per_cpu_offset(,%eax,4), %eax
24840 - mov xen_vcpu(%eax), %eax
24841 + push %fs
24842 + mov $(__KERNEL_PERCPU), %eax
24843 + mov %eax, %fs
24844 + mov PER_CPU_VAR(xen_vcpu), %eax
24845 + pop %fs
24846 #else
24847 movl xen_vcpu, %eax
24848 #endif
24849 diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
24850 index aaa7291..3f77960 100644
24851 --- a/arch/x86/xen/xen-head.S
24852 +++ b/arch/x86/xen/xen-head.S
24853 @@ -19,6 +19,17 @@ ENTRY(startup_xen)
24854 #ifdef CONFIG_X86_32
24855 mov %esi,xen_start_info
24856 mov $init_thread_union+THREAD_SIZE,%esp
24857 +#ifdef CONFIG_SMP
24858 + movl $cpu_gdt_table,%edi
24859 + movl $__per_cpu_load,%eax
24860 + movw %ax,__KERNEL_PERCPU + 2(%edi)
24861 + rorl $16,%eax
24862 + movb %al,__KERNEL_PERCPU + 4(%edi)
24863 + movb %ah,__KERNEL_PERCPU + 7(%edi)
24864 + movl $__per_cpu_end - 1,%eax
24865 + subl $__per_cpu_start,%eax
24866 + movw %ax,__KERNEL_PERCPU + 0(%edi)
24867 +#endif
24868 #else
24869 mov %rsi,xen_start_info
24870 mov $init_thread_union+THREAD_SIZE,%rsp
24871 diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
24872 index b095739..8c17bcd 100644
24873 --- a/arch/x86/xen/xen-ops.h
24874 +++ b/arch/x86/xen/xen-ops.h
24875 @@ -10,8 +10,6 @@
24876 extern const char xen_hypervisor_callback[];
24877 extern const char xen_failsafe_callback[];
24878
24879 -extern void *xen_initial_gdt;
24880 -
24881 struct trap_info;
24882 void xen_copy_trap_info(struct trap_info *traps);
24883
24884 diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
24885 index 58916af..9cb880b 100644
24886 --- a/block/blk-iopoll.c
24887 +++ b/block/blk-iopoll.c
24888 @@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopoll *iopoll)
24889 }
24890 EXPORT_SYMBOL(blk_iopoll_complete);
24891
24892 -static void blk_iopoll_softirq(struct softirq_action *h)
24893 +static void blk_iopoll_softirq(void)
24894 {
24895 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
24896 int rearm = 0, budget = blk_iopoll_budget;
24897 diff --git a/block/blk-map.c b/block/blk-map.c
24898 index 623e1cd..ca1e109 100644
24899 --- a/block/blk-map.c
24900 +++ b/block/blk-map.c
24901 @@ -302,7 +302,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
24902 if (!len || !kbuf)
24903 return -EINVAL;
24904
24905 - do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
24906 + do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf);
24907 if (do_copy)
24908 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
24909 else
24910 diff --git a/block/blk-softirq.c b/block/blk-softirq.c
24911 index 1366a89..e17f54b 100644
24912 --- a/block/blk-softirq.c
24913 +++ b/block/blk-softirq.c
24914 @@ -17,7 +17,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
24915 * Softirq action handler - move entries to local list and loop over them
24916 * while passing them to the queue registered handler.
24917 */
24918 -static void blk_done_softirq(struct softirq_action *h)
24919 +static void blk_done_softirq(void)
24920 {
24921 struct list_head *cpu_list, local_list;
24922
24923 diff --git a/block/bsg.c b/block/bsg.c
24924 index 702f131..37808bf 100644
24925 --- a/block/bsg.c
24926 +++ b/block/bsg.c
24927 @@ -176,16 +176,24 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
24928 struct sg_io_v4 *hdr, struct bsg_device *bd,
24929 fmode_t has_write_perm)
24930 {
24931 + unsigned char tmpcmd[sizeof(rq->__cmd)];
24932 + unsigned char *cmdptr;
24933 +
24934 if (hdr->request_len > BLK_MAX_CDB) {
24935 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
24936 if (!rq->cmd)
24937 return -ENOMEM;
24938 - }
24939 + cmdptr = rq->cmd;
24940 + } else
24941 + cmdptr = tmpcmd;
24942
24943 - if (copy_from_user(rq->cmd, (void __user *)(unsigned long)hdr->request,
24944 + if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request,
24945 hdr->request_len))
24946 return -EFAULT;
24947
24948 + if (cmdptr != rq->cmd)
24949 + memcpy(rq->cmd, cmdptr, hdr->request_len);
24950 +
24951 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
24952 if (blk_verify_command(rq->cmd, has_write_perm))
24953 return -EPERM;
24954 diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
24955 index 7b72502..646105c 100644
24956 --- a/block/compat_ioctl.c
24957 +++ b/block/compat_ioctl.c
24958 @@ -340,7 +340,7 @@ static int compat_fd_ioctl(struct block_device *bdev, fmode_t mode,
24959 err |= __get_user(f->spec1, &uf->spec1);
24960 err |= __get_user(f->fmt_gap, &uf->fmt_gap);
24961 err |= __get_user(name, &uf->name);
24962 - f->name = compat_ptr(name);
24963 + f->name = (void __force_kernel *)compat_ptr(name);
24964 if (err) {
24965 err = -EFAULT;
24966 goto out;
24967 diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
24968 index 688be8a..8a37d98 100644
24969 --- a/block/scsi_ioctl.c
24970 +++ b/block/scsi_ioctl.c
24971 @@ -223,8 +223,20 @@ EXPORT_SYMBOL(blk_verify_command);
24972 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
24973 struct sg_io_hdr *hdr, fmode_t mode)
24974 {
24975 - if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
24976 + unsigned char tmpcmd[sizeof(rq->__cmd)];
24977 + unsigned char *cmdptr;
24978 +
24979 + if (rq->cmd != rq->__cmd)
24980 + cmdptr = rq->cmd;
24981 + else
24982 + cmdptr = tmpcmd;
24983 +
24984 + if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
24985 return -EFAULT;
24986 +
24987 + if (cmdptr != rq->cmd)
24988 + memcpy(rq->cmd, cmdptr, hdr->cmd_len);
24989 +
24990 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
24991 return -EPERM;
24992
24993 @@ -433,6 +445,8 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
24994 int err;
24995 unsigned int in_len, out_len, bytes, opcode, cmdlen;
24996 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
24997 + unsigned char tmpcmd[sizeof(rq->__cmd)];
24998 + unsigned char *cmdptr;
24999
25000 if (!sic)
25001 return -EINVAL;
25002 @@ -466,9 +480,18 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
25003 */
25004 err = -EFAULT;
25005 rq->cmd_len = cmdlen;
25006 - if (copy_from_user(rq->cmd, sic->data, cmdlen))
25007 +
25008 + if (rq->cmd != rq->__cmd)
25009 + cmdptr = rq->cmd;
25010 + else
25011 + cmdptr = tmpcmd;
25012 +
25013 + if (copy_from_user(cmdptr, sic->data, cmdlen))
25014 goto error;
25015
25016 + if (rq->cmd != cmdptr)
25017 + memcpy(rq->cmd, cmdptr, cmdlen);
25018 +
25019 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
25020 goto error;
25021
25022 diff --git a/crypto/cryptd.c b/crypto/cryptd.c
25023 index 671d4d6..5f24030 100644
25024 --- a/crypto/cryptd.c
25025 +++ b/crypto/cryptd.c
25026 @@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx {
25027
25028 struct cryptd_blkcipher_request_ctx {
25029 crypto_completion_t complete;
25030 -};
25031 +} __no_const;
25032
25033 struct cryptd_hash_ctx {
25034 struct crypto_shash *child;
25035 @@ -80,7 +80,7 @@ struct cryptd_aead_ctx {
25036
25037 struct cryptd_aead_request_ctx {
25038 crypto_completion_t complete;
25039 -};
25040 +} __no_const;
25041
25042 static void cryptd_queue_worker(struct work_struct *work);
25043
25044 diff --git a/drivers/acpi/apei/cper.c b/drivers/acpi/apei/cper.c
25045 index 5d41894..22021e4 100644
25046 --- a/drivers/acpi/apei/cper.c
25047 +++ b/drivers/acpi/apei/cper.c
25048 @@ -38,12 +38,12 @@
25049 */
25050 u64 cper_next_record_id(void)
25051 {
25052 - static atomic64_t seq;
25053 + static atomic64_unchecked_t seq;
25054
25055 - if (!atomic64_read(&seq))
25056 - atomic64_set(&seq, ((u64)get_seconds()) << 32);
25057 + if (!atomic64_read_unchecked(&seq))
25058 + atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32);
25059
25060 - return atomic64_inc_return(&seq);
25061 + return atomic64_inc_return_unchecked(&seq);
25062 }
25063 EXPORT_SYMBOL_GPL(cper_next_record_id);
25064
25065 diff --git a/drivers/acpi/ec_sys.c b/drivers/acpi/ec_sys.c
25066 index 6c47ae9..abfdd63 100644
25067 --- a/drivers/acpi/ec_sys.c
25068 +++ b/drivers/acpi/ec_sys.c
25069 @@ -12,6 +12,7 @@
25070 #include <linux/acpi.h>
25071 #include <linux/debugfs.h>
25072 #include <linux/module.h>
25073 +#include <linux/uaccess.h>
25074 #include "internal.h"
25075
25076 MODULE_AUTHOR("Thomas Renninger <trenn@suse.de>");
25077 @@ -40,7 +41,7 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf,
25078 * struct acpi_ec *ec = ((struct seq_file *)f->private_data)->private;
25079 */
25080 unsigned int size = EC_SPACE_SIZE;
25081 - u8 *data = (u8 *) buf;
25082 + u8 data;
25083 loff_t init_off = *off;
25084 int err = 0;
25085
25086 @@ -53,9 +54,11 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf,
25087 size = count;
25088
25089 while (size) {
25090 - err = ec_read(*off, &data[*off - init_off]);
25091 + err = ec_read(*off, &data);
25092 if (err)
25093 return err;
25094 + if (put_user(data, &buf[*off - init_off]))
25095 + return -EFAULT;
25096 *off += 1;
25097 size--;
25098 }
25099 @@ -71,7 +74,6 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
25100
25101 unsigned int size = count;
25102 loff_t init_off = *off;
25103 - u8 *data = (u8 *) buf;
25104 int err = 0;
25105
25106 if (*off >= EC_SPACE_SIZE)
25107 @@ -82,7 +84,9 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
25108 }
25109
25110 while (size) {
25111 - u8 byte_write = data[*off - init_off];
25112 + u8 byte_write;
25113 + if (get_user(byte_write, &buf[*off - init_off]))
25114 + return -EFAULT;
25115 err = ec_write(*off, byte_write);
25116 if (err)
25117 return err;
25118 diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c
25119 index 251c7b62..000462d 100644
25120 --- a/drivers/acpi/proc.c
25121 +++ b/drivers/acpi/proc.c
25122 @@ -343,19 +343,13 @@ acpi_system_write_wakeup_device(struct file *file,
25123 size_t count, loff_t * ppos)
25124 {
25125 struct list_head *node, *next;
25126 - char strbuf[5];
25127 - char str[5] = "";
25128 - unsigned int len = count;
25129 + char strbuf[5] = {0};
25130
25131 - if (len > 4)
25132 - len = 4;
25133 - if (len < 0)
25134 + if (count > 4)
25135 + count = 4;
25136 + if (copy_from_user(strbuf, buffer, count))
25137 return -EFAULT;
25138 -
25139 - if (copy_from_user(strbuf, buffer, len))
25140 - return -EFAULT;
25141 - strbuf[len] = '\0';
25142 - sscanf(strbuf, "%s", str);
25143 + strbuf[count] = '\0';
25144
25145 mutex_lock(&acpi_device_lock);
25146 list_for_each_safe(node, next, &acpi_wakeup_device_list) {
25147 @@ -364,7 +358,7 @@ acpi_system_write_wakeup_device(struct file *file,
25148 if (!dev->wakeup.flags.valid)
25149 continue;
25150
25151 - if (!strncmp(dev->pnp.bus_id, str, 4)) {
25152 + if (!strncmp(dev->pnp.bus_id, strbuf, 4)) {
25153 if (device_can_wakeup(&dev->dev)) {
25154 bool enable = !device_may_wakeup(&dev->dev);
25155 device_set_wakeup_enable(&dev->dev, enable);
25156 diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
25157 index 9d7bc9f..a6fc091 100644
25158 --- a/drivers/acpi/processor_driver.c
25159 +++ b/drivers/acpi/processor_driver.c
25160 @@ -473,7 +473,7 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device)
25161 return 0;
25162 #endif
25163
25164 - BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
25165 + BUG_ON(pr->id >= nr_cpu_ids);
25166
25167 /*
25168 * Buggy BIOS check
25169 diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
25170 index c04ad68..0b99473 100644
25171 --- a/drivers/ata/libata-core.c
25172 +++ b/drivers/ata/libata-core.c
25173 @@ -4733,7 +4733,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
25174 struct ata_port *ap;
25175 unsigned int tag;
25176
25177 - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
25178 + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
25179 ap = qc->ap;
25180
25181 qc->flags = 0;
25182 @@ -4749,7 +4749,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
25183 struct ata_port *ap;
25184 struct ata_link *link;
25185
25186 - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
25187 + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
25188 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
25189 ap = qc->ap;
25190 link = qc->dev->link;
25191 @@ -5754,6 +5754,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
25192 return;
25193
25194 spin_lock(&lock);
25195 + pax_open_kernel();
25196
25197 for (cur = ops->inherits; cur; cur = cur->inherits) {
25198 void **inherit = (void **)cur;
25199 @@ -5767,8 +5768,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
25200 if (IS_ERR(*pp))
25201 *pp = NULL;
25202
25203 - ops->inherits = NULL;
25204 + *(struct ata_port_operations **)&ops->inherits = NULL;
25205
25206 + pax_close_kernel();
25207 spin_unlock(&lock);
25208 }
25209
25210 diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
25211 index e8574bb..f9f6a72 100644
25212 --- a/drivers/ata/pata_arasan_cf.c
25213 +++ b/drivers/ata/pata_arasan_cf.c
25214 @@ -862,7 +862,9 @@ static int __devinit arasan_cf_probe(struct platform_device *pdev)
25215 /* Handle platform specific quirks */
25216 if (pdata->quirk) {
25217 if (pdata->quirk & CF_BROKEN_PIO) {
25218 - ap->ops->set_piomode = NULL;
25219 + pax_open_kernel();
25220 + *(void **)&ap->ops->set_piomode = NULL;
25221 + pax_close_kernel();
25222 ap->pio_mask = 0;
25223 }
25224 if (pdata->quirk & CF_BROKEN_MWDMA)
25225 diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c
25226 index f9b983a..887b9d8 100644
25227 --- a/drivers/atm/adummy.c
25228 +++ b/drivers/atm/adummy.c
25229 @@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct sk_buff *skb)
25230 vcc->pop(vcc, skb);
25231 else
25232 dev_kfree_skb_any(skb);
25233 - atomic_inc(&vcc->stats->tx);
25234 + atomic_inc_unchecked(&vcc->stats->tx);
25235
25236 return 0;
25237 }
25238 diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
25239 index f8f41e0..1f987dd 100644
25240 --- a/drivers/atm/ambassador.c
25241 +++ b/drivers/atm/ambassador.c
25242 @@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev, tx_out * tx) {
25243 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
25244
25245 // VC layer stats
25246 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
25247 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
25248
25249 // free the descriptor
25250 kfree (tx_descr);
25251 @@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
25252 dump_skb ("<<<", vc, skb);
25253
25254 // VC layer stats
25255 - atomic_inc(&atm_vcc->stats->rx);
25256 + atomic_inc_unchecked(&atm_vcc->stats->rx);
25257 __net_timestamp(skb);
25258 // end of our responsibility
25259 atm_vcc->push (atm_vcc, skb);
25260 @@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
25261 } else {
25262 PRINTK (KERN_INFO, "dropped over-size frame");
25263 // should we count this?
25264 - atomic_inc(&atm_vcc->stats->rx_drop);
25265 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
25266 }
25267
25268 } else {
25269 @@ -1338,7 +1338,7 @@ static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
25270 }
25271
25272 if (check_area (skb->data, skb->len)) {
25273 - atomic_inc(&atm_vcc->stats->tx_err);
25274 + atomic_inc_unchecked(&atm_vcc->stats->tx_err);
25275 return -ENOMEM; // ?
25276 }
25277
25278 diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
25279 index b22d71c..d6e1049 100644
25280 --- a/drivers/atm/atmtcp.c
25281 +++ b/drivers/atm/atmtcp.c
25282 @@ -207,7 +207,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
25283 if (vcc->pop) vcc->pop(vcc,skb);
25284 else dev_kfree_skb(skb);
25285 if (dev_data) return 0;
25286 - atomic_inc(&vcc->stats->tx_err);
25287 + atomic_inc_unchecked(&vcc->stats->tx_err);
25288 return -ENOLINK;
25289 }
25290 size = skb->len+sizeof(struct atmtcp_hdr);
25291 @@ -215,7 +215,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
25292 if (!new_skb) {
25293 if (vcc->pop) vcc->pop(vcc,skb);
25294 else dev_kfree_skb(skb);
25295 - atomic_inc(&vcc->stats->tx_err);
25296 + atomic_inc_unchecked(&vcc->stats->tx_err);
25297 return -ENOBUFS;
25298 }
25299 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
25300 @@ -226,8 +226,8 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
25301 if (vcc->pop) vcc->pop(vcc,skb);
25302 else dev_kfree_skb(skb);
25303 out_vcc->push(out_vcc,new_skb);
25304 - atomic_inc(&vcc->stats->tx);
25305 - atomic_inc(&out_vcc->stats->rx);
25306 + atomic_inc_unchecked(&vcc->stats->tx);
25307 + atomic_inc_unchecked(&out_vcc->stats->rx);
25308 return 0;
25309 }
25310
25311 @@ -301,7 +301,7 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
25312 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
25313 read_unlock(&vcc_sklist_lock);
25314 if (!out_vcc) {
25315 - atomic_inc(&vcc->stats->tx_err);
25316 + atomic_inc_unchecked(&vcc->stats->tx_err);
25317 goto done;
25318 }
25319 skb_pull(skb,sizeof(struct atmtcp_hdr));
25320 @@ -313,8 +313,8 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
25321 __net_timestamp(new_skb);
25322 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
25323 out_vcc->push(out_vcc,new_skb);
25324 - atomic_inc(&vcc->stats->tx);
25325 - atomic_inc(&out_vcc->stats->rx);
25326 + atomic_inc_unchecked(&vcc->stats->tx);
25327 + atomic_inc_unchecked(&out_vcc->stats->rx);
25328 done:
25329 if (vcc->pop) vcc->pop(vcc,skb);
25330 else dev_kfree_skb(skb);
25331 diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
25332 index 956e9ac..133516d 100644
25333 --- a/drivers/atm/eni.c
25334 +++ b/drivers/atm/eni.c
25335 @@ -526,7 +526,7 @@ static int rx_aal0(struct atm_vcc *vcc)
25336 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
25337 vcc->dev->number);
25338 length = 0;
25339 - atomic_inc(&vcc->stats->rx_err);
25340 + atomic_inc_unchecked(&vcc->stats->rx_err);
25341 }
25342 else {
25343 length = ATM_CELL_SIZE-1; /* no HEC */
25344 @@ -581,7 +581,7 @@ static int rx_aal5(struct atm_vcc *vcc)
25345 size);
25346 }
25347 eff = length = 0;
25348 - atomic_inc(&vcc->stats->rx_err);
25349 + atomic_inc_unchecked(&vcc->stats->rx_err);
25350 }
25351 else {
25352 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
25353 @@ -598,7 +598,7 @@ static int rx_aal5(struct atm_vcc *vcc)
25354 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
25355 vcc->dev->number,vcc->vci,length,size << 2,descr);
25356 length = eff = 0;
25357 - atomic_inc(&vcc->stats->rx_err);
25358 + atomic_inc_unchecked(&vcc->stats->rx_err);
25359 }
25360 }
25361 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
25362 @@ -771,7 +771,7 @@ rx_dequeued++;
25363 vcc->push(vcc,skb);
25364 pushed++;
25365 }
25366 - atomic_inc(&vcc->stats->rx);
25367 + atomic_inc_unchecked(&vcc->stats->rx);
25368 }
25369 wake_up(&eni_dev->rx_wait);
25370 }
25371 @@ -1229,7 +1229,7 @@ static void dequeue_tx(struct atm_dev *dev)
25372 PCI_DMA_TODEVICE);
25373 if (vcc->pop) vcc->pop(vcc,skb);
25374 else dev_kfree_skb_irq(skb);
25375 - atomic_inc(&vcc->stats->tx);
25376 + atomic_inc_unchecked(&vcc->stats->tx);
25377 wake_up(&eni_dev->tx_wait);
25378 dma_complete++;
25379 }
25380 @@ -1569,7 +1569,7 @@ tx_complete++;
25381 /*--------------------------------- entries ---------------------------------*/
25382
25383
25384 -static const char *media_name[] __devinitdata = {
25385 +static const char *media_name[] __devinitconst = {
25386 "MMF", "SMF", "MMF", "03?", /* 0- 3 */
25387 "UTP", "05?", "06?", "07?", /* 4- 7 */
25388 "TAXI","09?", "10?", "11?", /* 8-11 */
25389 diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
25390 index 5072f8a..fa52520d 100644
25391 --- a/drivers/atm/firestream.c
25392 +++ b/drivers/atm/firestream.c
25393 @@ -750,7 +750,7 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q)
25394 }
25395 }
25396
25397 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
25398 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
25399
25400 fs_dprintk (FS_DEBUG_TXMEM, "i");
25401 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
25402 @@ -817,7 +817,7 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
25403 #endif
25404 skb_put (skb, qe->p1 & 0xffff);
25405 ATM_SKB(skb)->vcc = atm_vcc;
25406 - atomic_inc(&atm_vcc->stats->rx);
25407 + atomic_inc_unchecked(&atm_vcc->stats->rx);
25408 __net_timestamp(skb);
25409 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
25410 atm_vcc->push (atm_vcc, skb);
25411 @@ -838,12 +838,12 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
25412 kfree (pe);
25413 }
25414 if (atm_vcc)
25415 - atomic_inc(&atm_vcc->stats->rx_drop);
25416 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
25417 break;
25418 case 0x1f: /* Reassembly abort: no buffers. */
25419 /* Silently increment error counter. */
25420 if (atm_vcc)
25421 - atomic_inc(&atm_vcc->stats->rx_drop);
25422 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
25423 break;
25424 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
25425 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
25426 diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
25427 index 361f5ae..7fc552d 100644
25428 --- a/drivers/atm/fore200e.c
25429 +++ b/drivers/atm/fore200e.c
25430 @@ -933,9 +933,9 @@ fore200e_tx_irq(struct fore200e* fore200e)
25431 #endif
25432 /* check error condition */
25433 if (*entry->status & STATUS_ERROR)
25434 - atomic_inc(&vcc->stats->tx_err);
25435 + atomic_inc_unchecked(&vcc->stats->tx_err);
25436 else
25437 - atomic_inc(&vcc->stats->tx);
25438 + atomic_inc_unchecked(&vcc->stats->tx);
25439 }
25440 }
25441
25442 @@ -1084,7 +1084,7 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
25443 if (skb == NULL) {
25444 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
25445
25446 - atomic_inc(&vcc->stats->rx_drop);
25447 + atomic_inc_unchecked(&vcc->stats->rx_drop);
25448 return -ENOMEM;
25449 }
25450
25451 @@ -1127,14 +1127,14 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
25452
25453 dev_kfree_skb_any(skb);
25454
25455 - atomic_inc(&vcc->stats->rx_drop);
25456 + atomic_inc_unchecked(&vcc->stats->rx_drop);
25457 return -ENOMEM;
25458 }
25459
25460 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
25461
25462 vcc->push(vcc, skb);
25463 - atomic_inc(&vcc->stats->rx);
25464 + atomic_inc_unchecked(&vcc->stats->rx);
25465
25466 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
25467
25468 @@ -1212,7 +1212,7 @@ fore200e_rx_irq(struct fore200e* fore200e)
25469 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
25470 fore200e->atm_dev->number,
25471 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
25472 - atomic_inc(&vcc->stats->rx_err);
25473 + atomic_inc_unchecked(&vcc->stats->rx_err);
25474 }
25475 }
25476
25477 @@ -1657,7 +1657,7 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
25478 goto retry_here;
25479 }
25480
25481 - atomic_inc(&vcc->stats->tx_err);
25482 + atomic_inc_unchecked(&vcc->stats->tx_err);
25483
25484 fore200e->tx_sat++;
25485 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
25486 diff --git a/drivers/atm/he.c b/drivers/atm/he.c
25487 index 9a51df4..f3bb5f8 100644
25488 --- a/drivers/atm/he.c
25489 +++ b/drivers/atm/he.c
25490 @@ -1709,7 +1709,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
25491
25492 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
25493 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
25494 - atomic_inc(&vcc->stats->rx_drop);
25495 + atomic_inc_unchecked(&vcc->stats->rx_drop);
25496 goto return_host_buffers;
25497 }
25498
25499 @@ -1736,7 +1736,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
25500 RBRQ_LEN_ERR(he_dev->rbrq_head)
25501 ? "LEN_ERR" : "",
25502 vcc->vpi, vcc->vci);
25503 - atomic_inc(&vcc->stats->rx_err);
25504 + atomic_inc_unchecked(&vcc->stats->rx_err);
25505 goto return_host_buffers;
25506 }
25507
25508 @@ -1788,7 +1788,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
25509 vcc->push(vcc, skb);
25510 spin_lock(&he_dev->global_lock);
25511
25512 - atomic_inc(&vcc->stats->rx);
25513 + atomic_inc_unchecked(&vcc->stats->rx);
25514
25515 return_host_buffers:
25516 ++pdus_assembled;
25517 @@ -2114,7 +2114,7 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
25518 tpd->vcc->pop(tpd->vcc, tpd->skb);
25519 else
25520 dev_kfree_skb_any(tpd->skb);
25521 - atomic_inc(&tpd->vcc->stats->tx_err);
25522 + atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
25523 }
25524 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
25525 return;
25526 @@ -2526,7 +2526,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
25527 vcc->pop(vcc, skb);
25528 else
25529 dev_kfree_skb_any(skb);
25530 - atomic_inc(&vcc->stats->tx_err);
25531 + atomic_inc_unchecked(&vcc->stats->tx_err);
25532 return -EINVAL;
25533 }
25534
25535 @@ -2537,7 +2537,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
25536 vcc->pop(vcc, skb);
25537 else
25538 dev_kfree_skb_any(skb);
25539 - atomic_inc(&vcc->stats->tx_err);
25540 + atomic_inc_unchecked(&vcc->stats->tx_err);
25541 return -EINVAL;
25542 }
25543 #endif
25544 @@ -2549,7 +2549,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
25545 vcc->pop(vcc, skb);
25546 else
25547 dev_kfree_skb_any(skb);
25548 - atomic_inc(&vcc->stats->tx_err);
25549 + atomic_inc_unchecked(&vcc->stats->tx_err);
25550 spin_unlock_irqrestore(&he_dev->global_lock, flags);
25551 return -ENOMEM;
25552 }
25553 @@ -2591,7 +2591,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
25554 vcc->pop(vcc, skb);
25555 else
25556 dev_kfree_skb_any(skb);
25557 - atomic_inc(&vcc->stats->tx_err);
25558 + atomic_inc_unchecked(&vcc->stats->tx_err);
25559 spin_unlock_irqrestore(&he_dev->global_lock, flags);
25560 return -ENOMEM;
25561 }
25562 @@ -2622,7 +2622,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
25563 __enqueue_tpd(he_dev, tpd, cid);
25564 spin_unlock_irqrestore(&he_dev->global_lock, flags);
25565
25566 - atomic_inc(&vcc->stats->tx);
25567 + atomic_inc_unchecked(&vcc->stats->tx);
25568
25569 return 0;
25570 }
25571 diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
25572 index b812103..e391a49 100644
25573 --- a/drivers/atm/horizon.c
25574 +++ b/drivers/atm/horizon.c
25575 @@ -1035,7 +1035,7 @@ static void rx_schedule (hrz_dev * dev, int irq) {
25576 {
25577 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
25578 // VC layer stats
25579 - atomic_inc(&vcc->stats->rx);
25580 + atomic_inc_unchecked(&vcc->stats->rx);
25581 __net_timestamp(skb);
25582 // end of our responsibility
25583 vcc->push (vcc, skb);
25584 @@ -1187,7 +1187,7 @@ static void tx_schedule (hrz_dev * const dev, int irq) {
25585 dev->tx_iovec = NULL;
25586
25587 // VC layer stats
25588 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
25589 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
25590
25591 // free the skb
25592 hrz_kfree_skb (skb);
25593 diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
25594 index 1c05212..c28e200 100644
25595 --- a/drivers/atm/idt77252.c
25596 +++ b/drivers/atm/idt77252.c
25597 @@ -812,7 +812,7 @@ drain_scq(struct idt77252_dev *card, struct vc_map *vc)
25598 else
25599 dev_kfree_skb(skb);
25600
25601 - atomic_inc(&vcc->stats->tx);
25602 + atomic_inc_unchecked(&vcc->stats->tx);
25603 }
25604
25605 atomic_dec(&scq->used);
25606 @@ -1075,13 +1075,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
25607 if ((sb = dev_alloc_skb(64)) == NULL) {
25608 printk("%s: Can't allocate buffers for aal0.\n",
25609 card->name);
25610 - atomic_add(i, &vcc->stats->rx_drop);
25611 + atomic_add_unchecked(i, &vcc->stats->rx_drop);
25612 break;
25613 }
25614 if (!atm_charge(vcc, sb->truesize)) {
25615 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
25616 card->name);
25617 - atomic_add(i - 1, &vcc->stats->rx_drop);
25618 + atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
25619 dev_kfree_skb(sb);
25620 break;
25621 }
25622 @@ -1098,7 +1098,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
25623 ATM_SKB(sb)->vcc = vcc;
25624 __net_timestamp(sb);
25625 vcc->push(vcc, sb);
25626 - atomic_inc(&vcc->stats->rx);
25627 + atomic_inc_unchecked(&vcc->stats->rx);
25628
25629 cell += ATM_CELL_PAYLOAD;
25630 }
25631 @@ -1135,13 +1135,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
25632 "(CDC: %08x)\n",
25633 card->name, len, rpp->len, readl(SAR_REG_CDC));
25634 recycle_rx_pool_skb(card, rpp);
25635 - atomic_inc(&vcc->stats->rx_err);
25636 + atomic_inc_unchecked(&vcc->stats->rx_err);
25637 return;
25638 }
25639 if (stat & SAR_RSQE_CRC) {
25640 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
25641 recycle_rx_pool_skb(card, rpp);
25642 - atomic_inc(&vcc->stats->rx_err);
25643 + atomic_inc_unchecked(&vcc->stats->rx_err);
25644 return;
25645 }
25646 if (skb_queue_len(&rpp->queue) > 1) {
25647 @@ -1152,7 +1152,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
25648 RXPRINTK("%s: Can't alloc RX skb.\n",
25649 card->name);
25650 recycle_rx_pool_skb(card, rpp);
25651 - atomic_inc(&vcc->stats->rx_err);
25652 + atomic_inc_unchecked(&vcc->stats->rx_err);
25653 return;
25654 }
25655 if (!atm_charge(vcc, skb->truesize)) {
25656 @@ -1171,7 +1171,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
25657 __net_timestamp(skb);
25658
25659 vcc->push(vcc, skb);
25660 - atomic_inc(&vcc->stats->rx);
25661 + atomic_inc_unchecked(&vcc->stats->rx);
25662
25663 return;
25664 }
25665 @@ -1193,7 +1193,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
25666 __net_timestamp(skb);
25667
25668 vcc->push(vcc, skb);
25669 - atomic_inc(&vcc->stats->rx);
25670 + atomic_inc_unchecked(&vcc->stats->rx);
25671
25672 if (skb->truesize > SAR_FB_SIZE_3)
25673 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
25674 @@ -1304,14 +1304,14 @@ idt77252_rx_raw(struct idt77252_dev *card)
25675 if (vcc->qos.aal != ATM_AAL0) {
25676 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
25677 card->name, vpi, vci);
25678 - atomic_inc(&vcc->stats->rx_drop);
25679 + atomic_inc_unchecked(&vcc->stats->rx_drop);
25680 goto drop;
25681 }
25682
25683 if ((sb = dev_alloc_skb(64)) == NULL) {
25684 printk("%s: Can't allocate buffers for AAL0.\n",
25685 card->name);
25686 - atomic_inc(&vcc->stats->rx_err);
25687 + atomic_inc_unchecked(&vcc->stats->rx_err);
25688 goto drop;
25689 }
25690
25691 @@ -1330,7 +1330,7 @@ idt77252_rx_raw(struct idt77252_dev *card)
25692 ATM_SKB(sb)->vcc = vcc;
25693 __net_timestamp(sb);
25694 vcc->push(vcc, sb);
25695 - atomic_inc(&vcc->stats->rx);
25696 + atomic_inc_unchecked(&vcc->stats->rx);
25697
25698 drop:
25699 skb_pull(queue, 64);
25700 @@ -1955,13 +1955,13 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
25701
25702 if (vc == NULL) {
25703 printk("%s: NULL connection in send().\n", card->name);
25704 - atomic_inc(&vcc->stats->tx_err);
25705 + atomic_inc_unchecked(&vcc->stats->tx_err);
25706 dev_kfree_skb(skb);
25707 return -EINVAL;
25708 }
25709 if (!test_bit(VCF_TX, &vc->flags)) {
25710 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
25711 - atomic_inc(&vcc->stats->tx_err);
25712 + atomic_inc_unchecked(&vcc->stats->tx_err);
25713 dev_kfree_skb(skb);
25714 return -EINVAL;
25715 }
25716 @@ -1973,14 +1973,14 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
25717 break;
25718 default:
25719 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
25720 - atomic_inc(&vcc->stats->tx_err);
25721 + atomic_inc_unchecked(&vcc->stats->tx_err);
25722 dev_kfree_skb(skb);
25723 return -EINVAL;
25724 }
25725
25726 if (skb_shinfo(skb)->nr_frags != 0) {
25727 printk("%s: No scatter-gather yet.\n", card->name);
25728 - atomic_inc(&vcc->stats->tx_err);
25729 + atomic_inc_unchecked(&vcc->stats->tx_err);
25730 dev_kfree_skb(skb);
25731 return -EINVAL;
25732 }
25733 @@ -1988,7 +1988,7 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
25734
25735 err = queue_skb(card, vc, skb, oam);
25736 if (err) {
25737 - atomic_inc(&vcc->stats->tx_err);
25738 + atomic_inc_unchecked(&vcc->stats->tx_err);
25739 dev_kfree_skb(skb);
25740 return err;
25741 }
25742 @@ -2011,7 +2011,7 @@ idt77252_send_oam(struct atm_vcc *vcc, void *cell, int flags)
25743 skb = dev_alloc_skb(64);
25744 if (!skb) {
25745 printk("%s: Out of memory in send_oam().\n", card->name);
25746 - atomic_inc(&vcc->stats->tx_err);
25747 + atomic_inc_unchecked(&vcc->stats->tx_err);
25748 return -ENOMEM;
25749 }
25750 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
25751 diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
25752 index 3d0c2b0..45441fa 100644
25753 --- a/drivers/atm/iphase.c
25754 +++ b/drivers/atm/iphase.c
25755 @@ -1146,7 +1146,7 @@ static int rx_pkt(struct atm_dev *dev)
25756 status = (u_short) (buf_desc_ptr->desc_mode);
25757 if (status & (RX_CER | RX_PTE | RX_OFL))
25758 {
25759 - atomic_inc(&vcc->stats->rx_err);
25760 + atomic_inc_unchecked(&vcc->stats->rx_err);
25761 IF_ERR(printk("IA: bad packet, dropping it");)
25762 if (status & RX_CER) {
25763 IF_ERR(printk(" cause: packet CRC error\n");)
25764 @@ -1169,7 +1169,7 @@ static int rx_pkt(struct atm_dev *dev)
25765 len = dma_addr - buf_addr;
25766 if (len > iadev->rx_buf_sz) {
25767 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
25768 - atomic_inc(&vcc->stats->rx_err);
25769 + atomic_inc_unchecked(&vcc->stats->rx_err);
25770 goto out_free_desc;
25771 }
25772
25773 @@ -1319,7 +1319,7 @@ static void rx_dle_intr(struct atm_dev *dev)
25774 ia_vcc = INPH_IA_VCC(vcc);
25775 if (ia_vcc == NULL)
25776 {
25777 - atomic_inc(&vcc->stats->rx_err);
25778 + atomic_inc_unchecked(&vcc->stats->rx_err);
25779 dev_kfree_skb_any(skb);
25780 atm_return(vcc, atm_guess_pdu2truesize(len));
25781 goto INCR_DLE;
25782 @@ -1331,7 +1331,7 @@ static void rx_dle_intr(struct atm_dev *dev)
25783 if ((length > iadev->rx_buf_sz) || (length >
25784 (skb->len - sizeof(struct cpcs_trailer))))
25785 {
25786 - atomic_inc(&vcc->stats->rx_err);
25787 + atomic_inc_unchecked(&vcc->stats->rx_err);
25788 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
25789 length, skb->len);)
25790 dev_kfree_skb_any(skb);
25791 @@ -1347,7 +1347,7 @@ static void rx_dle_intr(struct atm_dev *dev)
25792
25793 IF_RX(printk("rx_dle_intr: skb push");)
25794 vcc->push(vcc,skb);
25795 - atomic_inc(&vcc->stats->rx);
25796 + atomic_inc_unchecked(&vcc->stats->rx);
25797 iadev->rx_pkt_cnt++;
25798 }
25799 INCR_DLE:
25800 @@ -2827,15 +2827,15 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
25801 {
25802 struct k_sonet_stats *stats;
25803 stats = &PRIV(_ia_dev[board])->sonet_stats;
25804 - printk("section_bip: %d\n", atomic_read(&stats->section_bip));
25805 - printk("line_bip : %d\n", atomic_read(&stats->line_bip));
25806 - printk("path_bip : %d\n", atomic_read(&stats->path_bip));
25807 - printk("line_febe : %d\n", atomic_read(&stats->line_febe));
25808 - printk("path_febe : %d\n", atomic_read(&stats->path_febe));
25809 - printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
25810 - printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
25811 - printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
25812 - printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
25813 + printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
25814 + printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
25815 + printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
25816 + printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
25817 + printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
25818 + printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
25819 + printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
25820 + printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
25821 + printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
25822 }
25823 ia_cmds.status = 0;
25824 break;
25825 @@ -2940,7 +2940,7 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
25826 if ((desc == 0) || (desc > iadev->num_tx_desc))
25827 {
25828 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
25829 - atomic_inc(&vcc->stats->tx);
25830 + atomic_inc_unchecked(&vcc->stats->tx);
25831 if (vcc->pop)
25832 vcc->pop(vcc, skb);
25833 else
25834 @@ -3045,14 +3045,14 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
25835 ATM_DESC(skb) = vcc->vci;
25836 skb_queue_tail(&iadev->tx_dma_q, skb);
25837
25838 - atomic_inc(&vcc->stats->tx);
25839 + atomic_inc_unchecked(&vcc->stats->tx);
25840 iadev->tx_pkt_cnt++;
25841 /* Increment transaction counter */
25842 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
25843
25844 #if 0
25845 /* add flow control logic */
25846 - if (atomic_read(&vcc->stats->tx) % 20 == 0) {
25847 + if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
25848 if (iavcc->vc_desc_cnt > 10) {
25849 vcc->tx_quota = vcc->tx_quota * 3 / 4;
25850 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
25851 diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
25852 index f556969..0da15eb 100644
25853 --- a/drivers/atm/lanai.c
25854 +++ b/drivers/atm/lanai.c
25855 @@ -1303,7 +1303,7 @@ static void lanai_send_one_aal5(struct lanai_dev *lanai,
25856 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
25857 lanai_endtx(lanai, lvcc);
25858 lanai_free_skb(lvcc->tx.atmvcc, skb);
25859 - atomic_inc(&lvcc->tx.atmvcc->stats->tx);
25860 + atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
25861 }
25862
25863 /* Try to fill the buffer - don't call unless there is backlog */
25864 @@ -1426,7 +1426,7 @@ static void vcc_rx_aal5(struct lanai_vcc *lvcc, int endptr)
25865 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
25866 __net_timestamp(skb);
25867 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
25868 - atomic_inc(&lvcc->rx.atmvcc->stats->rx);
25869 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
25870 out:
25871 lvcc->rx.buf.ptr = end;
25872 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
25873 @@ -1667,7 +1667,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
25874 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
25875 "vcc %d\n", lanai->number, (unsigned int) s, vci);
25876 lanai->stats.service_rxnotaal5++;
25877 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
25878 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
25879 return 0;
25880 }
25881 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
25882 @@ -1679,7 +1679,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
25883 int bytes;
25884 read_unlock(&vcc_sklist_lock);
25885 DPRINTK("got trashed rx pdu on vci %d\n", vci);
25886 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
25887 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
25888 lvcc->stats.x.aal5.service_trash++;
25889 bytes = (SERVICE_GET_END(s) * 16) -
25890 (((unsigned long) lvcc->rx.buf.ptr) -
25891 @@ -1691,7 +1691,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
25892 }
25893 if (s & SERVICE_STREAM) {
25894 read_unlock(&vcc_sklist_lock);
25895 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
25896 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
25897 lvcc->stats.x.aal5.service_stream++;
25898 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
25899 "PDU on VCI %d!\n", lanai->number, vci);
25900 @@ -1699,7 +1699,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
25901 return 0;
25902 }
25903 DPRINTK("got rx crc error on vci %d\n", vci);
25904 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
25905 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
25906 lvcc->stats.x.aal5.service_rxcrc++;
25907 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
25908 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
25909 diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
25910 index 1c70c45..300718d 100644
25911 --- a/drivers/atm/nicstar.c
25912 +++ b/drivers/atm/nicstar.c
25913 @@ -1654,7 +1654,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
25914 if ((vc = (vc_map *) vcc->dev_data) == NULL) {
25915 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
25916 card->index);
25917 - atomic_inc(&vcc->stats->tx_err);
25918 + atomic_inc_unchecked(&vcc->stats->tx_err);
25919 dev_kfree_skb_any(skb);
25920 return -EINVAL;
25921 }
25922 @@ -1662,7 +1662,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
25923 if (!vc->tx) {
25924 printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
25925 card->index);
25926 - atomic_inc(&vcc->stats->tx_err);
25927 + atomic_inc_unchecked(&vcc->stats->tx_err);
25928 dev_kfree_skb_any(skb);
25929 return -EINVAL;
25930 }
25931 @@ -1670,14 +1670,14 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
25932 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
25933 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
25934 card->index);
25935 - atomic_inc(&vcc->stats->tx_err);
25936 + atomic_inc_unchecked(&vcc->stats->tx_err);
25937 dev_kfree_skb_any(skb);
25938 return -EINVAL;
25939 }
25940
25941 if (skb_shinfo(skb)->nr_frags != 0) {
25942 printk("nicstar%d: No scatter-gather yet.\n", card->index);
25943 - atomic_inc(&vcc->stats->tx_err);
25944 + atomic_inc_unchecked(&vcc->stats->tx_err);
25945 dev_kfree_skb_any(skb);
25946 return -EINVAL;
25947 }
25948 @@ -1725,11 +1725,11 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
25949 }
25950
25951 if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
25952 - atomic_inc(&vcc->stats->tx_err);
25953 + atomic_inc_unchecked(&vcc->stats->tx_err);
25954 dev_kfree_skb_any(skb);
25955 return -EIO;
25956 }
25957 - atomic_inc(&vcc->stats->tx);
25958 + atomic_inc_unchecked(&vcc->stats->tx);
25959
25960 return 0;
25961 }
25962 @@ -2046,14 +2046,14 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
25963 printk
25964 ("nicstar%d: Can't allocate buffers for aal0.\n",
25965 card->index);
25966 - atomic_add(i, &vcc->stats->rx_drop);
25967 + atomic_add_unchecked(i, &vcc->stats->rx_drop);
25968 break;
25969 }
25970 if (!atm_charge(vcc, sb->truesize)) {
25971 RXPRINTK
25972 ("nicstar%d: atm_charge() dropped aal0 packets.\n",
25973 card->index);
25974 - atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
25975 + atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
25976 dev_kfree_skb_any(sb);
25977 break;
25978 }
25979 @@ -2068,7 +2068,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
25980 ATM_SKB(sb)->vcc = vcc;
25981 __net_timestamp(sb);
25982 vcc->push(vcc, sb);
25983 - atomic_inc(&vcc->stats->rx);
25984 + atomic_inc_unchecked(&vcc->stats->rx);
25985 cell += ATM_CELL_PAYLOAD;
25986 }
25987
25988 @@ -2085,7 +2085,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
25989 if (iovb == NULL) {
25990 printk("nicstar%d: Out of iovec buffers.\n",
25991 card->index);
25992 - atomic_inc(&vcc->stats->rx_drop);
25993 + atomic_inc_unchecked(&vcc->stats->rx_drop);
25994 recycle_rx_buf(card, skb);
25995 return;
25996 }
25997 @@ -2109,7 +2109,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
25998 small or large buffer itself. */
25999 } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
26000 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
26001 - atomic_inc(&vcc->stats->rx_err);
26002 + atomic_inc_unchecked(&vcc->stats->rx_err);
26003 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
26004 NS_MAX_IOVECS);
26005 NS_PRV_IOVCNT(iovb) = 0;
26006 @@ -2129,7 +2129,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26007 ("nicstar%d: Expected a small buffer, and this is not one.\n",
26008 card->index);
26009 which_list(card, skb);
26010 - atomic_inc(&vcc->stats->rx_err);
26011 + atomic_inc_unchecked(&vcc->stats->rx_err);
26012 recycle_rx_buf(card, skb);
26013 vc->rx_iov = NULL;
26014 recycle_iov_buf(card, iovb);
26015 @@ -2142,7 +2142,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26016 ("nicstar%d: Expected a large buffer, and this is not one.\n",
26017 card->index);
26018 which_list(card, skb);
26019 - atomic_inc(&vcc->stats->rx_err);
26020 + atomic_inc_unchecked(&vcc->stats->rx_err);
26021 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
26022 NS_PRV_IOVCNT(iovb));
26023 vc->rx_iov = NULL;
26024 @@ -2165,7 +2165,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26025 printk(" - PDU size mismatch.\n");
26026 else
26027 printk(".\n");
26028 - atomic_inc(&vcc->stats->rx_err);
26029 + atomic_inc_unchecked(&vcc->stats->rx_err);
26030 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
26031 NS_PRV_IOVCNT(iovb));
26032 vc->rx_iov = NULL;
26033 @@ -2179,7 +2179,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26034 /* skb points to a small buffer */
26035 if (!atm_charge(vcc, skb->truesize)) {
26036 push_rxbufs(card, skb);
26037 - atomic_inc(&vcc->stats->rx_drop);
26038 + atomic_inc_unchecked(&vcc->stats->rx_drop);
26039 } else {
26040 skb_put(skb, len);
26041 dequeue_sm_buf(card, skb);
26042 @@ -2189,7 +2189,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26043 ATM_SKB(skb)->vcc = vcc;
26044 __net_timestamp(skb);
26045 vcc->push(vcc, skb);
26046 - atomic_inc(&vcc->stats->rx);
26047 + atomic_inc_unchecked(&vcc->stats->rx);
26048 }
26049 } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */
26050 struct sk_buff *sb;
26051 @@ -2200,7 +2200,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26052 if (len <= NS_SMBUFSIZE) {
26053 if (!atm_charge(vcc, sb->truesize)) {
26054 push_rxbufs(card, sb);
26055 - atomic_inc(&vcc->stats->rx_drop);
26056 + atomic_inc_unchecked(&vcc->stats->rx_drop);
26057 } else {
26058 skb_put(sb, len);
26059 dequeue_sm_buf(card, sb);
26060 @@ -2210,7 +2210,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26061 ATM_SKB(sb)->vcc = vcc;
26062 __net_timestamp(sb);
26063 vcc->push(vcc, sb);
26064 - atomic_inc(&vcc->stats->rx);
26065 + atomic_inc_unchecked(&vcc->stats->rx);
26066 }
26067
26068 push_rxbufs(card, skb);
26069 @@ -2219,7 +2219,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26070
26071 if (!atm_charge(vcc, skb->truesize)) {
26072 push_rxbufs(card, skb);
26073 - atomic_inc(&vcc->stats->rx_drop);
26074 + atomic_inc_unchecked(&vcc->stats->rx_drop);
26075 } else {
26076 dequeue_lg_buf(card, skb);
26077 #ifdef NS_USE_DESTRUCTORS
26078 @@ -2232,7 +2232,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26079 ATM_SKB(skb)->vcc = vcc;
26080 __net_timestamp(skb);
26081 vcc->push(vcc, skb);
26082 - atomic_inc(&vcc->stats->rx);
26083 + atomic_inc_unchecked(&vcc->stats->rx);
26084 }
26085
26086 push_rxbufs(card, sb);
26087 @@ -2253,7 +2253,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26088 printk
26089 ("nicstar%d: Out of huge buffers.\n",
26090 card->index);
26091 - atomic_inc(&vcc->stats->rx_drop);
26092 + atomic_inc_unchecked(&vcc->stats->rx_drop);
26093 recycle_iovec_rx_bufs(card,
26094 (struct iovec *)
26095 iovb->data,
26096 @@ -2304,7 +2304,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26097 card->hbpool.count++;
26098 } else
26099 dev_kfree_skb_any(hb);
26100 - atomic_inc(&vcc->stats->rx_drop);
26101 + atomic_inc_unchecked(&vcc->stats->rx_drop);
26102 } else {
26103 /* Copy the small buffer to the huge buffer */
26104 sb = (struct sk_buff *)iov->iov_base;
26105 @@ -2341,7 +2341,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26106 #endif /* NS_USE_DESTRUCTORS */
26107 __net_timestamp(hb);
26108 vcc->push(vcc, hb);
26109 - atomic_inc(&vcc->stats->rx);
26110 + atomic_inc_unchecked(&vcc->stats->rx);
26111 }
26112 }
26113
26114 diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
26115 index 5d1d076..12fbca4 100644
26116 --- a/drivers/atm/solos-pci.c
26117 +++ b/drivers/atm/solos-pci.c
26118 @@ -714,7 +714,7 @@ void solos_bh(unsigned long card_arg)
26119 }
26120 atm_charge(vcc, skb->truesize);
26121 vcc->push(vcc, skb);
26122 - atomic_inc(&vcc->stats->rx);
26123 + atomic_inc_unchecked(&vcc->stats->rx);
26124 break;
26125
26126 case PKT_STATUS:
26127 @@ -1008,7 +1008,7 @@ static uint32_t fpga_tx(struct solos_card *card)
26128 vcc = SKB_CB(oldskb)->vcc;
26129
26130 if (vcc) {
26131 - atomic_inc(&vcc->stats->tx);
26132 + atomic_inc_unchecked(&vcc->stats->tx);
26133 solos_pop(vcc, oldskb);
26134 } else
26135 dev_kfree_skb_irq(oldskb);
26136 diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c
26137 index 90f1ccc..04c4a1e 100644
26138 --- a/drivers/atm/suni.c
26139 +++ b/drivers/atm/suni.c
26140 @@ -50,8 +50,8 @@ static DEFINE_SPINLOCK(sunis_lock);
26141
26142
26143 #define ADD_LIMITED(s,v) \
26144 - atomic_add((v),&stats->s); \
26145 - if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
26146 + atomic_add_unchecked((v),&stats->s); \
26147 + if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
26148
26149
26150 static void suni_hz(unsigned long from_timer)
26151 diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c
26152 index 5120a96..e2572bd 100644
26153 --- a/drivers/atm/uPD98402.c
26154 +++ b/drivers/atm/uPD98402.c
26155 @@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *dev,struct sonet_stats __user *arg,int ze
26156 struct sonet_stats tmp;
26157 int error = 0;
26158
26159 - atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
26160 + atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
26161 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
26162 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
26163 if (zero && !error) {
26164 @@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
26165
26166
26167 #define ADD_LIMITED(s,v) \
26168 - { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
26169 - if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
26170 - atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
26171 + { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
26172 + if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
26173 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
26174
26175
26176 static void stat_event(struct atm_dev *dev)
26177 @@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev *dev)
26178 if (reason & uPD98402_INT_PFM) stat_event(dev);
26179 if (reason & uPD98402_INT_PCO) {
26180 (void) GET(PCOCR); /* clear interrupt cause */
26181 - atomic_add(GET(HECCT),
26182 + atomic_add_unchecked(GET(HECCT),
26183 &PRIV(dev)->sonet_stats.uncorr_hcs);
26184 }
26185 if ((reason & uPD98402_INT_RFO) &&
26186 @@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev *dev)
26187 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
26188 uPD98402_INT_LOS),PIMR); /* enable them */
26189 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
26190 - atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
26191 - atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
26192 - atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
26193 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
26194 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
26195 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
26196 return 0;
26197 }
26198
26199 diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
26200 index d889f56..17eb71e 100644
26201 --- a/drivers/atm/zatm.c
26202 +++ b/drivers/atm/zatm.c
26203 @@ -460,7 +460,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
26204 }
26205 if (!size) {
26206 dev_kfree_skb_irq(skb);
26207 - if (vcc) atomic_inc(&vcc->stats->rx_err);
26208 + if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
26209 continue;
26210 }
26211 if (!atm_charge(vcc,skb->truesize)) {
26212 @@ -470,7 +470,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
26213 skb->len = size;
26214 ATM_SKB(skb)->vcc = vcc;
26215 vcc->push(vcc,skb);
26216 - atomic_inc(&vcc->stats->rx);
26217 + atomic_inc_unchecked(&vcc->stats->rx);
26218 }
26219 zout(pos & 0xffff,MTA(mbx));
26220 #if 0 /* probably a stupid idea */
26221 @@ -734,7 +734,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP |
26222 skb_queue_head(&zatm_vcc->backlog,skb);
26223 break;
26224 }
26225 - atomic_inc(&vcc->stats->tx);
26226 + atomic_inc_unchecked(&vcc->stats->tx);
26227 wake_up(&zatm_vcc->tx_wait);
26228 }
26229
26230 diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
26231 index a4760e0..51283cf 100644
26232 --- a/drivers/base/devtmpfs.c
26233 +++ b/drivers/base/devtmpfs.c
26234 @@ -368,7 +368,7 @@ int devtmpfs_mount(const char *mntdir)
26235 if (!thread)
26236 return 0;
26237
26238 - err = sys_mount("devtmpfs", (char *)mntdir, "devtmpfs", MS_SILENT, NULL);
26239 + err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)mntdir, (char __force_user *)"devtmpfs", MS_SILENT, NULL);
26240 if (err)
26241 printk(KERN_INFO "devtmpfs: error mounting %i\n", err);
26242 else
26243 diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
26244 index caf995f..6f76697 100644
26245 --- a/drivers/base/power/wakeup.c
26246 +++ b/drivers/base/power/wakeup.c
26247 @@ -30,14 +30,14 @@ bool events_check_enabled;
26248 * They need to be modified together atomically, so it's better to use one
26249 * atomic variable to hold them both.
26250 */
26251 -static atomic_t combined_event_count = ATOMIC_INIT(0);
26252 +static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0);
26253
26254 #define IN_PROGRESS_BITS (sizeof(int) * 4)
26255 #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)
26256
26257 static void split_counters(unsigned int *cnt, unsigned int *inpr)
26258 {
26259 - unsigned int comb = atomic_read(&combined_event_count);
26260 + unsigned int comb = atomic_read_unchecked(&combined_event_count);
26261
26262 *cnt = (comb >> IN_PROGRESS_BITS);
26263 *inpr = comb & MAX_IN_PROGRESS;
26264 @@ -353,7 +353,7 @@ static void wakeup_source_activate(struct wakeup_source *ws)
26265 ws->last_time = ktime_get();
26266
26267 /* Increment the counter of events in progress. */
26268 - atomic_inc(&combined_event_count);
26269 + atomic_inc_unchecked(&combined_event_count);
26270 }
26271
26272 /**
26273 @@ -443,7 +443,7 @@ static void wakeup_source_deactivate(struct wakeup_source *ws)
26274 * Increment the counter of registered wakeup events and decrement the
26275 * couter of wakeup events in progress simultaneously.
26276 */
26277 - atomic_add(MAX_IN_PROGRESS, &combined_event_count);
26278 + atomic_add_unchecked(MAX_IN_PROGRESS, &combined_event_count);
26279 }
26280
26281 /**
26282 diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
26283 index b0f553b..77b928b 100644
26284 --- a/drivers/block/cciss.c
26285 +++ b/drivers/block/cciss.c
26286 @@ -1198,6 +1198,8 @@ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
26287 int err;
26288 u32 cp;
26289
26290 + memset(&arg64, 0, sizeof(arg64));
26291 +
26292 err = 0;
26293 err |=
26294 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
26295 @@ -3007,7 +3009,7 @@ static void start_io(ctlr_info_t *h)
26296 while (!list_empty(&h->reqQ)) {
26297 c = list_entry(h->reqQ.next, CommandList_struct, list);
26298 /* can't do anything if fifo is full */
26299 - if ((h->access.fifo_full(h))) {
26300 + if ((h->access->fifo_full(h))) {
26301 dev_warn(&h->pdev->dev, "fifo full\n");
26302 break;
26303 }
26304 @@ -3017,7 +3019,7 @@ static void start_io(ctlr_info_t *h)
26305 h->Qdepth--;
26306
26307 /* Tell the controller execute command */
26308 - h->access.submit_command(h, c);
26309 + h->access->submit_command(h, c);
26310
26311 /* Put job onto the completed Q */
26312 addQ(&h->cmpQ, c);
26313 @@ -3443,17 +3445,17 @@ startio:
26314
26315 static inline unsigned long get_next_completion(ctlr_info_t *h)
26316 {
26317 - return h->access.command_completed(h);
26318 + return h->access->command_completed(h);
26319 }
26320
26321 static inline int interrupt_pending(ctlr_info_t *h)
26322 {
26323 - return h->access.intr_pending(h);
26324 + return h->access->intr_pending(h);
26325 }
26326
26327 static inline long interrupt_not_for_us(ctlr_info_t *h)
26328 {
26329 - return ((h->access.intr_pending(h) == 0) ||
26330 + return ((h->access->intr_pending(h) == 0) ||
26331 (h->interrupts_enabled == 0));
26332 }
26333
26334 @@ -3486,7 +3488,7 @@ static inline u32 next_command(ctlr_info_t *h)
26335 u32 a;
26336
26337 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
26338 - return h->access.command_completed(h);
26339 + return h->access->command_completed(h);
26340
26341 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
26342 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
26343 @@ -4044,7 +4046,7 @@ static void __devinit cciss_put_controller_into_performant_mode(ctlr_info_t *h)
26344 trans_support & CFGTBL_Trans_use_short_tags);
26345
26346 /* Change the access methods to the performant access methods */
26347 - h->access = SA5_performant_access;
26348 + h->access = &SA5_performant_access;
26349 h->transMethod = CFGTBL_Trans_Performant;
26350
26351 return;
26352 @@ -4316,7 +4318,7 @@ static int __devinit cciss_pci_init(ctlr_info_t *h)
26353 if (prod_index < 0)
26354 return -ENODEV;
26355 h->product_name = products[prod_index].product_name;
26356 - h->access = *(products[prod_index].access);
26357 + h->access = products[prod_index].access;
26358
26359 if (cciss_board_disabled(h)) {
26360 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
26361 @@ -5041,7 +5043,7 @@ reinit_after_soft_reset:
26362 }
26363
26364 /* make sure the board interrupts are off */
26365 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
26366 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
26367 rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
26368 if (rc)
26369 goto clean2;
26370 @@ -5093,7 +5095,7 @@ reinit_after_soft_reset:
26371 * fake ones to scoop up any residual completions.
26372 */
26373 spin_lock_irqsave(&h->lock, flags);
26374 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
26375 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
26376 spin_unlock_irqrestore(&h->lock, flags);
26377 free_irq(h->intr[h->intr_mode], h);
26378 rc = cciss_request_irq(h, cciss_msix_discard_completions,
26379 @@ -5113,9 +5115,9 @@ reinit_after_soft_reset:
26380 dev_info(&h->pdev->dev, "Board READY.\n");
26381 dev_info(&h->pdev->dev,
26382 "Waiting for stale completions to drain.\n");
26383 - h->access.set_intr_mask(h, CCISS_INTR_ON);
26384 + h->access->set_intr_mask(h, CCISS_INTR_ON);
26385 msleep(10000);
26386 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
26387 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
26388
26389 rc = controller_reset_failed(h->cfgtable);
26390 if (rc)
26391 @@ -5138,7 +5140,7 @@ reinit_after_soft_reset:
26392 cciss_scsi_setup(h);
26393
26394 /* Turn the interrupts on so we can service requests */
26395 - h->access.set_intr_mask(h, CCISS_INTR_ON);
26396 + h->access->set_intr_mask(h, CCISS_INTR_ON);
26397
26398 /* Get the firmware version */
26399 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
26400 @@ -5211,7 +5213,7 @@ static void cciss_shutdown(struct pci_dev *pdev)
26401 kfree(flush_buf);
26402 if (return_code != IO_OK)
26403 dev_warn(&h->pdev->dev, "Error flushing cache\n");
26404 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
26405 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
26406 free_irq(h->intr[h->intr_mode], h);
26407 }
26408
26409 diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
26410 index 7fda30e..eb5dfe0 100644
26411 --- a/drivers/block/cciss.h
26412 +++ b/drivers/block/cciss.h
26413 @@ -101,7 +101,7 @@ struct ctlr_info
26414 /* information about each logical volume */
26415 drive_info_struct *drv[CISS_MAX_LUN];
26416
26417 - struct access_method access;
26418 + struct access_method *access;
26419
26420 /* queue and queue Info */
26421 struct list_head reqQ;
26422 diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
26423 index 9125bbe..eede5c8 100644
26424 --- a/drivers/block/cpqarray.c
26425 +++ b/drivers/block/cpqarray.c
26426 @@ -404,7 +404,7 @@ static int __devinit cpqarray_register_ctlr( int i, struct pci_dev *pdev)
26427 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
26428 goto Enomem4;
26429 }
26430 - hba[i]->access.set_intr_mask(hba[i], 0);
26431 + hba[i]->access->set_intr_mask(hba[i], 0);
26432 if (request_irq(hba[i]->intr, do_ida_intr,
26433 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
26434 {
26435 @@ -459,7 +459,7 @@ static int __devinit cpqarray_register_ctlr( int i, struct pci_dev *pdev)
26436 add_timer(&hba[i]->timer);
26437
26438 /* Enable IRQ now that spinlock and rate limit timer are set up */
26439 - hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
26440 + hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
26441
26442 for(j=0; j<NWD; j++) {
26443 struct gendisk *disk = ida_gendisk[i][j];
26444 @@ -694,7 +694,7 @@ DBGINFO(
26445 for(i=0; i<NR_PRODUCTS; i++) {
26446 if (board_id == products[i].board_id) {
26447 c->product_name = products[i].product_name;
26448 - c->access = *(products[i].access);
26449 + c->access = products[i].access;
26450 break;
26451 }
26452 }
26453 @@ -792,7 +792,7 @@ static int __devinit cpqarray_eisa_detect(void)
26454 hba[ctlr]->intr = intr;
26455 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
26456 hba[ctlr]->product_name = products[j].product_name;
26457 - hba[ctlr]->access = *(products[j].access);
26458 + hba[ctlr]->access = products[j].access;
26459 hba[ctlr]->ctlr = ctlr;
26460 hba[ctlr]->board_id = board_id;
26461 hba[ctlr]->pci_dev = NULL; /* not PCI */
26462 @@ -980,7 +980,7 @@ static void start_io(ctlr_info_t *h)
26463
26464 while((c = h->reqQ) != NULL) {
26465 /* Can't do anything if we're busy */
26466 - if (h->access.fifo_full(h) == 0)
26467 + if (h->access->fifo_full(h) == 0)
26468 return;
26469
26470 /* Get the first entry from the request Q */
26471 @@ -988,7 +988,7 @@ static void start_io(ctlr_info_t *h)
26472 h->Qdepth--;
26473
26474 /* Tell the controller to do our bidding */
26475 - h->access.submit_command(h, c);
26476 + h->access->submit_command(h, c);
26477
26478 /* Get onto the completion Q */
26479 addQ(&h->cmpQ, c);
26480 @@ -1050,7 +1050,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
26481 unsigned long flags;
26482 __u32 a,a1;
26483
26484 - istat = h->access.intr_pending(h);
26485 + istat = h->access->intr_pending(h);
26486 /* Is this interrupt for us? */
26487 if (istat == 0)
26488 return IRQ_NONE;
26489 @@ -1061,7 +1061,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
26490 */
26491 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
26492 if (istat & FIFO_NOT_EMPTY) {
26493 - while((a = h->access.command_completed(h))) {
26494 + while((a = h->access->command_completed(h))) {
26495 a1 = a; a &= ~3;
26496 if ((c = h->cmpQ) == NULL)
26497 {
26498 @@ -1449,11 +1449,11 @@ static int sendcmd(
26499 /*
26500 * Disable interrupt
26501 */
26502 - info_p->access.set_intr_mask(info_p, 0);
26503 + info_p->access->set_intr_mask(info_p, 0);
26504 /* Make sure there is room in the command FIFO */
26505 /* Actually it should be completely empty at this time. */
26506 for (i = 200000; i > 0; i--) {
26507 - temp = info_p->access.fifo_full(info_p);
26508 + temp = info_p->access->fifo_full(info_p);
26509 if (temp != 0) {
26510 break;
26511 }
26512 @@ -1466,7 +1466,7 @@ DBG(
26513 /*
26514 * Send the cmd
26515 */
26516 - info_p->access.submit_command(info_p, c);
26517 + info_p->access->submit_command(info_p, c);
26518 complete = pollcomplete(ctlr);
26519
26520 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
26521 @@ -1549,9 +1549,9 @@ static int revalidate_allvol(ctlr_info_t *host)
26522 * we check the new geometry. Then turn interrupts back on when
26523 * we're done.
26524 */
26525 - host->access.set_intr_mask(host, 0);
26526 + host->access->set_intr_mask(host, 0);
26527 getgeometry(ctlr);
26528 - host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
26529 + host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
26530
26531 for(i=0; i<NWD; i++) {
26532 struct gendisk *disk = ida_gendisk[ctlr][i];
26533 @@ -1591,7 +1591,7 @@ static int pollcomplete(int ctlr)
26534 /* Wait (up to 2 seconds) for a command to complete */
26535
26536 for (i = 200000; i > 0; i--) {
26537 - done = hba[ctlr]->access.command_completed(hba[ctlr]);
26538 + done = hba[ctlr]->access->command_completed(hba[ctlr]);
26539 if (done == 0) {
26540 udelay(10); /* a short fixed delay */
26541 } else
26542 diff --git a/drivers/block/cpqarray.h b/drivers/block/cpqarray.h
26543 index be73e9d..7fbf140 100644
26544 --- a/drivers/block/cpqarray.h
26545 +++ b/drivers/block/cpqarray.h
26546 @@ -99,7 +99,7 @@ struct ctlr_info {
26547 drv_info_t drv[NWD];
26548 struct proc_dir_entry *proc;
26549
26550 - struct access_method access;
26551 + struct access_method *access;
26552
26553 cmdlist_t *reqQ;
26554 cmdlist_t *cmpQ;
26555 diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
26556 index 9cf2035..bffca95 100644
26557 --- a/drivers/block/drbd/drbd_int.h
26558 +++ b/drivers/block/drbd/drbd_int.h
26559 @@ -736,7 +736,7 @@ struct drbd_request;
26560 struct drbd_epoch {
26561 struct list_head list;
26562 unsigned int barrier_nr;
26563 - atomic_t epoch_size; /* increased on every request added. */
26564 + atomic_unchecked_t epoch_size; /* increased on every request added. */
26565 atomic_t active; /* increased on every req. added, and dec on every finished. */
26566 unsigned long flags;
26567 };
26568 @@ -1108,7 +1108,7 @@ struct drbd_conf {
26569 void *int_dig_in;
26570 void *int_dig_vv;
26571 wait_queue_head_t seq_wait;
26572 - atomic_t packet_seq;
26573 + atomic_unchecked_t packet_seq;
26574 unsigned int peer_seq;
26575 spinlock_t peer_seq_lock;
26576 unsigned int minor;
26577 @@ -1617,30 +1617,30 @@ static inline int drbd_setsockopt(struct socket *sock, int level, int optname,
26578
26579 static inline void drbd_tcp_cork(struct socket *sock)
26580 {
26581 - int __user val = 1;
26582 + int val = 1;
26583 (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
26584 - (char __user *)&val, sizeof(val));
26585 + (char __force_user *)&val, sizeof(val));
26586 }
26587
26588 static inline void drbd_tcp_uncork(struct socket *sock)
26589 {
26590 - int __user val = 0;
26591 + int val = 0;
26592 (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
26593 - (char __user *)&val, sizeof(val));
26594 + (char __force_user *)&val, sizeof(val));
26595 }
26596
26597 static inline void drbd_tcp_nodelay(struct socket *sock)
26598 {
26599 - int __user val = 1;
26600 + int val = 1;
26601 (void) drbd_setsockopt(sock, SOL_TCP, TCP_NODELAY,
26602 - (char __user *)&val, sizeof(val));
26603 + (char __force_user *)&val, sizeof(val));
26604 }
26605
26606 static inline void drbd_tcp_quickack(struct socket *sock)
26607 {
26608 - int __user val = 2;
26609 + int val = 2;
26610 (void) drbd_setsockopt(sock, SOL_TCP, TCP_QUICKACK,
26611 - (char __user *)&val, sizeof(val));
26612 + (char __force_user *)&val, sizeof(val));
26613 }
26614
26615 void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo);
26616 diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
26617 index 0358e55..bc33689 100644
26618 --- a/drivers/block/drbd/drbd_main.c
26619 +++ b/drivers/block/drbd/drbd_main.c
26620 @@ -2397,7 +2397,7 @@ static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packets cmd,
26621 p.sector = sector;
26622 p.block_id = block_id;
26623 p.blksize = blksize;
26624 - p.seq_num = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));
26625 + p.seq_num = cpu_to_be32(atomic_add_return_unchecked(1, &mdev->packet_seq));
26626
26627 if (!mdev->meta.socket || mdev->state.conn < C_CONNECTED)
26628 return false;
26629 @@ -2696,7 +2696,7 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
26630 p.sector = cpu_to_be64(req->sector);
26631 p.block_id = (unsigned long)req;
26632 p.seq_num = cpu_to_be32(req->seq_num =
26633 - atomic_add_return(1, &mdev->packet_seq));
26634 + atomic_add_return_unchecked(1, &mdev->packet_seq));
26635
26636 dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
26637
26638 @@ -2981,7 +2981,7 @@ void drbd_init_set_defaults(struct drbd_conf *mdev)
26639 atomic_set(&mdev->unacked_cnt, 0);
26640 atomic_set(&mdev->local_cnt, 0);
26641 atomic_set(&mdev->net_cnt, 0);
26642 - atomic_set(&mdev->packet_seq, 0);
26643 + atomic_set_unchecked(&mdev->packet_seq, 0);
26644 atomic_set(&mdev->pp_in_use, 0);
26645 atomic_set(&mdev->pp_in_use_by_net, 0);
26646 atomic_set(&mdev->rs_sect_in, 0);
26647 @@ -3063,8 +3063,8 @@ void drbd_mdev_cleanup(struct drbd_conf *mdev)
26648 mdev->receiver.t_state);
26649
26650 /* no need to lock it, I'm the only thread alive */
26651 - if (atomic_read(&mdev->current_epoch->epoch_size) != 0)
26652 - dev_err(DEV, "epoch_size:%d\n", atomic_read(&mdev->current_epoch->epoch_size));
26653 + if (atomic_read_unchecked(&mdev->current_epoch->epoch_size) != 0)
26654 + dev_err(DEV, "epoch_size:%d\n", atomic_read_unchecked(&mdev->current_epoch->epoch_size));
26655 mdev->al_writ_cnt =
26656 mdev->bm_writ_cnt =
26657 mdev->read_cnt =
26658 diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
26659 index af2a250..219c74b 100644
26660 --- a/drivers/block/drbd/drbd_nl.c
26661 +++ b/drivers/block/drbd/drbd_nl.c
26662 @@ -2359,7 +2359,7 @@ static void drbd_connector_callback(struct cn_msg *req, struct netlink_skb_parms
26663 module_put(THIS_MODULE);
26664 }
26665
26666 -static atomic_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
26667 +static atomic_unchecked_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
26668
26669 static unsigned short *
26670 __tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data,
26671 @@ -2430,7 +2430,7 @@ void drbd_bcast_state(struct drbd_conf *mdev, union drbd_state state)
26672 cn_reply->id.idx = CN_IDX_DRBD;
26673 cn_reply->id.val = CN_VAL_DRBD;
26674
26675 - cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
26676 + cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
26677 cn_reply->ack = 0; /* not used here. */
26678 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
26679 (int)((char *)tl - (char *)reply->tag_list);
26680 @@ -2462,7 +2462,7 @@ void drbd_bcast_ev_helper(struct drbd_conf *mdev, char *helper_name)
26681 cn_reply->id.idx = CN_IDX_DRBD;
26682 cn_reply->id.val = CN_VAL_DRBD;
26683
26684 - cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
26685 + cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
26686 cn_reply->ack = 0; /* not used here. */
26687 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
26688 (int)((char *)tl - (char *)reply->tag_list);
26689 @@ -2540,7 +2540,7 @@ void drbd_bcast_ee(struct drbd_conf *mdev,
26690 cn_reply->id.idx = CN_IDX_DRBD;
26691 cn_reply->id.val = CN_VAL_DRBD;
26692
26693 - cn_reply->seq = atomic_add_return(1,&drbd_nl_seq);
26694 + cn_reply->seq = atomic_add_return_unchecked(1,&drbd_nl_seq);
26695 cn_reply->ack = 0; // not used here.
26696 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
26697 (int)((char*)tl - (char*)reply->tag_list);
26698 @@ -2579,7 +2579,7 @@ void drbd_bcast_sync_progress(struct drbd_conf *mdev)
26699 cn_reply->id.idx = CN_IDX_DRBD;
26700 cn_reply->id.val = CN_VAL_DRBD;
26701
26702 - cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
26703 + cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
26704 cn_reply->ack = 0; /* not used here. */
26705 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
26706 (int)((char *)tl - (char *)reply->tag_list);
26707 diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
26708 index 43beaca..4a5b1dd 100644
26709 --- a/drivers/block/drbd/drbd_receiver.c
26710 +++ b/drivers/block/drbd/drbd_receiver.c
26711 @@ -894,7 +894,7 @@ retry:
26712 sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
26713 sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
26714
26715 - atomic_set(&mdev->packet_seq, 0);
26716 + atomic_set_unchecked(&mdev->packet_seq, 0);
26717 mdev->peer_seq = 0;
26718
26719 drbd_thread_start(&mdev->asender);
26720 @@ -985,7 +985,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
26721 do {
26722 next_epoch = NULL;
26723
26724 - epoch_size = atomic_read(&epoch->epoch_size);
26725 + epoch_size = atomic_read_unchecked(&epoch->epoch_size);
26726
26727 switch (ev & ~EV_CLEANUP) {
26728 case EV_PUT:
26729 @@ -1020,7 +1020,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
26730 rv = FE_DESTROYED;
26731 } else {
26732 epoch->flags = 0;
26733 - atomic_set(&epoch->epoch_size, 0);
26734 + atomic_set_unchecked(&epoch->epoch_size, 0);
26735 /* atomic_set(&epoch->active, 0); is already zero */
26736 if (rv == FE_STILL_LIVE)
26737 rv = FE_RECYCLED;
26738 @@ -1191,14 +1191,14 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign
26739 drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
26740 drbd_flush(mdev);
26741
26742 - if (atomic_read(&mdev->current_epoch->epoch_size)) {
26743 + if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
26744 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
26745 if (epoch)
26746 break;
26747 }
26748
26749 epoch = mdev->current_epoch;
26750 - wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0);
26751 + wait_event(mdev->ee_wait, atomic_read_unchecked(&epoch->epoch_size) == 0);
26752
26753 D_ASSERT(atomic_read(&epoch->active) == 0);
26754 D_ASSERT(epoch->flags == 0);
26755 @@ -1210,11 +1210,11 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign
26756 }
26757
26758 epoch->flags = 0;
26759 - atomic_set(&epoch->epoch_size, 0);
26760 + atomic_set_unchecked(&epoch->epoch_size, 0);
26761 atomic_set(&epoch->active, 0);
26762
26763 spin_lock(&mdev->epoch_lock);
26764 - if (atomic_read(&mdev->current_epoch->epoch_size)) {
26765 + if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
26766 list_add(&epoch->list, &mdev->current_epoch->list);
26767 mdev->current_epoch = epoch;
26768 mdev->epochs++;
26769 @@ -1663,7 +1663,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
26770 spin_unlock(&mdev->peer_seq_lock);
26771
26772 drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
26773 - atomic_inc(&mdev->current_epoch->epoch_size);
26774 + atomic_inc_unchecked(&mdev->current_epoch->epoch_size);
26775 return drbd_drain_block(mdev, data_size);
26776 }
26777
26778 @@ -1689,7 +1689,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
26779
26780 spin_lock(&mdev->epoch_lock);
26781 e->epoch = mdev->current_epoch;
26782 - atomic_inc(&e->epoch->epoch_size);
26783 + atomic_inc_unchecked(&e->epoch->epoch_size);
26784 atomic_inc(&e->epoch->active);
26785 spin_unlock(&mdev->epoch_lock);
26786
26787 @@ -3885,7 +3885,7 @@ static void drbd_disconnect(struct drbd_conf *mdev)
26788 D_ASSERT(list_empty(&mdev->done_ee));
26789
26790 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
26791 - atomic_set(&mdev->current_epoch->epoch_size, 0);
26792 + atomic_set_unchecked(&mdev->current_epoch->epoch_size, 0);
26793 D_ASSERT(list_empty(&mdev->current_epoch->list));
26794 }
26795
26796 diff --git a/drivers/block/loop.c b/drivers/block/loop.c
26797 index 1e888c9..05cf1b0 100644
26798 --- a/drivers/block/loop.c
26799 +++ b/drivers/block/loop.c
26800 @@ -227,7 +227,7 @@ static int __do_lo_send_write(struct file *file,
26801 mm_segment_t old_fs = get_fs();
26802
26803 set_fs(get_ds());
26804 - bw = file->f_op->write(file, buf, len, &pos);
26805 + bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
26806 set_fs(old_fs);
26807 if (likely(bw == len))
26808 return 0;
26809 diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
26810 index 4364303..9adf4ee 100644
26811 --- a/drivers/char/Kconfig
26812 +++ b/drivers/char/Kconfig
26813 @@ -8,7 +8,8 @@ source "drivers/tty/Kconfig"
26814
26815 config DEVKMEM
26816 bool "/dev/kmem virtual device support"
26817 - default y
26818 + default n
26819 + depends on !GRKERNSEC_KMEM
26820 help
26821 Say Y here if you want to support the /dev/kmem device. The
26822 /dev/kmem device is rarely used, but can be used for certain
26823 @@ -596,6 +597,7 @@ config DEVPORT
26824 bool
26825 depends on !M68K
26826 depends on ISA || PCI
26827 + depends on !GRKERNSEC_KMEM
26828 default y
26829
26830 source "drivers/s390/char/Kconfig"
26831 diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
26832 index 2e04433..22afc64 100644
26833 --- a/drivers/char/agp/frontend.c
26834 +++ b/drivers/char/agp/frontend.c
26835 @@ -817,7 +817,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
26836 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
26837 return -EFAULT;
26838
26839 - if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
26840 + if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
26841 return -EFAULT;
26842
26843 client = agp_find_client_by_pid(reserve.pid);
26844 diff --git a/drivers/char/briq_panel.c b/drivers/char/briq_panel.c
26845 index 095ab90..afad0a4 100644
26846 --- a/drivers/char/briq_panel.c
26847 +++ b/drivers/char/briq_panel.c
26848 @@ -9,6 +9,7 @@
26849 #include <linux/types.h>
26850 #include <linux/errno.h>
26851 #include <linux/tty.h>
26852 +#include <linux/mutex.h>
26853 #include <linux/timer.h>
26854 #include <linux/kernel.h>
26855 #include <linux/wait.h>
26856 @@ -34,6 +35,7 @@ static int vfd_is_open;
26857 static unsigned char vfd[40];
26858 static int vfd_cursor;
26859 static unsigned char ledpb, led;
26860 +static DEFINE_MUTEX(vfd_mutex);
26861
26862 static void update_vfd(void)
26863 {
26864 @@ -140,12 +142,15 @@ static ssize_t briq_panel_write(struct file *file, const char __user *buf, size_
26865 if (!vfd_is_open)
26866 return -EBUSY;
26867
26868 + mutex_lock(&vfd_mutex);
26869 for (;;) {
26870 char c;
26871 if (!indx)
26872 break;
26873 - if (get_user(c, buf))
26874 + if (get_user(c, buf)) {
26875 + mutex_unlock(&vfd_mutex);
26876 return -EFAULT;
26877 + }
26878 if (esc) {
26879 set_led(c);
26880 esc = 0;
26881 @@ -175,6 +180,7 @@ static ssize_t briq_panel_write(struct file *file, const char __user *buf, size_
26882 buf++;
26883 }
26884 update_vfd();
26885 + mutex_unlock(&vfd_mutex);
26886
26887 return len;
26888 }
26889 diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
26890 index f773a9d..65cd683 100644
26891 --- a/drivers/char/genrtc.c
26892 +++ b/drivers/char/genrtc.c
26893 @@ -273,6 +273,7 @@ static int gen_rtc_ioctl(struct file *file,
26894 switch (cmd) {
26895
26896 case RTC_PLL_GET:
26897 + memset(&pll, 0, sizeof(pll));
26898 if (get_rtc_pll(&pll))
26899 return -EINVAL;
26900 else
26901 diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
26902 index 0833896..cccce52 100644
26903 --- a/drivers/char/hpet.c
26904 +++ b/drivers/char/hpet.c
26905 @@ -572,7 +572,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
26906 }
26907
26908 static int
26909 -hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
26910 +hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
26911 struct hpet_info *info)
26912 {
26913 struct hpet_timer __iomem *timer;
26914 diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
26915 index 58c0e63..46c16bf 100644
26916 --- a/drivers/char/ipmi/ipmi_msghandler.c
26917 +++ b/drivers/char/ipmi/ipmi_msghandler.c
26918 @@ -415,7 +415,7 @@ struct ipmi_smi {
26919 struct proc_dir_entry *proc_dir;
26920 char proc_dir_name[10];
26921
26922 - atomic_t stats[IPMI_NUM_STATS];
26923 + atomic_unchecked_t stats[IPMI_NUM_STATS];
26924
26925 /*
26926 * run_to_completion duplicate of smb_info, smi_info
26927 @@ -448,9 +448,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
26928
26929
26930 #define ipmi_inc_stat(intf, stat) \
26931 - atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
26932 + atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
26933 #define ipmi_get_stat(intf, stat) \
26934 - ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
26935 + ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
26936
26937 static int is_lan_addr(struct ipmi_addr *addr)
26938 {
26939 @@ -2868,7 +2868,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
26940 INIT_LIST_HEAD(&intf->cmd_rcvrs);
26941 init_waitqueue_head(&intf->waitq);
26942 for (i = 0; i < IPMI_NUM_STATS; i++)
26943 - atomic_set(&intf->stats[i], 0);
26944 + atomic_set_unchecked(&intf->stats[i], 0);
26945
26946 intf->proc_dir = NULL;
26947
26948 diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
26949 index 9397ab4..d01bee1 100644
26950 --- a/drivers/char/ipmi/ipmi_si_intf.c
26951 +++ b/drivers/char/ipmi/ipmi_si_intf.c
26952 @@ -277,7 +277,7 @@ struct smi_info {
26953 unsigned char slave_addr;
26954
26955 /* Counters and things for the proc filesystem. */
26956 - atomic_t stats[SI_NUM_STATS];
26957 + atomic_unchecked_t stats[SI_NUM_STATS];
26958
26959 struct task_struct *thread;
26960
26961 @@ -286,9 +286,9 @@ struct smi_info {
26962 };
26963
26964 #define smi_inc_stat(smi, stat) \
26965 - atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
26966 + atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
26967 #define smi_get_stat(smi, stat) \
26968 - ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
26969 + ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
26970
26971 #define SI_MAX_PARMS 4
26972
26973 @@ -3230,7 +3230,7 @@ static int try_smi_init(struct smi_info *new_smi)
26974 atomic_set(&new_smi->req_events, 0);
26975 new_smi->run_to_completion = 0;
26976 for (i = 0; i < SI_NUM_STATS; i++)
26977 - atomic_set(&new_smi->stats[i], 0);
26978 + atomic_set_unchecked(&new_smi->stats[i], 0);
26979
26980 new_smi->interrupt_disabled = 1;
26981 atomic_set(&new_smi->stop_operation, 0);
26982 diff --git a/drivers/char/mbcs.c b/drivers/char/mbcs.c
26983 index 1aeaaba..e018570 100644
26984 --- a/drivers/char/mbcs.c
26985 +++ b/drivers/char/mbcs.c
26986 @@ -800,7 +800,7 @@ static int mbcs_remove(struct cx_dev *dev)
26987 return 0;
26988 }
26989
26990 -static const struct cx_device_id __devinitdata mbcs_id_table[] = {
26991 +static const struct cx_device_id __devinitconst mbcs_id_table[] = {
26992 {
26993 .part_num = MBCS_PART_NUM,
26994 .mfg_num = MBCS_MFG_NUM,
26995 diff --git a/drivers/char/mem.c b/drivers/char/mem.c
26996 index 1451790..f705c30 100644
26997 --- a/drivers/char/mem.c
26998 +++ b/drivers/char/mem.c
26999 @@ -18,6 +18,7 @@
27000 #include <linux/raw.h>
27001 #include <linux/tty.h>
27002 #include <linux/capability.h>
27003 +#include <linux/security.h>
27004 #include <linux/ptrace.h>
27005 #include <linux/device.h>
27006 #include <linux/highmem.h>
27007 @@ -35,6 +36,10 @@
27008 # include <linux/efi.h>
27009 #endif
27010
27011 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
27012 +extern const struct file_operations grsec_fops;
27013 +#endif
27014 +
27015 static inline unsigned long size_inside_page(unsigned long start,
27016 unsigned long size)
27017 {
27018 @@ -66,9 +71,13 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
27019
27020 while (cursor < to) {
27021 if (!devmem_is_allowed(pfn)) {
27022 +#ifdef CONFIG_GRKERNSEC_KMEM
27023 + gr_handle_mem_readwrite(from, to);
27024 +#else
27025 printk(KERN_INFO
27026 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
27027 current->comm, from, to);
27028 +#endif
27029 return 0;
27030 }
27031 cursor += PAGE_SIZE;
27032 @@ -76,6 +85,11 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
27033 }
27034 return 1;
27035 }
27036 +#elif defined(CONFIG_GRKERNSEC_KMEM)
27037 +static inline int range_is_allowed(unsigned long pfn, unsigned long size)
27038 +{
27039 + return 0;
27040 +}
27041 #else
27042 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
27043 {
27044 @@ -118,6 +132,7 @@ static ssize_t read_mem(struct file *file, char __user *buf,
27045
27046 while (count > 0) {
27047 unsigned long remaining;
27048 + char *temp;
27049
27050 sz = size_inside_page(p, count);
27051
27052 @@ -133,7 +148,23 @@ static ssize_t read_mem(struct file *file, char __user *buf,
27053 if (!ptr)
27054 return -EFAULT;
27055
27056 - remaining = copy_to_user(buf, ptr, sz);
27057 +#ifdef CONFIG_PAX_USERCOPY
27058 + temp = kmalloc(sz, GFP_KERNEL);
27059 + if (!temp) {
27060 + unxlate_dev_mem_ptr(p, ptr);
27061 + return -ENOMEM;
27062 + }
27063 + memcpy(temp, ptr, sz);
27064 +#else
27065 + temp = ptr;
27066 +#endif
27067 +
27068 + remaining = copy_to_user(buf, temp, sz);
27069 +
27070 +#ifdef CONFIG_PAX_USERCOPY
27071 + kfree(temp);
27072 +#endif
27073 +
27074 unxlate_dev_mem_ptr(p, ptr);
27075 if (remaining)
27076 return -EFAULT;
27077 @@ -396,9 +427,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
27078 size_t count, loff_t *ppos)
27079 {
27080 unsigned long p = *ppos;
27081 - ssize_t low_count, read, sz;
27082 + ssize_t low_count, read, sz, err = 0;
27083 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
27084 - int err = 0;
27085
27086 read = 0;
27087 if (p < (unsigned long) high_memory) {
27088 @@ -420,6 +450,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
27089 }
27090 #endif
27091 while (low_count > 0) {
27092 + char *temp;
27093 +
27094 sz = size_inside_page(p, low_count);
27095
27096 /*
27097 @@ -429,7 +461,22 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
27098 */
27099 kbuf = xlate_dev_kmem_ptr((char *)p);
27100
27101 - if (copy_to_user(buf, kbuf, sz))
27102 +#ifdef CONFIG_PAX_USERCOPY
27103 + temp = kmalloc(sz, GFP_KERNEL);
27104 + if (!temp)
27105 + return -ENOMEM;
27106 + memcpy(temp, kbuf, sz);
27107 +#else
27108 + temp = kbuf;
27109 +#endif
27110 +
27111 + err = copy_to_user(buf, temp, sz);
27112 +
27113 +#ifdef CONFIG_PAX_USERCOPY
27114 + kfree(temp);
27115 +#endif
27116 +
27117 + if (err)
27118 return -EFAULT;
27119 buf += sz;
27120 p += sz;
27121 @@ -867,6 +914,9 @@ static const struct memdev {
27122 #ifdef CONFIG_CRASH_DUMP
27123 [12] = { "oldmem", 0, &oldmem_fops, NULL },
27124 #endif
27125 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
27126 + [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
27127 +#endif
27128 };
27129
27130 static int memory_open(struct inode *inode, struct file *filp)
27131 diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
27132 index da3cfee..a5a6606 100644
27133 --- a/drivers/char/nvram.c
27134 +++ b/drivers/char/nvram.c
27135 @@ -248,7 +248,7 @@ static ssize_t nvram_read(struct file *file, char __user *buf,
27136
27137 spin_unlock_irq(&rtc_lock);
27138
27139 - if (copy_to_user(buf, contents, tmp - contents))
27140 + if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents))
27141 return -EFAULT;
27142
27143 *ppos = i;
27144 diff --git a/drivers/char/random.c b/drivers/char/random.c
27145 index 6035ab8..bdfe4fd 100644
27146 --- a/drivers/char/random.c
27147 +++ b/drivers/char/random.c
27148 @@ -261,8 +261,13 @@
27149 /*
27150 * Configuration information
27151 */
27152 +#ifdef CONFIG_GRKERNSEC_RANDNET
27153 +#define INPUT_POOL_WORDS 512
27154 +#define OUTPUT_POOL_WORDS 128
27155 +#else
27156 #define INPUT_POOL_WORDS 128
27157 #define OUTPUT_POOL_WORDS 32
27158 +#endif
27159 #define SEC_XFER_SIZE 512
27160 #define EXTRACT_SIZE 10
27161
27162 @@ -300,10 +305,17 @@ static struct poolinfo {
27163 int poolwords;
27164 int tap1, tap2, tap3, tap4, tap5;
27165 } poolinfo_table[] = {
27166 +#ifdef CONFIG_GRKERNSEC_RANDNET
27167 + /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
27168 + { 512, 411, 308, 208, 104, 1 },
27169 + /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
27170 + { 128, 103, 76, 51, 25, 1 },
27171 +#else
27172 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
27173 { 128, 103, 76, 51, 25, 1 },
27174 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
27175 { 32, 26, 20, 14, 7, 1 },
27176 +#endif
27177 #if 0
27178 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
27179 { 2048, 1638, 1231, 819, 411, 1 },
27180 @@ -909,7 +921,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
27181
27182 extract_buf(r, tmp);
27183 i = min_t(int, nbytes, EXTRACT_SIZE);
27184 - if (copy_to_user(buf, tmp, i)) {
27185 + if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
27186 ret = -EFAULT;
27187 break;
27188 }
27189 @@ -1228,7 +1240,7 @@ EXPORT_SYMBOL(generate_random_uuid);
27190 #include <linux/sysctl.h>
27191
27192 static int min_read_thresh = 8, min_write_thresh;
27193 -static int max_read_thresh = INPUT_POOL_WORDS * 32;
27194 +static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
27195 static int max_write_thresh = INPUT_POOL_WORDS * 32;
27196 static char sysctl_bootid[16];
27197
27198 diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
27199 index 1ee8ce7..b778bef 100644
27200 --- a/drivers/char/sonypi.c
27201 +++ b/drivers/char/sonypi.c
27202 @@ -55,6 +55,7 @@
27203 #include <asm/uaccess.h>
27204 #include <asm/io.h>
27205 #include <asm/system.h>
27206 +#include <asm/local.h>
27207
27208 #include <linux/sonypi.h>
27209
27210 @@ -491,7 +492,7 @@ static struct sonypi_device {
27211 spinlock_t fifo_lock;
27212 wait_queue_head_t fifo_proc_list;
27213 struct fasync_struct *fifo_async;
27214 - int open_count;
27215 + local_t open_count;
27216 int model;
27217 struct input_dev *input_jog_dev;
27218 struct input_dev *input_key_dev;
27219 @@ -898,7 +899,7 @@ static int sonypi_misc_fasync(int fd, struct file *filp, int on)
27220 static int sonypi_misc_release(struct inode *inode, struct file *file)
27221 {
27222 mutex_lock(&sonypi_device.lock);
27223 - sonypi_device.open_count--;
27224 + local_dec(&sonypi_device.open_count);
27225 mutex_unlock(&sonypi_device.lock);
27226 return 0;
27227 }
27228 @@ -907,9 +908,9 @@ static int sonypi_misc_open(struct inode *inode, struct file *file)
27229 {
27230 mutex_lock(&sonypi_device.lock);
27231 /* Flush input queue on first open */
27232 - if (!sonypi_device.open_count)
27233 + if (!local_read(&sonypi_device.open_count))
27234 kfifo_reset(&sonypi_device.fifo);
27235 - sonypi_device.open_count++;
27236 + local_inc(&sonypi_device.open_count);
27237 mutex_unlock(&sonypi_device.lock);
27238
27239 return 0;
27240 diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
27241 index 361a1df..2471eee 100644
27242 --- a/drivers/char/tpm/tpm.c
27243 +++ b/drivers/char/tpm/tpm.c
27244 @@ -414,7 +414,7 @@ static ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
27245 chip->vendor.req_complete_val)
27246 goto out_recv;
27247
27248 - if ((status == chip->vendor.req_canceled)) {
27249 + if (status == chip->vendor.req_canceled) {
27250 dev_err(chip->dev, "Operation Canceled\n");
27251 rc = -ECANCELED;
27252 goto out;
27253 diff --git a/drivers/char/tpm/tpm_bios.c b/drivers/char/tpm/tpm_bios.c
27254 index 0636520..169c1d0 100644
27255 --- a/drivers/char/tpm/tpm_bios.c
27256 +++ b/drivers/char/tpm/tpm_bios.c
27257 @@ -173,7 +173,7 @@ static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
27258 event = addr;
27259
27260 if ((event->event_type == 0 && event->event_size == 0) ||
27261 - ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
27262 + (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
27263 return NULL;
27264
27265 return addr;
27266 @@ -198,7 +198,7 @@ static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
27267 return NULL;
27268
27269 if ((event->event_type == 0 && event->event_size == 0) ||
27270 - ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
27271 + (event->event_size >= limit - v - sizeof(struct tcpa_event)))
27272 return NULL;
27273
27274 (*pos)++;
27275 @@ -291,7 +291,8 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
27276 int i;
27277
27278 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
27279 - seq_putc(m, data[i]);
27280 + if (!seq_putc(m, data[i]))
27281 + return -EFAULT;
27282
27283 return 0;
27284 }
27285 @@ -410,8 +411,13 @@ static int read_log(struct tpm_bios_log *log)
27286 log->bios_event_log_end = log->bios_event_log + len;
27287
27288 virt = acpi_os_map_memory(start, len);
27289 + if (!virt) {
27290 + kfree(log->bios_event_log);
27291 + log->bios_event_log = NULL;
27292 + return -EFAULT;
27293 + }
27294
27295 - memcpy(log->bios_event_log, virt, len);
27296 + memcpy(log->bios_event_log, (const char __force_kernel *)virt, len);
27297
27298 acpi_os_unmap_memory(virt, len);
27299 return 0;
27300 diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
27301 index 8e3c46d..c139b99 100644
27302 --- a/drivers/char/virtio_console.c
27303 +++ b/drivers/char/virtio_console.c
27304 @@ -563,7 +563,7 @@ static ssize_t fill_readbuf(struct port *port, char *out_buf, size_t out_count,
27305 if (to_user) {
27306 ssize_t ret;
27307
27308 - ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count);
27309 + ret = copy_to_user((char __force_user *)out_buf, buf->buf + buf->offset, out_count);
27310 if (ret)
27311 return -EFAULT;
27312 } else {
27313 @@ -662,7 +662,7 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
27314 if (!port_has_data(port) && !port->host_connected)
27315 return 0;
27316
27317 - return fill_readbuf(port, ubuf, count, true);
27318 + return fill_readbuf(port, (char __force_kernel *)ubuf, count, true);
27319 }
27320
27321 static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
27322 diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
27323 index eb1d864..39ee5a7 100644
27324 --- a/drivers/dma/dmatest.c
27325 +++ b/drivers/dma/dmatest.c
27326 @@ -591,7 +591,7 @@ static int dmatest_add_channel(struct dma_chan *chan)
27327 }
27328 if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) {
27329 cnt = dmatest_add_threads(dtc, DMA_PQ);
27330 - thread_count += cnt > 0 ?: 0;
27331 + thread_count += cnt > 0 ? cnt : 0;
27332 }
27333
27334 pr_info("dmatest: Started %u threads using %s\n",
27335 diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
27336 index c9eee6d..f9d5280 100644
27337 --- a/drivers/edac/amd64_edac.c
27338 +++ b/drivers/edac/amd64_edac.c
27339 @@ -2685,7 +2685,7 @@ static void __devexit amd64_remove_one_instance(struct pci_dev *pdev)
27340 * PCI core identifies what devices are on a system during boot, and then
27341 * inquiry this table to see if this driver is for a given device found.
27342 */
27343 -static const struct pci_device_id amd64_pci_table[] __devinitdata = {
27344 +static const struct pci_device_id amd64_pci_table[] __devinitconst = {
27345 {
27346 .vendor = PCI_VENDOR_ID_AMD,
27347 .device = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
27348 diff --git a/drivers/edac/amd76x_edac.c b/drivers/edac/amd76x_edac.c
27349 index e47e73b..348e0bd 100644
27350 --- a/drivers/edac/amd76x_edac.c
27351 +++ b/drivers/edac/amd76x_edac.c
27352 @@ -321,7 +321,7 @@ static void __devexit amd76x_remove_one(struct pci_dev *pdev)
27353 edac_mc_free(mci);
27354 }
27355
27356 -static const struct pci_device_id amd76x_pci_tbl[] __devinitdata = {
27357 +static const struct pci_device_id amd76x_pci_tbl[] __devinitconst = {
27358 {
27359 PCI_VEND_DEV(AMD, FE_GATE_700C), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27360 AMD762},
27361 diff --git a/drivers/edac/e752x_edac.c b/drivers/edac/e752x_edac.c
27362 index 1af531a..3a8ff27 100644
27363 --- a/drivers/edac/e752x_edac.c
27364 +++ b/drivers/edac/e752x_edac.c
27365 @@ -1380,7 +1380,7 @@ static void __devexit e752x_remove_one(struct pci_dev *pdev)
27366 edac_mc_free(mci);
27367 }
27368
27369 -static const struct pci_device_id e752x_pci_tbl[] __devinitdata = {
27370 +static const struct pci_device_id e752x_pci_tbl[] __devinitconst = {
27371 {
27372 PCI_VEND_DEV(INTEL, 7520_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27373 E7520},
27374 diff --git a/drivers/edac/e7xxx_edac.c b/drivers/edac/e7xxx_edac.c
27375 index 6ffb6d2..383d8d7 100644
27376 --- a/drivers/edac/e7xxx_edac.c
27377 +++ b/drivers/edac/e7xxx_edac.c
27378 @@ -525,7 +525,7 @@ static void __devexit e7xxx_remove_one(struct pci_dev *pdev)
27379 edac_mc_free(mci);
27380 }
27381
27382 -static const struct pci_device_id e7xxx_pci_tbl[] __devinitdata = {
27383 +static const struct pci_device_id e7xxx_pci_tbl[] __devinitconst = {
27384 {
27385 PCI_VEND_DEV(INTEL, 7205_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27386 E7205},
27387 diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
27388 index 495198a..ac08c85 100644
27389 --- a/drivers/edac/edac_pci_sysfs.c
27390 +++ b/drivers/edac/edac_pci_sysfs.c
27391 @@ -26,8 +26,8 @@ static int edac_pci_log_pe = 1; /* log PCI parity errors */
27392 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
27393 static int edac_pci_poll_msec = 1000; /* one second workq period */
27394
27395 -static atomic_t pci_parity_count = ATOMIC_INIT(0);
27396 -static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
27397 +static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
27398 +static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
27399
27400 static struct kobject *edac_pci_top_main_kobj;
27401 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
27402 @@ -582,7 +582,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
27403 edac_printk(KERN_CRIT, EDAC_PCI,
27404 "Signaled System Error on %s\n",
27405 pci_name(dev));
27406 - atomic_inc(&pci_nonparity_count);
27407 + atomic_inc_unchecked(&pci_nonparity_count);
27408 }
27409
27410 if (status & (PCI_STATUS_PARITY)) {
27411 @@ -590,7 +590,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
27412 "Master Data Parity Error on %s\n",
27413 pci_name(dev));
27414
27415 - atomic_inc(&pci_parity_count);
27416 + atomic_inc_unchecked(&pci_parity_count);
27417 }
27418
27419 if (status & (PCI_STATUS_DETECTED_PARITY)) {
27420 @@ -598,7 +598,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
27421 "Detected Parity Error on %s\n",
27422 pci_name(dev));
27423
27424 - atomic_inc(&pci_parity_count);
27425 + atomic_inc_unchecked(&pci_parity_count);
27426 }
27427 }
27428
27429 @@ -619,7 +619,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
27430 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
27431 "Signaled System Error on %s\n",
27432 pci_name(dev));
27433 - atomic_inc(&pci_nonparity_count);
27434 + atomic_inc_unchecked(&pci_nonparity_count);
27435 }
27436
27437 if (status & (PCI_STATUS_PARITY)) {
27438 @@ -627,7 +627,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
27439 "Master Data Parity Error on "
27440 "%s\n", pci_name(dev));
27441
27442 - atomic_inc(&pci_parity_count);
27443 + atomic_inc_unchecked(&pci_parity_count);
27444 }
27445
27446 if (status & (PCI_STATUS_DETECTED_PARITY)) {
27447 @@ -635,7 +635,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
27448 "Detected Parity Error on %s\n",
27449 pci_name(dev));
27450
27451 - atomic_inc(&pci_parity_count);
27452 + atomic_inc_unchecked(&pci_parity_count);
27453 }
27454 }
27455 }
27456 @@ -677,7 +677,7 @@ void edac_pci_do_parity_check(void)
27457 if (!check_pci_errors)
27458 return;
27459
27460 - before_count = atomic_read(&pci_parity_count);
27461 + before_count = atomic_read_unchecked(&pci_parity_count);
27462
27463 /* scan all PCI devices looking for a Parity Error on devices and
27464 * bridges.
27465 @@ -689,7 +689,7 @@ void edac_pci_do_parity_check(void)
27466 /* Only if operator has selected panic on PCI Error */
27467 if (edac_pci_get_panic_on_pe()) {
27468 /* If the count is different 'after' from 'before' */
27469 - if (before_count != atomic_read(&pci_parity_count))
27470 + if (before_count != atomic_read_unchecked(&pci_parity_count))
27471 panic("EDAC: PCI Parity Error");
27472 }
27473 }
27474 diff --git a/drivers/edac/i3000_edac.c b/drivers/edac/i3000_edac.c
27475 index c0510b3..6e2a954 100644
27476 --- a/drivers/edac/i3000_edac.c
27477 +++ b/drivers/edac/i3000_edac.c
27478 @@ -470,7 +470,7 @@ static void __devexit i3000_remove_one(struct pci_dev *pdev)
27479 edac_mc_free(mci);
27480 }
27481
27482 -static const struct pci_device_id i3000_pci_tbl[] __devinitdata = {
27483 +static const struct pci_device_id i3000_pci_tbl[] __devinitconst = {
27484 {
27485 PCI_VEND_DEV(INTEL, 3000_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27486 I3000},
27487 diff --git a/drivers/edac/i3200_edac.c b/drivers/edac/i3200_edac.c
27488 index aa08497..7e6822a 100644
27489 --- a/drivers/edac/i3200_edac.c
27490 +++ b/drivers/edac/i3200_edac.c
27491 @@ -456,7 +456,7 @@ static void __devexit i3200_remove_one(struct pci_dev *pdev)
27492 edac_mc_free(mci);
27493 }
27494
27495 -static const struct pci_device_id i3200_pci_tbl[] __devinitdata = {
27496 +static const struct pci_device_id i3200_pci_tbl[] __devinitconst = {
27497 {
27498 PCI_VEND_DEV(INTEL, 3200_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27499 I3200},
27500 diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c
27501 index 4dc3ac2..67d05a6 100644
27502 --- a/drivers/edac/i5000_edac.c
27503 +++ b/drivers/edac/i5000_edac.c
27504 @@ -1516,7 +1516,7 @@ static void __devexit i5000_remove_one(struct pci_dev *pdev)
27505 *
27506 * The "E500P" device is the first device supported.
27507 */
27508 -static const struct pci_device_id i5000_pci_tbl[] __devinitdata = {
27509 +static const struct pci_device_id i5000_pci_tbl[] __devinitconst = {
27510 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I5000_DEV16),
27511 .driver_data = I5000P},
27512
27513 diff --git a/drivers/edac/i5100_edac.c b/drivers/edac/i5100_edac.c
27514 index bcbdeec..9886d16 100644
27515 --- a/drivers/edac/i5100_edac.c
27516 +++ b/drivers/edac/i5100_edac.c
27517 @@ -1051,7 +1051,7 @@ static void __devexit i5100_remove_one(struct pci_dev *pdev)
27518 edac_mc_free(mci);
27519 }
27520
27521 -static const struct pci_device_id i5100_pci_tbl[] __devinitdata = {
27522 +static const struct pci_device_id i5100_pci_tbl[] __devinitconst = {
27523 /* Device 16, Function 0, Channel 0 Memory Map, Error Flag/Mask, ... */
27524 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5100_16) },
27525 { 0, }
27526 diff --git a/drivers/edac/i5400_edac.c b/drivers/edac/i5400_edac.c
27527 index 74d6ec34..baff517 100644
27528 --- a/drivers/edac/i5400_edac.c
27529 +++ b/drivers/edac/i5400_edac.c
27530 @@ -1383,7 +1383,7 @@ static void __devexit i5400_remove_one(struct pci_dev *pdev)
27531 *
27532 * The "E500P" device is the first device supported.
27533 */
27534 -static const struct pci_device_id i5400_pci_tbl[] __devinitdata = {
27535 +static const struct pci_device_id i5400_pci_tbl[] __devinitconst = {
27536 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5400_ERR)},
27537 {0,} /* 0 terminated list. */
27538 };
27539 diff --git a/drivers/edac/i7300_edac.c b/drivers/edac/i7300_edac.c
27540 index 6104dba..e7ea8e1 100644
27541 --- a/drivers/edac/i7300_edac.c
27542 +++ b/drivers/edac/i7300_edac.c
27543 @@ -1192,7 +1192,7 @@ static void __devexit i7300_remove_one(struct pci_dev *pdev)
27544 *
27545 * Has only 8086:360c PCI ID
27546 */
27547 -static const struct pci_device_id i7300_pci_tbl[] __devinitdata = {
27548 +static const struct pci_device_id i7300_pci_tbl[] __devinitconst = {
27549 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I7300_MCH_ERR)},
27550 {0,} /* 0 terminated list. */
27551 };
27552 diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
27553 index 70ad892..178943c 100644
27554 --- a/drivers/edac/i7core_edac.c
27555 +++ b/drivers/edac/i7core_edac.c
27556 @@ -391,7 +391,7 @@ static const struct pci_id_table pci_dev_table[] = {
27557 /*
27558 * pci_device_id table for which devices we are looking for
27559 */
27560 -static const struct pci_device_id i7core_pci_tbl[] __devinitdata = {
27561 +static const struct pci_device_id i7core_pci_tbl[] __devinitconst = {
27562 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_X58_HUB_MGMT)},
27563 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LYNNFIELD_QPI_LINK0)},
27564 {0,} /* 0 terminated list. */
27565 diff --git a/drivers/edac/i82443bxgx_edac.c b/drivers/edac/i82443bxgx_edac.c
27566 index 4329d39..f3022ef 100644
27567 --- a/drivers/edac/i82443bxgx_edac.c
27568 +++ b/drivers/edac/i82443bxgx_edac.c
27569 @@ -380,7 +380,7 @@ static void __devexit i82443bxgx_edacmc_remove_one(struct pci_dev *pdev)
27570
27571 EXPORT_SYMBOL_GPL(i82443bxgx_edacmc_remove_one);
27572
27573 -static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitdata = {
27574 +static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitconst = {
27575 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_0)},
27576 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_2)},
27577 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_0)},
27578 diff --git a/drivers/edac/i82860_edac.c b/drivers/edac/i82860_edac.c
27579 index 931a057..fd28340 100644
27580 --- a/drivers/edac/i82860_edac.c
27581 +++ b/drivers/edac/i82860_edac.c
27582 @@ -270,7 +270,7 @@ static void __devexit i82860_remove_one(struct pci_dev *pdev)
27583 edac_mc_free(mci);
27584 }
27585
27586 -static const struct pci_device_id i82860_pci_tbl[] __devinitdata = {
27587 +static const struct pci_device_id i82860_pci_tbl[] __devinitconst = {
27588 {
27589 PCI_VEND_DEV(INTEL, 82860_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27590 I82860},
27591 diff --git a/drivers/edac/i82875p_edac.c b/drivers/edac/i82875p_edac.c
27592 index 33864c6..01edc61 100644
27593 --- a/drivers/edac/i82875p_edac.c
27594 +++ b/drivers/edac/i82875p_edac.c
27595 @@ -511,7 +511,7 @@ static void __devexit i82875p_remove_one(struct pci_dev *pdev)
27596 edac_mc_free(mci);
27597 }
27598
27599 -static const struct pci_device_id i82875p_pci_tbl[] __devinitdata = {
27600 +static const struct pci_device_id i82875p_pci_tbl[] __devinitconst = {
27601 {
27602 PCI_VEND_DEV(INTEL, 82875_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27603 I82875P},
27604 diff --git a/drivers/edac/i82975x_edac.c b/drivers/edac/i82975x_edac.c
27605 index a5da732..983363b 100644
27606 --- a/drivers/edac/i82975x_edac.c
27607 +++ b/drivers/edac/i82975x_edac.c
27608 @@ -604,7 +604,7 @@ static void __devexit i82975x_remove_one(struct pci_dev *pdev)
27609 edac_mc_free(mci);
27610 }
27611
27612 -static const struct pci_device_id i82975x_pci_tbl[] __devinitdata = {
27613 +static const struct pci_device_id i82975x_pci_tbl[] __devinitconst = {
27614 {
27615 PCI_VEND_DEV(INTEL, 82975_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27616 I82975X
27617 diff --git a/drivers/edac/mce_amd.h b/drivers/edac/mce_amd.h
27618 index 0106747..0b40417 100644
27619 --- a/drivers/edac/mce_amd.h
27620 +++ b/drivers/edac/mce_amd.h
27621 @@ -83,7 +83,7 @@ struct amd_decoder_ops {
27622 bool (*dc_mce)(u16, u8);
27623 bool (*ic_mce)(u16, u8);
27624 bool (*nb_mce)(u16, u8);
27625 -};
27626 +} __no_const;
27627
27628 void amd_report_gart_errors(bool);
27629 void amd_register_ecc_decoder(void (*f)(int, struct mce *));
27630 diff --git a/drivers/edac/r82600_edac.c b/drivers/edac/r82600_edac.c
27631 index b153674..ad2ba9b 100644
27632 --- a/drivers/edac/r82600_edac.c
27633 +++ b/drivers/edac/r82600_edac.c
27634 @@ -373,7 +373,7 @@ static void __devexit r82600_remove_one(struct pci_dev *pdev)
27635 edac_mc_free(mci);
27636 }
27637
27638 -static const struct pci_device_id r82600_pci_tbl[] __devinitdata = {
27639 +static const struct pci_device_id r82600_pci_tbl[] __devinitconst = {
27640 {
27641 PCI_DEVICE(PCI_VENDOR_ID_RADISYS, R82600_BRIDGE_ID)
27642 },
27643 diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
27644 index 7a402bf..af0b211 100644
27645 --- a/drivers/edac/sb_edac.c
27646 +++ b/drivers/edac/sb_edac.c
27647 @@ -367,7 +367,7 @@ static const struct pci_id_table pci_dev_descr_sbridge_table[] = {
27648 /*
27649 * pci_device_id table for which devices we are looking for
27650 */
27651 -static const struct pci_device_id sbridge_pci_tbl[] __devinitdata = {
27652 +static const struct pci_device_id sbridge_pci_tbl[] __devinitconst = {
27653 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA)},
27654 {0,} /* 0 terminated list. */
27655 };
27656 diff --git a/drivers/edac/x38_edac.c b/drivers/edac/x38_edac.c
27657 index b6f47de..c5acf3a 100644
27658 --- a/drivers/edac/x38_edac.c
27659 +++ b/drivers/edac/x38_edac.c
27660 @@ -440,7 +440,7 @@ static void __devexit x38_remove_one(struct pci_dev *pdev)
27661 edac_mc_free(mci);
27662 }
27663
27664 -static const struct pci_device_id x38_pci_tbl[] __devinitdata = {
27665 +static const struct pci_device_id x38_pci_tbl[] __devinitconst = {
27666 {
27667 PCI_VEND_DEV(INTEL, X38_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27668 X38},
27669 diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
27670 index 85661b0..c784559a 100644
27671 --- a/drivers/firewire/core-card.c
27672 +++ b/drivers/firewire/core-card.c
27673 @@ -657,7 +657,7 @@ void fw_card_release(struct kref *kref)
27674
27675 void fw_core_remove_card(struct fw_card *card)
27676 {
27677 - struct fw_card_driver dummy_driver = dummy_driver_template;
27678 + fw_card_driver_no_const dummy_driver = dummy_driver_template;
27679
27680 card->driver->update_phy_reg(card, 4,
27681 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
27682 diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
27683 index 4799393..37bd3ab 100644
27684 --- a/drivers/firewire/core-cdev.c
27685 +++ b/drivers/firewire/core-cdev.c
27686 @@ -1331,8 +1331,7 @@ static int init_iso_resource(struct client *client,
27687 int ret;
27688
27689 if ((request->channels == 0 && request->bandwidth == 0) ||
27690 - request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
27691 - request->bandwidth < 0)
27692 + request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
27693 return -EINVAL;
27694
27695 r = kmalloc(sizeof(*r), GFP_KERNEL);
27696 diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
27697 index 855ab3f..11f4bbd 100644
27698 --- a/drivers/firewire/core-transaction.c
27699 +++ b/drivers/firewire/core-transaction.c
27700 @@ -37,6 +37,7 @@
27701 #include <linux/timer.h>
27702 #include <linux/types.h>
27703 #include <linux/workqueue.h>
27704 +#include <linux/sched.h>
27705
27706 #include <asm/byteorder.h>
27707
27708 diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
27709 index b45be57..5fad18b 100644
27710 --- a/drivers/firewire/core.h
27711 +++ b/drivers/firewire/core.h
27712 @@ -101,6 +101,7 @@ struct fw_card_driver {
27713
27714 int (*stop_iso)(struct fw_iso_context *ctx);
27715 };
27716 +typedef struct fw_card_driver __no_const fw_card_driver_no_const;
27717
27718 void fw_card_initialize(struct fw_card *card,
27719 const struct fw_card_driver *driver, struct device *device);
27720 diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
27721 index 153980b..4b4d046 100644
27722 --- a/drivers/firmware/dmi_scan.c
27723 +++ b/drivers/firmware/dmi_scan.c
27724 @@ -449,11 +449,6 @@ void __init dmi_scan_machine(void)
27725 }
27726 }
27727 else {
27728 - /*
27729 - * no iounmap() for that ioremap(); it would be a no-op, but
27730 - * it's so early in setup that sucker gets confused into doing
27731 - * what it shouldn't if we actually call it.
27732 - */
27733 p = dmi_ioremap(0xF0000, 0x10000);
27734 if (p == NULL)
27735 goto error;
27736 @@ -723,7 +718,7 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *),
27737 if (buf == NULL)
27738 return -1;
27739
27740 - dmi_table(buf, dmi_len, dmi_num, decode, private_data);
27741 + dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data);
27742
27743 iounmap(buf);
27744 return 0;
27745 diff --git a/drivers/gpio/gpio-vr41xx.c b/drivers/gpio/gpio-vr41xx.c
27746 index 98723cb..10ca85b 100644
27747 --- a/drivers/gpio/gpio-vr41xx.c
27748 +++ b/drivers/gpio/gpio-vr41xx.c
27749 @@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
27750 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
27751 maskl, pendl, maskh, pendh);
27752
27753 - atomic_inc(&irq_err_count);
27754 + atomic_inc_unchecked(&irq_err_count);
27755
27756 return -EINVAL;
27757 }
27758 diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
27759 index 8323fc3..5c1d755 100644
27760 --- a/drivers/gpu/drm/drm_crtc.c
27761 +++ b/drivers/gpu/drm/drm_crtc.c
27762 @@ -1379,7 +1379,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
27763 */
27764 if ((out_resp->count_modes >= mode_count) && mode_count) {
27765 copied = 0;
27766 - mode_ptr = (struct drm_mode_modeinfo *)(unsigned long)out_resp->modes_ptr;
27767 + mode_ptr = (struct drm_mode_modeinfo __user *)(unsigned long)out_resp->modes_ptr;
27768 list_for_each_entry(mode, &connector->modes, head) {
27769 drm_crtc_convert_to_umode(&u_mode, mode);
27770 if (copy_to_user(mode_ptr + copied,
27771 @@ -1394,8 +1394,8 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
27772
27773 if ((out_resp->count_props >= props_count) && props_count) {
27774 copied = 0;
27775 - prop_ptr = (uint32_t *)(unsigned long)(out_resp->props_ptr);
27776 - prop_values = (uint64_t *)(unsigned long)(out_resp->prop_values_ptr);
27777 + prop_ptr = (uint32_t __user *)(unsigned long)(out_resp->props_ptr);
27778 + prop_values = (uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr);
27779 for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
27780 if (connector->property_ids[i] != 0) {
27781 if (put_user(connector->property_ids[i],
27782 @@ -1417,7 +1417,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
27783
27784 if ((out_resp->count_encoders >= encoders_count) && encoders_count) {
27785 copied = 0;
27786 - encoder_ptr = (uint32_t *)(unsigned long)(out_resp->encoders_ptr);
27787 + encoder_ptr = (uint32_t __user *)(unsigned long)(out_resp->encoders_ptr);
27788 for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
27789 if (connector->encoder_ids[i] != 0) {
27790 if (put_user(connector->encoder_ids[i],
27791 @@ -1576,7 +1576,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
27792 }
27793
27794 for (i = 0; i < crtc_req->count_connectors; i++) {
27795 - set_connectors_ptr = (uint32_t *)(unsigned long)crtc_req->set_connectors_ptr;
27796 + set_connectors_ptr = (uint32_t __user *)(unsigned long)crtc_req->set_connectors_ptr;
27797 if (get_user(out_id, &set_connectors_ptr[i])) {
27798 ret = -EFAULT;
27799 goto out;
27800 @@ -1857,7 +1857,7 @@ int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
27801 fb = obj_to_fb(obj);
27802
27803 num_clips = r->num_clips;
27804 - clips_ptr = (struct drm_clip_rect *)(unsigned long)r->clips_ptr;
27805 + clips_ptr = (struct drm_clip_rect __user *)(unsigned long)r->clips_ptr;
27806
27807 if (!num_clips != !clips_ptr) {
27808 ret = -EINVAL;
27809 @@ -2283,7 +2283,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
27810 out_resp->flags = property->flags;
27811
27812 if ((out_resp->count_values >= value_count) && value_count) {
27813 - values_ptr = (uint64_t *)(unsigned long)out_resp->values_ptr;
27814 + values_ptr = (uint64_t __user *)(unsigned long)out_resp->values_ptr;
27815 for (i = 0; i < value_count; i++) {
27816 if (copy_to_user(values_ptr + i, &property->values[i], sizeof(uint64_t))) {
27817 ret = -EFAULT;
27818 @@ -2296,7 +2296,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
27819 if (property->flags & DRM_MODE_PROP_ENUM) {
27820 if ((out_resp->count_enum_blobs >= enum_count) && enum_count) {
27821 copied = 0;
27822 - enum_ptr = (struct drm_mode_property_enum *)(unsigned long)out_resp->enum_blob_ptr;
27823 + enum_ptr = (struct drm_mode_property_enum __user *)(unsigned long)out_resp->enum_blob_ptr;
27824 list_for_each_entry(prop_enum, &property->enum_blob_list, head) {
27825
27826 if (copy_to_user(&enum_ptr[copied].value, &prop_enum->value, sizeof(uint64_t))) {
27827 @@ -2319,7 +2319,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
27828 if ((out_resp->count_enum_blobs >= blob_count) && blob_count) {
27829 copied = 0;
27830 blob_id_ptr = (uint32_t *)(unsigned long)out_resp->enum_blob_ptr;
27831 - blob_length_ptr = (uint32_t *)(unsigned long)out_resp->values_ptr;
27832 + blob_length_ptr = (uint32_t __user *)(unsigned long)out_resp->values_ptr;
27833
27834 list_for_each_entry(prop_blob, &property->enum_blob_list, head) {
27835 if (put_user(prop_blob->base.id, blob_id_ptr + copied)) {
27836 @@ -2380,7 +2380,7 @@ int drm_mode_getblob_ioctl(struct drm_device *dev,
27837 struct drm_mode_get_blob *out_resp = data;
27838 struct drm_property_blob *blob;
27839 int ret = 0;
27840 - void *blob_ptr;
27841 + void __user *blob_ptr;
27842
27843 if (!drm_core_check_feature(dev, DRIVER_MODESET))
27844 return -EINVAL;
27845 @@ -2394,7 +2394,7 @@ int drm_mode_getblob_ioctl(struct drm_device *dev,
27846 blob = obj_to_blob(obj);
27847
27848 if (out_resp->length == blob->length) {
27849 - blob_ptr = (void *)(unsigned long)out_resp->data;
27850 + blob_ptr = (void __user *)(unsigned long)out_resp->data;
27851 if (copy_to_user(blob_ptr, blob->data, blob->length)){
27852 ret = -EFAULT;
27853 goto done;
27854 diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
27855 index d2619d7..bd6bd00 100644
27856 --- a/drivers/gpu/drm/drm_crtc_helper.c
27857 +++ b/drivers/gpu/drm/drm_crtc_helper.c
27858 @@ -279,7 +279,7 @@ static bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
27859 struct drm_crtc *tmp;
27860 int crtc_mask = 1;
27861
27862 - WARN(!crtc, "checking null crtc?\n");
27863 + BUG_ON(!crtc);
27864
27865 dev = crtc->dev;
27866
27867 diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
27868 index 40c187c..5746164 100644
27869 --- a/drivers/gpu/drm/drm_drv.c
27870 +++ b/drivers/gpu/drm/drm_drv.c
27871 @@ -308,7 +308,7 @@ module_exit(drm_core_exit);
27872 /**
27873 * Copy and IOCTL return string to user space
27874 */
27875 -static int drm_copy_field(char *buf, size_t *buf_len, const char *value)
27876 +static int drm_copy_field(char __user *buf, size_t *buf_len, const char *value)
27877 {
27878 int len;
27879
27880 @@ -387,7 +387,7 @@ long drm_ioctl(struct file *filp,
27881
27882 dev = file_priv->minor->dev;
27883 atomic_inc(&dev->ioctl_count);
27884 - atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
27885 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
27886 ++file_priv->ioctl_count;
27887
27888 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
27889 diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
27890 index 828bf65..cdaa0e9 100644
27891 --- a/drivers/gpu/drm/drm_fops.c
27892 +++ b/drivers/gpu/drm/drm_fops.c
27893 @@ -71,7 +71,7 @@ static int drm_setup(struct drm_device * dev)
27894 }
27895
27896 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
27897 - atomic_set(&dev->counts[i], 0);
27898 + atomic_set_unchecked(&dev->counts[i], 0);
27899
27900 dev->sigdata.lock = NULL;
27901
27902 @@ -135,8 +135,8 @@ int drm_open(struct inode *inode, struct file *filp)
27903
27904 retcode = drm_open_helper(inode, filp, dev);
27905 if (!retcode) {
27906 - atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
27907 - if (!dev->open_count++)
27908 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
27909 + if (local_inc_return(&dev->open_count) == 1)
27910 retcode = drm_setup(dev);
27911 }
27912 if (!retcode) {
27913 @@ -473,7 +473,7 @@ int drm_release(struct inode *inode, struct file *filp)
27914
27915 mutex_lock(&drm_global_mutex);
27916
27917 - DRM_DEBUG("open_count = %d\n", dev->open_count);
27918 + DRM_DEBUG("open_count = %d\n", local_read(&dev->open_count));
27919
27920 if (dev->driver->preclose)
27921 dev->driver->preclose(dev, file_priv);
27922 @@ -485,7 +485,7 @@ int drm_release(struct inode *inode, struct file *filp)
27923 DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
27924 task_pid_nr(current),
27925 (long)old_encode_dev(file_priv->minor->device),
27926 - dev->open_count);
27927 + local_read(&dev->open_count));
27928
27929 /* Release any auth tokens that might point to this file_priv,
27930 (do that under the drm_global_mutex) */
27931 @@ -571,8 +571,8 @@ int drm_release(struct inode *inode, struct file *filp)
27932 * End inline drm_release
27933 */
27934
27935 - atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
27936 - if (!--dev->open_count) {
27937 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
27938 + if (local_dec_and_test(&dev->open_count)) {
27939 if (atomic_read(&dev->ioctl_count)) {
27940 DRM_ERROR("Device busy: %d\n",
27941 atomic_read(&dev->ioctl_count));
27942 diff --git a/drivers/gpu/drm/drm_global.c b/drivers/gpu/drm/drm_global.c
27943 index c87dc96..326055d 100644
27944 --- a/drivers/gpu/drm/drm_global.c
27945 +++ b/drivers/gpu/drm/drm_global.c
27946 @@ -36,7 +36,7 @@
27947 struct drm_global_item {
27948 struct mutex mutex;
27949 void *object;
27950 - int refcount;
27951 + atomic_t refcount;
27952 };
27953
27954 static struct drm_global_item glob[DRM_GLOBAL_NUM];
27955 @@ -49,7 +49,7 @@ void drm_global_init(void)
27956 struct drm_global_item *item = &glob[i];
27957 mutex_init(&item->mutex);
27958 item->object = NULL;
27959 - item->refcount = 0;
27960 + atomic_set(&item->refcount, 0);
27961 }
27962 }
27963
27964 @@ -59,7 +59,7 @@ void drm_global_release(void)
27965 for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
27966 struct drm_global_item *item = &glob[i];
27967 BUG_ON(item->object != NULL);
27968 - BUG_ON(item->refcount != 0);
27969 + BUG_ON(atomic_read(&item->refcount) != 0);
27970 }
27971 }
27972
27973 @@ -70,7 +70,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
27974 void *object;
27975
27976 mutex_lock(&item->mutex);
27977 - if (item->refcount == 0) {
27978 + if (atomic_read(&item->refcount) == 0) {
27979 item->object = kzalloc(ref->size, GFP_KERNEL);
27980 if (unlikely(item->object == NULL)) {
27981 ret = -ENOMEM;
27982 @@ -83,7 +83,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
27983 goto out_err;
27984
27985 }
27986 - ++item->refcount;
27987 + atomic_inc(&item->refcount);
27988 ref->object = item->object;
27989 object = item->object;
27990 mutex_unlock(&item->mutex);
27991 @@ -100,9 +100,9 @@ void drm_global_item_unref(struct drm_global_reference *ref)
27992 struct drm_global_item *item = &glob[ref->global_type];
27993
27994 mutex_lock(&item->mutex);
27995 - BUG_ON(item->refcount == 0);
27996 + BUG_ON(atomic_read(&item->refcount) == 0);
27997 BUG_ON(ref->object != item->object);
27998 - if (--item->refcount == 0) {
27999 + if (atomic_dec_and_test(&item->refcount)) {
28000 ref->release(ref);
28001 item->object = NULL;
28002 }
28003 diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
28004 index ab1162d..42587b2 100644
28005 --- a/drivers/gpu/drm/drm_info.c
28006 +++ b/drivers/gpu/drm/drm_info.c
28007 @@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void *data)
28008 struct drm_local_map *map;
28009 struct drm_map_list *r_list;
28010
28011 - /* Hardcoded from _DRM_FRAME_BUFFER,
28012 - _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
28013 - _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
28014 - const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
28015 + static const char * const types[] = {
28016 + [_DRM_FRAME_BUFFER] = "FB",
28017 + [_DRM_REGISTERS] = "REG",
28018 + [_DRM_SHM] = "SHM",
28019 + [_DRM_AGP] = "AGP",
28020 + [_DRM_SCATTER_GATHER] = "SG",
28021 + [_DRM_CONSISTENT] = "PCI",
28022 + [_DRM_GEM] = "GEM" };
28023 const char *type;
28024 int i;
28025
28026 @@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void *data)
28027 map = r_list->map;
28028 if (!map)
28029 continue;
28030 - if (map->type < 0 || map->type > 5)
28031 + if (map->type >= ARRAY_SIZE(types))
28032 type = "??";
28033 else
28034 type = types[map->type];
28035 @@ -290,7 +294,11 @@ int drm_vma_info(struct seq_file *m, void *data)
28036 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
28037 vma->vm_flags & VM_LOCKED ? 'l' : '-',
28038 vma->vm_flags & VM_IO ? 'i' : '-',
28039 +#ifdef CONFIG_GRKERNSEC_HIDESYM
28040 + 0);
28041 +#else
28042 vma->vm_pgoff);
28043 +#endif
28044
28045 #if defined(__i386__)
28046 pgprot = pgprot_val(vma->vm_page_prot);
28047 diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
28048 index ddd70db..40321e6 100644
28049 --- a/drivers/gpu/drm/drm_ioc32.c
28050 +++ b/drivers/gpu/drm/drm_ioc32.c
28051 @@ -456,7 +456,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
28052 request = compat_alloc_user_space(nbytes);
28053 if (!access_ok(VERIFY_WRITE, request, nbytes))
28054 return -EFAULT;
28055 - list = (struct drm_buf_desc *) (request + 1);
28056 + list = (struct drm_buf_desc __user *) (request + 1);
28057
28058 if (__put_user(count, &request->count)
28059 || __put_user(list, &request->list))
28060 @@ -517,7 +517,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
28061 request = compat_alloc_user_space(nbytes);
28062 if (!access_ok(VERIFY_WRITE, request, nbytes))
28063 return -EFAULT;
28064 - list = (struct drm_buf_pub *) (request + 1);
28065 + list = (struct drm_buf_pub __user *) (request + 1);
28066
28067 if (__put_user(count, &request->count)
28068 || __put_user(list, &request->list))
28069 diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
28070 index 904d7e9..ab88581 100644
28071 --- a/drivers/gpu/drm/drm_ioctl.c
28072 +++ b/drivers/gpu/drm/drm_ioctl.c
28073 @@ -256,7 +256,7 @@ int drm_getstats(struct drm_device *dev, void *data,
28074 stats->data[i].value =
28075 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
28076 else
28077 - stats->data[i].value = atomic_read(&dev->counts[i]);
28078 + stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
28079 stats->data[i].type = dev->types[i];
28080 }
28081
28082 diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
28083 index 632ae24..244cf4a 100644
28084 --- a/drivers/gpu/drm/drm_lock.c
28085 +++ b/drivers/gpu/drm/drm_lock.c
28086 @@ -89,7 +89,7 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
28087 if (drm_lock_take(&master->lock, lock->context)) {
28088 master->lock.file_priv = file_priv;
28089 master->lock.lock_time = jiffies;
28090 - atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
28091 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
28092 break; /* Got lock */
28093 }
28094
28095 @@ -160,7 +160,7 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
28096 return -EINVAL;
28097 }
28098
28099 - atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
28100 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
28101
28102 if (drm_lock_free(&master->lock, lock->context)) {
28103 /* FIXME: Should really bail out here. */
28104 diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
28105 index 8f371e8..9f85d52 100644
28106 --- a/drivers/gpu/drm/i810/i810_dma.c
28107 +++ b/drivers/gpu/drm/i810/i810_dma.c
28108 @@ -950,8 +950,8 @@ static int i810_dma_vertex(struct drm_device *dev, void *data,
28109 dma->buflist[vertex->idx],
28110 vertex->discard, vertex->used);
28111
28112 - atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
28113 - atomic_inc(&dev->counts[_DRM_STAT_DMA]);
28114 + atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
28115 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
28116 sarea_priv->last_enqueue = dev_priv->counter - 1;
28117 sarea_priv->last_dispatch = (int)hw_status[5];
28118
28119 @@ -1111,8 +1111,8 @@ static int i810_dma_mc(struct drm_device *dev, void *data,
28120 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
28121 mc->last_render);
28122
28123 - atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
28124 - atomic_inc(&dev->counts[_DRM_STAT_DMA]);
28125 + atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
28126 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
28127 sarea_priv->last_enqueue = dev_priv->counter - 1;
28128 sarea_priv->last_dispatch = (int)hw_status[5];
28129
28130 diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
28131 index c9339f4..f5e1b9d 100644
28132 --- a/drivers/gpu/drm/i810/i810_drv.h
28133 +++ b/drivers/gpu/drm/i810/i810_drv.h
28134 @@ -108,8 +108,8 @@ typedef struct drm_i810_private {
28135 int page_flipping;
28136
28137 wait_queue_head_t irq_queue;
28138 - atomic_t irq_received;
28139 - atomic_t irq_emitted;
28140 + atomic_unchecked_t irq_received;
28141 + atomic_unchecked_t irq_emitted;
28142
28143 int front_offset;
28144 } drm_i810_private_t;
28145 diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
28146 index b2e3c97..58cf079 100644
28147 --- a/drivers/gpu/drm/i915/i915_debugfs.c
28148 +++ b/drivers/gpu/drm/i915/i915_debugfs.c
28149 @@ -499,7 +499,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
28150 I915_READ(GTIMR));
28151 }
28152 seq_printf(m, "Interrupts received: %d\n",
28153 - atomic_read(&dev_priv->irq_received));
28154 + atomic_read_unchecked(&dev_priv->irq_received));
28155 for (i = 0; i < I915_NUM_RINGS; i++) {
28156 if (IS_GEN6(dev) || IS_GEN7(dev)) {
28157 seq_printf(m, "Graphics Interrupt mask (%s): %08x\n",
28158 @@ -1232,7 +1232,7 @@ static int i915_opregion(struct seq_file *m, void *unused)
28159 return ret;
28160
28161 if (opregion->header)
28162 - seq_write(m, opregion->header, OPREGION_SIZE);
28163 + seq_write(m, (const void __force_kernel *)opregion->header, OPREGION_SIZE);
28164
28165 mutex_unlock(&dev->struct_mutex);
28166
28167 diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
28168 index c4da951..3c59c5c 100644
28169 --- a/drivers/gpu/drm/i915/i915_dma.c
28170 +++ b/drivers/gpu/drm/i915/i915_dma.c
28171 @@ -1172,7 +1172,7 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
28172 bool can_switch;
28173
28174 spin_lock(&dev->count_lock);
28175 - can_switch = (dev->open_count == 0);
28176 + can_switch = (local_read(&dev->open_count) == 0);
28177 spin_unlock(&dev->count_lock);
28178 return can_switch;
28179 }
28180 diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
28181 index ae294a0..1755461 100644
28182 --- a/drivers/gpu/drm/i915/i915_drv.h
28183 +++ b/drivers/gpu/drm/i915/i915_drv.h
28184 @@ -229,7 +229,7 @@ struct drm_i915_display_funcs {
28185 /* render clock increase/decrease */
28186 /* display clock increase/decrease */
28187 /* pll clock increase/decrease */
28188 -};
28189 +} __no_const;
28190
28191 struct intel_device_info {
28192 u8 gen;
28193 @@ -318,7 +318,7 @@ typedef struct drm_i915_private {
28194 int current_page;
28195 int page_flipping;
28196
28197 - atomic_t irq_received;
28198 + atomic_unchecked_t irq_received;
28199
28200 /* protects the irq masks */
28201 spinlock_t irq_lock;
28202 @@ -893,7 +893,7 @@ struct drm_i915_gem_object {
28203 * will be page flipped away on the next vblank. When it
28204 * reaches 0, dev_priv->pending_flip_queue will be woken up.
28205 */
28206 - atomic_t pending_flip;
28207 + atomic_unchecked_t pending_flip;
28208 };
28209
28210 #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
28211 @@ -1273,7 +1273,7 @@ extern int intel_setup_gmbus(struct drm_device *dev);
28212 extern void intel_teardown_gmbus(struct drm_device *dev);
28213 extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
28214 extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
28215 -extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
28216 +static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
28217 {
28218 return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
28219 }
28220 diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
28221 index b9da890..cad1d98 100644
28222 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
28223 +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
28224 @@ -189,7 +189,7 @@ i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj,
28225 i915_gem_clflush_object(obj);
28226
28227 if (obj->base.pending_write_domain)
28228 - cd->flips |= atomic_read(&obj->pending_flip);
28229 + cd->flips |= atomic_read_unchecked(&obj->pending_flip);
28230
28231 /* The actual obj->write_domain will be updated with
28232 * pending_write_domain after we emit the accumulated flush for all
28233 @@ -882,9 +882,9 @@ i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
28234
28235 static int
28236 validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
28237 - int count)
28238 + unsigned int count)
28239 {
28240 - int i;
28241 + unsigned int i;
28242
28243 for (i = 0; i < count; i++) {
28244 char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
28245 diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
28246 index d47a53b..61154c2 100644
28247 --- a/drivers/gpu/drm/i915/i915_irq.c
28248 +++ b/drivers/gpu/drm/i915/i915_irq.c
28249 @@ -475,7 +475,7 @@ static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
28250 u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
28251 struct drm_i915_master_private *master_priv;
28252
28253 - atomic_inc(&dev_priv->irq_received);
28254 + atomic_inc_unchecked(&dev_priv->irq_received);
28255
28256 /* disable master interrupt before clearing iir */
28257 de_ier = I915_READ(DEIER);
28258 @@ -566,7 +566,7 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
28259 struct drm_i915_master_private *master_priv;
28260 u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT;
28261
28262 - atomic_inc(&dev_priv->irq_received);
28263 + atomic_inc_unchecked(&dev_priv->irq_received);
28264
28265 if (IS_GEN6(dev))
28266 bsd_usr_interrupt = GT_GEN6_BSD_USER_INTERRUPT;
28267 @@ -1231,7 +1231,7 @@ static irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
28268 int ret = IRQ_NONE, pipe;
28269 bool blc_event = false;
28270
28271 - atomic_inc(&dev_priv->irq_received);
28272 + atomic_inc_unchecked(&dev_priv->irq_received);
28273
28274 iir = I915_READ(IIR);
28275
28276 @@ -1750,7 +1750,7 @@ static void ironlake_irq_preinstall(struct drm_device *dev)
28277 {
28278 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
28279
28280 - atomic_set(&dev_priv->irq_received, 0);
28281 + atomic_set_unchecked(&dev_priv->irq_received, 0);
28282
28283 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
28284 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
28285 @@ -1938,7 +1938,7 @@ static void i915_driver_irq_preinstall(struct drm_device * dev)
28286 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
28287 int pipe;
28288
28289 - atomic_set(&dev_priv->irq_received, 0);
28290 + atomic_set_unchecked(&dev_priv->irq_received, 0);
28291
28292 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
28293 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
28294 diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
28295 index daa5743..c0757a9 100644
28296 --- a/drivers/gpu/drm/i915/intel_display.c
28297 +++ b/drivers/gpu/drm/i915/intel_display.c
28298 @@ -2230,7 +2230,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
28299
28300 wait_event(dev_priv->pending_flip_queue,
28301 atomic_read(&dev_priv->mm.wedged) ||
28302 - atomic_read(&obj->pending_flip) == 0);
28303 + atomic_read_unchecked(&obj->pending_flip) == 0);
28304
28305 /* Big Hammer, we also need to ensure that any pending
28306 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
28307 @@ -2851,7 +2851,7 @@ static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
28308 obj = to_intel_framebuffer(crtc->fb)->obj;
28309 dev_priv = crtc->dev->dev_private;
28310 wait_event(dev_priv->pending_flip_queue,
28311 - atomic_read(&obj->pending_flip) == 0);
28312 + atomic_read_unchecked(&obj->pending_flip) == 0);
28313 }
28314
28315 static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
28316 @@ -6952,7 +6952,7 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
28317
28318 atomic_clear_mask(1 << intel_crtc->plane,
28319 &obj->pending_flip.counter);
28320 - if (atomic_read(&obj->pending_flip) == 0)
28321 + if (atomic_read_unchecked(&obj->pending_flip) == 0)
28322 wake_up(&dev_priv->pending_flip_queue);
28323
28324 schedule_work(&work->work);
28325 @@ -7242,7 +7242,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
28326 /* Block clients from rendering to the new back buffer until
28327 * the flip occurs and the object is no longer visible.
28328 */
28329 - atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
28330 + atomic_add_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
28331
28332 ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
28333 if (ret)
28334 @@ -7256,7 +7256,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
28335 return 0;
28336
28337 cleanup_pending:
28338 - atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
28339 + atomic_sub_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
28340 drm_gem_object_unreference(&work->old_fb_obj->base);
28341 drm_gem_object_unreference(&obj->base);
28342 mutex_unlock(&dev->struct_mutex);
28343 diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
28344 index 54558a0..2d97005 100644
28345 --- a/drivers/gpu/drm/mga/mga_drv.h
28346 +++ b/drivers/gpu/drm/mga/mga_drv.h
28347 @@ -120,9 +120,9 @@ typedef struct drm_mga_private {
28348 u32 clear_cmd;
28349 u32 maccess;
28350
28351 - atomic_t vbl_received; /**< Number of vblanks received. */
28352 + atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
28353 wait_queue_head_t fence_queue;
28354 - atomic_t last_fence_retired;
28355 + atomic_unchecked_t last_fence_retired;
28356 u32 next_fence_to_post;
28357
28358 unsigned int fb_cpp;
28359 diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
28360 index 2581202..f230a8d9 100644
28361 --- a/drivers/gpu/drm/mga/mga_irq.c
28362 +++ b/drivers/gpu/drm/mga/mga_irq.c
28363 @@ -44,7 +44,7 @@ u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
28364 if (crtc != 0)
28365 return 0;
28366
28367 - return atomic_read(&dev_priv->vbl_received);
28368 + return atomic_read_unchecked(&dev_priv->vbl_received);
28369 }
28370
28371
28372 @@ -60,7 +60,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
28373 /* VBLANK interrupt */
28374 if (status & MGA_VLINEPEN) {
28375 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
28376 - atomic_inc(&dev_priv->vbl_received);
28377 + atomic_inc_unchecked(&dev_priv->vbl_received);
28378 drm_handle_vblank(dev, 0);
28379 handled = 1;
28380 }
28381 @@ -79,7 +79,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
28382 if ((prim_start & ~0x03) != (prim_end & ~0x03))
28383 MGA_WRITE(MGA_PRIMEND, prim_end);
28384
28385 - atomic_inc(&dev_priv->last_fence_retired);
28386 + atomic_inc_unchecked(&dev_priv->last_fence_retired);
28387 DRM_WAKEUP(&dev_priv->fence_queue);
28388 handled = 1;
28389 }
28390 @@ -130,7 +130,7 @@ int mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence)
28391 * using fences.
28392 */
28393 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
28394 - (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
28395 + (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
28396 - *sequence) <= (1 << 23)));
28397
28398 *sequence = cur_fence;
28399 diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
28400 index 5fc201b..7b032b9 100644
28401 --- a/drivers/gpu/drm/nouveau/nouveau_bios.c
28402 +++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
28403 @@ -201,7 +201,7 @@ struct methods {
28404 const char desc[8];
28405 void (*loadbios)(struct drm_device *, uint8_t *);
28406 const bool rw;
28407 -};
28408 +} __do_const;
28409
28410 static struct methods shadow_methods[] = {
28411 { "PRAMIN", load_vbios_pramin, true },
28412 @@ -5474,7 +5474,7 @@ parse_bit_U_tbl_entry(struct drm_device *dev, struct nvbios *bios,
28413 struct bit_table {
28414 const char id;
28415 int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
28416 -};
28417 +} __no_const;
28418
28419 #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
28420
28421 diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
28422 index 4c0be3a..5757582 100644
28423 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h
28424 +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
28425 @@ -238,7 +238,7 @@ struct nouveau_channel {
28426 struct list_head pending;
28427 uint32_t sequence;
28428 uint32_t sequence_ack;
28429 - atomic_t last_sequence_irq;
28430 + atomic_unchecked_t last_sequence_irq;
28431 struct nouveau_vma vma;
28432 } fence;
28433
28434 @@ -319,7 +319,7 @@ struct nouveau_exec_engine {
28435 u32 handle, u16 class);
28436 void (*set_tile_region)(struct drm_device *dev, int i);
28437 void (*tlb_flush)(struct drm_device *, int engine);
28438 -};
28439 +} __no_const;
28440
28441 struct nouveau_instmem_engine {
28442 void *priv;
28443 @@ -341,13 +341,13 @@ struct nouveau_instmem_engine {
28444 struct nouveau_mc_engine {
28445 int (*init)(struct drm_device *dev);
28446 void (*takedown)(struct drm_device *dev);
28447 -};
28448 +} __no_const;
28449
28450 struct nouveau_timer_engine {
28451 int (*init)(struct drm_device *dev);
28452 void (*takedown)(struct drm_device *dev);
28453 uint64_t (*read)(struct drm_device *dev);
28454 -};
28455 +} __no_const;
28456
28457 struct nouveau_fb_engine {
28458 int num_tiles;
28459 @@ -558,7 +558,7 @@ struct nouveau_vram_engine {
28460 void (*put)(struct drm_device *, struct nouveau_mem **);
28461
28462 bool (*flags_valid)(struct drm_device *, u32 tile_flags);
28463 -};
28464 +} __no_const;
28465
28466 struct nouveau_engine {
28467 struct nouveau_instmem_engine instmem;
28468 @@ -706,7 +706,7 @@ struct drm_nouveau_private {
28469 struct drm_global_reference mem_global_ref;
28470 struct ttm_bo_global_ref bo_global_ref;
28471 struct ttm_bo_device bdev;
28472 - atomic_t validate_sequence;
28473 + atomic_unchecked_t validate_sequence;
28474 } ttm;
28475
28476 struct {
28477 diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
28478 index 2f6daae..c9d7b9e 100644
28479 --- a/drivers/gpu/drm/nouveau/nouveau_fence.c
28480 +++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
28481 @@ -85,7 +85,7 @@ nouveau_fence_update(struct nouveau_channel *chan)
28482 if (USE_REFCNT(dev))
28483 sequence = nvchan_rd32(chan, 0x48);
28484 else
28485 - sequence = atomic_read(&chan->fence.last_sequence_irq);
28486 + sequence = atomic_read_unchecked(&chan->fence.last_sequence_irq);
28487
28488 if (chan->fence.sequence_ack == sequence)
28489 goto out;
28490 @@ -539,7 +539,7 @@ nouveau_fence_channel_init(struct nouveau_channel *chan)
28491 return ret;
28492 }
28493
28494 - atomic_set(&chan->fence.last_sequence_irq, 0);
28495 + atomic_set_unchecked(&chan->fence.last_sequence_irq, 0);
28496 return 0;
28497 }
28498
28499 diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
28500 index 7ce3fde..cb3ea04 100644
28501 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c
28502 +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
28503 @@ -314,7 +314,7 @@ validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
28504 int trycnt = 0;
28505 int ret, i;
28506
28507 - sequence = atomic_add_return(1, &dev_priv->ttm.validate_sequence);
28508 + sequence = atomic_add_return_unchecked(1, &dev_priv->ttm.validate_sequence);
28509 retry:
28510 if (++trycnt > 100000) {
28511 NV_ERROR(dev, "%s failed and gave up.\n", __func__);
28512 diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
28513 index d8831ab..0ba8356 100644
28514 --- a/drivers/gpu/drm/nouveau/nouveau_state.c
28515 +++ b/drivers/gpu/drm/nouveau/nouveau_state.c
28516 @@ -542,7 +542,7 @@ static bool nouveau_switcheroo_can_switch(struct pci_dev *pdev)
28517 bool can_switch;
28518
28519 spin_lock(&dev->count_lock);
28520 - can_switch = (dev->open_count == 0);
28521 + can_switch = (local_read(&dev->open_count) == 0);
28522 spin_unlock(&dev->count_lock);
28523 return can_switch;
28524 }
28525 diff --git a/drivers/gpu/drm/nouveau/nv04_graph.c b/drivers/gpu/drm/nouveau/nv04_graph.c
28526 index dbdea8e..cd6eeeb 100644
28527 --- a/drivers/gpu/drm/nouveau/nv04_graph.c
28528 +++ b/drivers/gpu/drm/nouveau/nv04_graph.c
28529 @@ -554,7 +554,7 @@ static int
28530 nv04_graph_mthd_set_ref(struct nouveau_channel *chan,
28531 u32 class, u32 mthd, u32 data)
28532 {
28533 - atomic_set(&chan->fence.last_sequence_irq, data);
28534 + atomic_set_unchecked(&chan->fence.last_sequence_irq, data);
28535 return 0;
28536 }
28537
28538 diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
28539 index bcac90b..53bfc76 100644
28540 --- a/drivers/gpu/drm/r128/r128_cce.c
28541 +++ b/drivers/gpu/drm/r128/r128_cce.c
28542 @@ -378,7 +378,7 @@ static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init)
28543
28544 /* GH: Simple idle check.
28545 */
28546 - atomic_set(&dev_priv->idle_count, 0);
28547 + atomic_set_unchecked(&dev_priv->idle_count, 0);
28548
28549 /* We don't support anything other than bus-mastering ring mode,
28550 * but the ring can be in either AGP or PCI space for the ring
28551 diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
28552 index 930c71b..499aded 100644
28553 --- a/drivers/gpu/drm/r128/r128_drv.h
28554 +++ b/drivers/gpu/drm/r128/r128_drv.h
28555 @@ -90,14 +90,14 @@ typedef struct drm_r128_private {
28556 int is_pci;
28557 unsigned long cce_buffers_offset;
28558
28559 - atomic_t idle_count;
28560 + atomic_unchecked_t idle_count;
28561
28562 int page_flipping;
28563 int current_page;
28564 u32 crtc_offset;
28565 u32 crtc_offset_cntl;
28566
28567 - atomic_t vbl_received;
28568 + atomic_unchecked_t vbl_received;
28569
28570 u32 color_fmt;
28571 unsigned int front_offset;
28572 diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
28573 index 429d5a0..7e899ed 100644
28574 --- a/drivers/gpu/drm/r128/r128_irq.c
28575 +++ b/drivers/gpu/drm/r128/r128_irq.c
28576 @@ -42,7 +42,7 @@ u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
28577 if (crtc != 0)
28578 return 0;
28579
28580 - return atomic_read(&dev_priv->vbl_received);
28581 + return atomic_read_unchecked(&dev_priv->vbl_received);
28582 }
28583
28584 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
28585 @@ -56,7 +56,7 @@ irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
28586 /* VBLANK interrupt */
28587 if (status & R128_CRTC_VBLANK_INT) {
28588 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
28589 - atomic_inc(&dev_priv->vbl_received);
28590 + atomic_inc_unchecked(&dev_priv->vbl_received);
28591 drm_handle_vblank(dev, 0);
28592 return IRQ_HANDLED;
28593 }
28594 diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
28595 index a9e33ce..09edd4b 100644
28596 --- a/drivers/gpu/drm/r128/r128_state.c
28597 +++ b/drivers/gpu/drm/r128/r128_state.c
28598 @@ -321,10 +321,10 @@ static void r128_clear_box(drm_r128_private_t *dev_priv,
28599
28600 static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
28601 {
28602 - if (atomic_read(&dev_priv->idle_count) == 0)
28603 + if (atomic_read_unchecked(&dev_priv->idle_count) == 0)
28604 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
28605 else
28606 - atomic_set(&dev_priv->idle_count, 0);
28607 + atomic_set_unchecked(&dev_priv->idle_count, 0);
28608 }
28609
28610 #endif
28611 diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
28612 index 5a82b6b..9e69c73 100644
28613 --- a/drivers/gpu/drm/radeon/mkregtable.c
28614 +++ b/drivers/gpu/drm/radeon/mkregtable.c
28615 @@ -637,14 +637,14 @@ static int parser_auth(struct table *t, const char *filename)
28616 regex_t mask_rex;
28617 regmatch_t match[4];
28618 char buf[1024];
28619 - size_t end;
28620 + long end;
28621 int len;
28622 int done = 0;
28623 int r;
28624 unsigned o;
28625 struct offset *offset;
28626 char last_reg_s[10];
28627 - int last_reg;
28628 + unsigned long last_reg;
28629
28630 if (regcomp
28631 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
28632 diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
28633 index 8227e76..ce0b195 100644
28634 --- a/drivers/gpu/drm/radeon/radeon.h
28635 +++ b/drivers/gpu/drm/radeon/radeon.h
28636 @@ -192,7 +192,7 @@ extern int sumo_get_temp(struct radeon_device *rdev);
28637 */
28638 struct radeon_fence_driver {
28639 uint32_t scratch_reg;
28640 - atomic_t seq;
28641 + atomic_unchecked_t seq;
28642 uint32_t last_seq;
28643 unsigned long last_jiffies;
28644 unsigned long last_timeout;
28645 @@ -530,7 +530,7 @@ struct r600_blit_cp_primitives {
28646 int x2, int y2);
28647 void (*draw_auto)(struct radeon_device *rdev);
28648 void (*set_default_state)(struct radeon_device *rdev);
28649 -};
28650 +} __no_const;
28651
28652 struct r600_blit {
28653 struct mutex mutex;
28654 @@ -954,7 +954,7 @@ struct radeon_asic {
28655 void (*pre_page_flip)(struct radeon_device *rdev, int crtc);
28656 u32 (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base);
28657 void (*post_page_flip)(struct radeon_device *rdev, int crtc);
28658 -};
28659 +} __no_const;
28660
28661 /*
28662 * Asic structures
28663 diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
28664 index 9231564..78b00fd 100644
28665 --- a/drivers/gpu/drm/radeon/radeon_device.c
28666 +++ b/drivers/gpu/drm/radeon/radeon_device.c
28667 @@ -687,7 +687,7 @@ static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
28668 bool can_switch;
28669
28670 spin_lock(&dev->count_lock);
28671 - can_switch = (dev->open_count == 0);
28672 + can_switch = (local_read(&dev->open_count) == 0);
28673 spin_unlock(&dev->count_lock);
28674 return can_switch;
28675 }
28676 diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
28677 index a1b59ca..86f2d44 100644
28678 --- a/drivers/gpu/drm/radeon/radeon_drv.h
28679 +++ b/drivers/gpu/drm/radeon/radeon_drv.h
28680 @@ -255,7 +255,7 @@ typedef struct drm_radeon_private {
28681
28682 /* SW interrupt */
28683 wait_queue_head_t swi_queue;
28684 - atomic_t swi_emitted;
28685 + atomic_unchecked_t swi_emitted;
28686 int vblank_crtc;
28687 uint32_t irq_enable_reg;
28688 uint32_t r500_disp_irq_reg;
28689 diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
28690 index 76ec0e9..6feb1a3 100644
28691 --- a/drivers/gpu/drm/radeon/radeon_fence.c
28692 +++ b/drivers/gpu/drm/radeon/radeon_fence.c
28693 @@ -78,7 +78,7 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
28694 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
28695 return 0;
28696 }
28697 - fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
28698 + fence->seq = atomic_add_return_unchecked(1, &rdev->fence_drv.seq);
28699 if (!rdev->cp.ready)
28700 /* FIXME: cp is not running assume everythings is done right
28701 * away
28702 @@ -373,7 +373,7 @@ int radeon_fence_driver_init(struct radeon_device *rdev)
28703 return r;
28704 }
28705 radeon_fence_write(rdev, 0);
28706 - atomic_set(&rdev->fence_drv.seq, 0);
28707 + atomic_set_unchecked(&rdev->fence_drv.seq, 0);
28708 INIT_LIST_HEAD(&rdev->fence_drv.created);
28709 INIT_LIST_HEAD(&rdev->fence_drv.emited);
28710 INIT_LIST_HEAD(&rdev->fence_drv.signaled);
28711 diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
28712 index 48b7cea..342236f 100644
28713 --- a/drivers/gpu/drm/radeon/radeon_ioc32.c
28714 +++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
28715 @@ -359,7 +359,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
28716 request = compat_alloc_user_space(sizeof(*request));
28717 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
28718 || __put_user(req32.param, &request->param)
28719 - || __put_user((void __user *)(unsigned long)req32.value,
28720 + || __put_user((unsigned long)req32.value,
28721 &request->value))
28722 return -EFAULT;
28723
28724 diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
28725 index 00da384..32f972d 100644
28726 --- a/drivers/gpu/drm/radeon/radeon_irq.c
28727 +++ b/drivers/gpu/drm/radeon/radeon_irq.c
28728 @@ -225,8 +225,8 @@ static int radeon_emit_irq(struct drm_device * dev)
28729 unsigned int ret;
28730 RING_LOCALS;
28731
28732 - atomic_inc(&dev_priv->swi_emitted);
28733 - ret = atomic_read(&dev_priv->swi_emitted);
28734 + atomic_inc_unchecked(&dev_priv->swi_emitted);
28735 + ret = atomic_read_unchecked(&dev_priv->swi_emitted);
28736
28737 BEGIN_RING(4);
28738 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
28739 @@ -352,7 +352,7 @@ int radeon_driver_irq_postinstall(struct drm_device *dev)
28740 drm_radeon_private_t *dev_priv =
28741 (drm_radeon_private_t *) dev->dev_private;
28742
28743 - atomic_set(&dev_priv->swi_emitted, 0);
28744 + atomic_set_unchecked(&dev_priv->swi_emitted, 0);
28745 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
28746
28747 dev->max_vblank_count = 0x001fffff;
28748 diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
28749 index e8422ae..d22d4a8 100644
28750 --- a/drivers/gpu/drm/radeon/radeon_state.c
28751 +++ b/drivers/gpu/drm/radeon/radeon_state.c
28752 @@ -2168,7 +2168,7 @@ static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file *
28753 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
28754 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
28755
28756 - if (DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
28757 + if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
28758 sarea_priv->nbox * sizeof(depth_boxes[0])))
28759 return -EFAULT;
28760
28761 @@ -3031,7 +3031,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
28762 {
28763 drm_radeon_private_t *dev_priv = dev->dev_private;
28764 drm_radeon_getparam_t *param = data;
28765 - int value;
28766 + int value = 0;
28767
28768 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
28769
28770 diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
28771 index 0b5468b..9c4b308 100644
28772 --- a/drivers/gpu/drm/radeon/radeon_ttm.c
28773 +++ b/drivers/gpu/drm/radeon/radeon_ttm.c
28774 @@ -672,8 +672,10 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
28775 }
28776 if (unlikely(ttm_vm_ops == NULL)) {
28777 ttm_vm_ops = vma->vm_ops;
28778 - radeon_ttm_vm_ops = *ttm_vm_ops;
28779 - radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
28780 + pax_open_kernel();
28781 + memcpy((void *)&radeon_ttm_vm_ops, ttm_vm_ops, sizeof(radeon_ttm_vm_ops));
28782 + *(void **)&radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
28783 + pax_close_kernel();
28784 }
28785 vma->vm_ops = &radeon_ttm_vm_ops;
28786 return 0;
28787 diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
28788 index a9049ed..501f284 100644
28789 --- a/drivers/gpu/drm/radeon/rs690.c
28790 +++ b/drivers/gpu/drm/radeon/rs690.c
28791 @@ -304,9 +304,11 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
28792 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
28793 rdev->pm.sideport_bandwidth.full)
28794 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
28795 - read_delay_latency.full = dfixed_const(370 * 800 * 1000);
28796 + read_delay_latency.full = dfixed_const(800 * 1000);
28797 read_delay_latency.full = dfixed_div(read_delay_latency,
28798 rdev->pm.igp_sideport_mclk);
28799 + a.full = dfixed_const(370);
28800 + read_delay_latency.full = dfixed_mul(read_delay_latency, a);
28801 } else {
28802 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
28803 rdev->pm.k8_bandwidth.full)
28804 diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
28805 index 727e93d..1565650 100644
28806 --- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
28807 +++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
28808 @@ -398,9 +398,9 @@ static int ttm_pool_get_num_unused_pages(void)
28809 static int ttm_pool_mm_shrink(struct shrinker *shrink,
28810 struct shrink_control *sc)
28811 {
28812 - static atomic_t start_pool = ATOMIC_INIT(0);
28813 + static atomic_unchecked_t start_pool = ATOMIC_INIT(0);
28814 unsigned i;
28815 - unsigned pool_offset = atomic_add_return(1, &start_pool);
28816 + unsigned pool_offset = atomic_add_return_unchecked(1, &start_pool);
28817 struct ttm_page_pool *pool;
28818 int shrink_pages = sc->nr_to_scan;
28819
28820 diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
28821 index 9cf87d9..2000b7d 100644
28822 --- a/drivers/gpu/drm/via/via_drv.h
28823 +++ b/drivers/gpu/drm/via/via_drv.h
28824 @@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
28825 typedef uint32_t maskarray_t[5];
28826
28827 typedef struct drm_via_irq {
28828 - atomic_t irq_received;
28829 + atomic_unchecked_t irq_received;
28830 uint32_t pending_mask;
28831 uint32_t enable_mask;
28832 wait_queue_head_t irq_queue;
28833 @@ -75,7 +75,7 @@ typedef struct drm_via_private {
28834 struct timeval last_vblank;
28835 int last_vblank_valid;
28836 unsigned usec_per_vblank;
28837 - atomic_t vbl_received;
28838 + atomic_unchecked_t vbl_received;
28839 drm_via_state_t hc_state;
28840 char pci_buf[VIA_PCI_BUF_SIZE];
28841 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
28842 diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
28843 index d391f48..10c8ca3 100644
28844 --- a/drivers/gpu/drm/via/via_irq.c
28845 +++ b/drivers/gpu/drm/via/via_irq.c
28846 @@ -102,7 +102,7 @@ u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
28847 if (crtc != 0)
28848 return 0;
28849
28850 - return atomic_read(&dev_priv->vbl_received);
28851 + return atomic_read_unchecked(&dev_priv->vbl_received);
28852 }
28853
28854 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
28855 @@ -117,8 +117,8 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
28856
28857 status = VIA_READ(VIA_REG_INTERRUPT);
28858 if (status & VIA_IRQ_VBLANK_PENDING) {
28859 - atomic_inc(&dev_priv->vbl_received);
28860 - if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
28861 + atomic_inc_unchecked(&dev_priv->vbl_received);
28862 + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
28863 do_gettimeofday(&cur_vblank);
28864 if (dev_priv->last_vblank_valid) {
28865 dev_priv->usec_per_vblank =
28866 @@ -128,7 +128,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
28867 dev_priv->last_vblank = cur_vblank;
28868 dev_priv->last_vblank_valid = 1;
28869 }
28870 - if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
28871 + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
28872 DRM_DEBUG("US per vblank is: %u\n",
28873 dev_priv->usec_per_vblank);
28874 }
28875 @@ -138,7 +138,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
28876
28877 for (i = 0; i < dev_priv->num_irqs; ++i) {
28878 if (status & cur_irq->pending_mask) {
28879 - atomic_inc(&cur_irq->irq_received);
28880 + atomic_inc_unchecked(&cur_irq->irq_received);
28881 DRM_WAKEUP(&cur_irq->irq_queue);
28882 handled = 1;
28883 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
28884 @@ -243,11 +243,11 @@ via_driver_irq_wait(struct drm_device *dev, unsigned int irq, int force_sequence
28885 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
28886 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
28887 masks[irq][4]));
28888 - cur_irq_sequence = atomic_read(&cur_irq->irq_received);
28889 + cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
28890 } else {
28891 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
28892 (((cur_irq_sequence =
28893 - atomic_read(&cur_irq->irq_received)) -
28894 + atomic_read_unchecked(&cur_irq->irq_received)) -
28895 *sequence) <= (1 << 23)));
28896 }
28897 *sequence = cur_irq_sequence;
28898 @@ -285,7 +285,7 @@ void via_driver_irq_preinstall(struct drm_device *dev)
28899 }
28900
28901 for (i = 0; i < dev_priv->num_irqs; ++i) {
28902 - atomic_set(&cur_irq->irq_received, 0);
28903 + atomic_set_unchecked(&cur_irq->irq_received, 0);
28904 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
28905 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
28906 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
28907 @@ -367,7 +367,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
28908 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
28909 case VIA_IRQ_RELATIVE:
28910 irqwait->request.sequence +=
28911 - atomic_read(&cur_irq->irq_received);
28912 + atomic_read_unchecked(&cur_irq->irq_received);
28913 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
28914 case VIA_IRQ_ABSOLUTE:
28915 break;
28916 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
28917 index dc27970..f18b008 100644
28918 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
28919 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
28920 @@ -260,7 +260,7 @@ struct vmw_private {
28921 * Fencing and IRQs.
28922 */
28923
28924 - atomic_t marker_seq;
28925 + atomic_unchecked_t marker_seq;
28926 wait_queue_head_t fence_queue;
28927 wait_queue_head_t fifo_queue;
28928 int fence_queue_waiters; /* Protected by hw_mutex */
28929 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
28930 index a0c2f12..68ae6cb 100644
28931 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
28932 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
28933 @@ -137,7 +137,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
28934 (unsigned int) min,
28935 (unsigned int) fifo->capabilities);
28936
28937 - atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
28938 + atomic_set_unchecked(&dev_priv->marker_seq, dev_priv->last_read_seqno);
28939 iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
28940 vmw_marker_queue_init(&fifo->marker_queue);
28941 return vmw_fifo_send_fence(dev_priv, &dummy);
28942 @@ -355,7 +355,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
28943 if (reserveable)
28944 iowrite32(bytes, fifo_mem +
28945 SVGA_FIFO_RESERVED);
28946 - return fifo_mem + (next_cmd >> 2);
28947 + return (__le32 __force_kernel *)fifo_mem + (next_cmd >> 2);
28948 } else {
28949 need_bounce = true;
28950 }
28951 @@ -475,7 +475,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
28952
28953 fm = vmw_fifo_reserve(dev_priv, bytes);
28954 if (unlikely(fm == NULL)) {
28955 - *seqno = atomic_read(&dev_priv->marker_seq);
28956 + *seqno = atomic_read_unchecked(&dev_priv->marker_seq);
28957 ret = -ENOMEM;
28958 (void)vmw_fallback_wait(dev_priv, false, true, *seqno,
28959 false, 3*HZ);
28960 @@ -483,7 +483,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
28961 }
28962
28963 do {
28964 - *seqno = atomic_add_return(1, &dev_priv->marker_seq);
28965 + *seqno = atomic_add_return_unchecked(1, &dev_priv->marker_seq);
28966 } while (*seqno == 0);
28967
28968 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
28969 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
28970 index cabc95f..14b3d77 100644
28971 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
28972 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
28973 @@ -107,7 +107,7 @@ bool vmw_seqno_passed(struct vmw_private *dev_priv,
28974 * emitted. Then the fence is stale and signaled.
28975 */
28976
28977 - ret = ((atomic_read(&dev_priv->marker_seq) - seqno)
28978 + ret = ((atomic_read_unchecked(&dev_priv->marker_seq) - seqno)
28979 > VMW_FENCE_WRAP);
28980
28981 return ret;
28982 @@ -138,7 +138,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
28983
28984 if (fifo_idle)
28985 down_read(&fifo_state->rwsem);
28986 - signal_seq = atomic_read(&dev_priv->marker_seq);
28987 + signal_seq = atomic_read_unchecked(&dev_priv->marker_seq);
28988 ret = 0;
28989
28990 for (;;) {
28991 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
28992 index 8a8725c..afed796 100644
28993 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
28994 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
28995 @@ -151,7 +151,7 @@ int vmw_wait_lag(struct vmw_private *dev_priv,
28996 while (!vmw_lag_lt(queue, us)) {
28997 spin_lock(&queue->lock);
28998 if (list_empty(&queue->head))
28999 - seqno = atomic_read(&dev_priv->marker_seq);
29000 + seqno = atomic_read_unchecked(&dev_priv->marker_seq);
29001 else {
29002 marker = list_first_entry(&queue->head,
29003 struct vmw_marker, head);
29004 diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
29005 index bb656d8..4169fca 100644
29006 --- a/drivers/hid/hid-core.c
29007 +++ b/drivers/hid/hid-core.c
29008 @@ -2012,7 +2012,7 @@ static bool hid_ignore(struct hid_device *hdev)
29009
29010 int hid_add_device(struct hid_device *hdev)
29011 {
29012 - static atomic_t id = ATOMIC_INIT(0);
29013 + static atomic_unchecked_t id = ATOMIC_INIT(0);
29014 int ret;
29015
29016 if (WARN_ON(hdev->status & HID_STAT_ADDED))
29017 @@ -2027,7 +2027,7 @@ int hid_add_device(struct hid_device *hdev)
29018 /* XXX hack, any other cleaner solution after the driver core
29019 * is converted to allow more than 20 bytes as the device name? */
29020 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
29021 - hdev->vendor, hdev->product, atomic_inc_return(&id));
29022 + hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
29023
29024 hid_debug_register(hdev, dev_name(&hdev->dev));
29025 ret = device_add(&hdev->dev);
29026 diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
29027 index 4ef02b2..8a96831 100644
29028 --- a/drivers/hid/usbhid/hiddev.c
29029 +++ b/drivers/hid/usbhid/hiddev.c
29030 @@ -624,7 +624,7 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
29031 break;
29032
29033 case HIDIOCAPPLICATION:
29034 - if (arg < 0 || arg >= hid->maxapplication)
29035 + if (arg >= hid->maxapplication)
29036 break;
29037
29038 for (i = 0; i < hid->maxcollection; i++)
29039 diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
29040 index 4065374..10ed7dc 100644
29041 --- a/drivers/hv/channel.c
29042 +++ b/drivers/hv/channel.c
29043 @@ -400,8 +400,8 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
29044 int ret = 0;
29045 int t;
29046
29047 - next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
29048 - atomic_inc(&vmbus_connection.next_gpadl_handle);
29049 + next_gpadl_handle = atomic_read_unchecked(&vmbus_connection.next_gpadl_handle);
29050 + atomic_inc_unchecked(&vmbus_connection.next_gpadl_handle);
29051
29052 ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
29053 if (ret)
29054 diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
29055 index 0fb100e..baf87e5 100644
29056 --- a/drivers/hv/hv.c
29057 +++ b/drivers/hv/hv.c
29058 @@ -132,7 +132,7 @@ static u64 do_hypercall(u64 control, void *input, void *output)
29059 u64 output_address = (output) ? virt_to_phys(output) : 0;
29060 u32 output_address_hi = output_address >> 32;
29061 u32 output_address_lo = output_address & 0xFFFFFFFF;
29062 - void *hypercall_page = hv_context.hypercall_page;
29063 + void *hypercall_page = ktva_ktla(hv_context.hypercall_page);
29064
29065 __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
29066 "=a"(hv_status_lo) : "d" (control_hi),
29067 diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
29068 index 0aee112..b72d21f 100644
29069 --- a/drivers/hv/hyperv_vmbus.h
29070 +++ b/drivers/hv/hyperv_vmbus.h
29071 @@ -556,7 +556,7 @@ enum vmbus_connect_state {
29072 struct vmbus_connection {
29073 enum vmbus_connect_state conn_state;
29074
29075 - atomic_t next_gpadl_handle;
29076 + atomic_unchecked_t next_gpadl_handle;
29077
29078 /*
29079 * Represents channel interrupts. Each bit position represents a
29080 diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
29081 index d2d0a2a..90b8f4d 100644
29082 --- a/drivers/hv/vmbus_drv.c
29083 +++ b/drivers/hv/vmbus_drv.c
29084 @@ -663,10 +663,10 @@ int vmbus_device_register(struct hv_device *child_device_obj)
29085 {
29086 int ret = 0;
29087
29088 - static atomic_t device_num = ATOMIC_INIT(0);
29089 + static atomic_unchecked_t device_num = ATOMIC_INIT(0);
29090
29091 dev_set_name(&child_device_obj->device, "vmbus_0_%d",
29092 - atomic_inc_return(&device_num));
29093 + atomic_inc_return_unchecked(&device_num));
29094
29095 child_device_obj->device.bus = &hv_bus;
29096 child_device_obj->device.parent = &hv_acpi_dev->dev;
29097 diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
29098 index 66f6729..2d6de0a 100644
29099 --- a/drivers/hwmon/acpi_power_meter.c
29100 +++ b/drivers/hwmon/acpi_power_meter.c
29101 @@ -316,8 +316,6 @@ static ssize_t set_trip(struct device *dev, struct device_attribute *devattr,
29102 return res;
29103
29104 temp /= 1000;
29105 - if (temp < 0)
29106 - return -EINVAL;
29107
29108 mutex_lock(&resource->lock);
29109 resource->trip[attr->index - 7] = temp;
29110 diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
29111 index 5357925..6cf0418 100644
29112 --- a/drivers/hwmon/sht15.c
29113 +++ b/drivers/hwmon/sht15.c
29114 @@ -166,7 +166,7 @@ struct sht15_data {
29115 int supply_uV;
29116 bool supply_uV_valid;
29117 struct work_struct update_supply_work;
29118 - atomic_t interrupt_handled;
29119 + atomic_unchecked_t interrupt_handled;
29120 };
29121
29122 /**
29123 @@ -509,13 +509,13 @@ static int sht15_measurement(struct sht15_data *data,
29124 return ret;
29125
29126 gpio_direction_input(data->pdata->gpio_data);
29127 - atomic_set(&data->interrupt_handled, 0);
29128 + atomic_set_unchecked(&data->interrupt_handled, 0);
29129
29130 enable_irq(gpio_to_irq(data->pdata->gpio_data));
29131 if (gpio_get_value(data->pdata->gpio_data) == 0) {
29132 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
29133 /* Only relevant if the interrupt hasn't occurred. */
29134 - if (!atomic_read(&data->interrupt_handled))
29135 + if (!atomic_read_unchecked(&data->interrupt_handled))
29136 schedule_work(&data->read_work);
29137 }
29138 ret = wait_event_timeout(data->wait_queue,
29139 @@ -782,7 +782,7 @@ static irqreturn_t sht15_interrupt_fired(int irq, void *d)
29140
29141 /* First disable the interrupt */
29142 disable_irq_nosync(irq);
29143 - atomic_inc(&data->interrupt_handled);
29144 + atomic_inc_unchecked(&data->interrupt_handled);
29145 /* Then schedule a reading work struct */
29146 if (data->state != SHT15_READING_NOTHING)
29147 schedule_work(&data->read_work);
29148 @@ -804,11 +804,11 @@ static void sht15_bh_read_data(struct work_struct *work_s)
29149 * If not, then start the interrupt again - care here as could
29150 * have gone low in meantime so verify it hasn't!
29151 */
29152 - atomic_set(&data->interrupt_handled, 0);
29153 + atomic_set_unchecked(&data->interrupt_handled, 0);
29154 enable_irq(gpio_to_irq(data->pdata->gpio_data));
29155 /* If still not occurred or another handler has been scheduled */
29156 if (gpio_get_value(data->pdata->gpio_data)
29157 - || atomic_read(&data->interrupt_handled))
29158 + || atomic_read_unchecked(&data->interrupt_handled))
29159 return;
29160 }
29161
29162 diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c
29163 index 378fcb5..5e91fa8 100644
29164 --- a/drivers/i2c/busses/i2c-amd756-s4882.c
29165 +++ b/drivers/i2c/busses/i2c-amd756-s4882.c
29166 @@ -43,7 +43,7 @@
29167 extern struct i2c_adapter amd756_smbus;
29168
29169 static struct i2c_adapter *s4882_adapter;
29170 -static struct i2c_algorithm *s4882_algo;
29171 +static i2c_algorithm_no_const *s4882_algo;
29172
29173 /* Wrapper access functions for multiplexed SMBus */
29174 static DEFINE_MUTEX(amd756_lock);
29175 diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c
29176 index 29015eb..af2d8e9 100644
29177 --- a/drivers/i2c/busses/i2c-nforce2-s4985.c
29178 +++ b/drivers/i2c/busses/i2c-nforce2-s4985.c
29179 @@ -41,7 +41,7 @@
29180 extern struct i2c_adapter *nforce2_smbus;
29181
29182 static struct i2c_adapter *s4985_adapter;
29183 -static struct i2c_algorithm *s4985_algo;
29184 +static i2c_algorithm_no_const *s4985_algo;
29185
29186 /* Wrapper access functions for multiplexed SMBus */
29187 static DEFINE_MUTEX(nforce2_lock);
29188 diff --git a/drivers/i2c/i2c-mux.c b/drivers/i2c/i2c-mux.c
29189 index d7a4833..7fae376 100644
29190 --- a/drivers/i2c/i2c-mux.c
29191 +++ b/drivers/i2c/i2c-mux.c
29192 @@ -28,7 +28,7 @@
29193 /* multiplexer per channel data */
29194 struct i2c_mux_priv {
29195 struct i2c_adapter adap;
29196 - struct i2c_algorithm algo;
29197 + i2c_algorithm_no_const algo;
29198
29199 struct i2c_adapter *parent;
29200 void *mux_dev; /* the mux chip/device */
29201 diff --git a/drivers/ide/aec62xx.c b/drivers/ide/aec62xx.c
29202 index 57d00ca..0145194 100644
29203 --- a/drivers/ide/aec62xx.c
29204 +++ b/drivers/ide/aec62xx.c
29205 @@ -181,7 +181,7 @@ static const struct ide_port_ops atp86x_port_ops = {
29206 .cable_detect = atp86x_cable_detect,
29207 };
29208
29209 -static const struct ide_port_info aec62xx_chipsets[] __devinitdata = {
29210 +static const struct ide_port_info aec62xx_chipsets[] __devinitconst = {
29211 { /* 0: AEC6210 */
29212 .name = DRV_NAME,
29213 .init_chipset = init_chipset_aec62xx,
29214 diff --git a/drivers/ide/alim15x3.c b/drivers/ide/alim15x3.c
29215 index 2c8016a..911a27c 100644
29216 --- a/drivers/ide/alim15x3.c
29217 +++ b/drivers/ide/alim15x3.c
29218 @@ -512,7 +512,7 @@ static const struct ide_dma_ops ali_dma_ops = {
29219 .dma_sff_read_status = ide_dma_sff_read_status,
29220 };
29221
29222 -static const struct ide_port_info ali15x3_chipset __devinitdata = {
29223 +static const struct ide_port_info ali15x3_chipset __devinitconst = {
29224 .name = DRV_NAME,
29225 .init_chipset = init_chipset_ali15x3,
29226 .init_hwif = init_hwif_ali15x3,
29227 diff --git a/drivers/ide/amd74xx.c b/drivers/ide/amd74xx.c
29228 index 3747b25..56fc995 100644
29229 --- a/drivers/ide/amd74xx.c
29230 +++ b/drivers/ide/amd74xx.c
29231 @@ -223,7 +223,7 @@ static const struct ide_port_ops amd_port_ops = {
29232 .udma_mask = udma, \
29233 }
29234
29235 -static const struct ide_port_info amd74xx_chipsets[] __devinitdata = {
29236 +static const struct ide_port_info amd74xx_chipsets[] __devinitconst = {
29237 /* 0: AMD7401 */ DECLARE_AMD_DEV(0x00, ATA_UDMA2),
29238 /* 1: AMD7409 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA4),
29239 /* 2: AMD7411/7441 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA5),
29240 diff --git a/drivers/ide/atiixp.c b/drivers/ide/atiixp.c
29241 index 15f0ead..cb43480 100644
29242 --- a/drivers/ide/atiixp.c
29243 +++ b/drivers/ide/atiixp.c
29244 @@ -139,7 +139,7 @@ static const struct ide_port_ops atiixp_port_ops = {
29245 .cable_detect = atiixp_cable_detect,
29246 };
29247
29248 -static const struct ide_port_info atiixp_pci_info[] __devinitdata = {
29249 +static const struct ide_port_info atiixp_pci_info[] __devinitconst = {
29250 { /* 0: IXP200/300/400/700 */
29251 .name = DRV_NAME,
29252 .enablebits = {{0x48,0x01,0x00}, {0x48,0x08,0x00}},
29253 diff --git a/drivers/ide/cmd64x.c b/drivers/ide/cmd64x.c
29254 index 5f80312..d1fc438 100644
29255 --- a/drivers/ide/cmd64x.c
29256 +++ b/drivers/ide/cmd64x.c
29257 @@ -327,7 +327,7 @@ static const struct ide_dma_ops cmd646_rev1_dma_ops = {
29258 .dma_sff_read_status = ide_dma_sff_read_status,
29259 };
29260
29261 -static const struct ide_port_info cmd64x_chipsets[] __devinitdata = {
29262 +static const struct ide_port_info cmd64x_chipsets[] __devinitconst = {
29263 { /* 0: CMD643 */
29264 .name = DRV_NAME,
29265 .init_chipset = init_chipset_cmd64x,
29266 diff --git a/drivers/ide/cs5520.c b/drivers/ide/cs5520.c
29267 index 2c1e5f7..1444762 100644
29268 --- a/drivers/ide/cs5520.c
29269 +++ b/drivers/ide/cs5520.c
29270 @@ -94,7 +94,7 @@ static const struct ide_port_ops cs5520_port_ops = {
29271 .set_dma_mode = cs5520_set_dma_mode,
29272 };
29273
29274 -static const struct ide_port_info cyrix_chipset __devinitdata = {
29275 +static const struct ide_port_info cyrix_chipset __devinitconst = {
29276 .name = DRV_NAME,
29277 .enablebits = { { 0x60, 0x01, 0x01 }, { 0x60, 0x02, 0x02 } },
29278 .port_ops = &cs5520_port_ops,
29279 diff --git a/drivers/ide/cs5530.c b/drivers/ide/cs5530.c
29280 index 4dc4eb9..49b40ad 100644
29281 --- a/drivers/ide/cs5530.c
29282 +++ b/drivers/ide/cs5530.c
29283 @@ -245,7 +245,7 @@ static const struct ide_port_ops cs5530_port_ops = {
29284 .udma_filter = cs5530_udma_filter,
29285 };
29286
29287 -static const struct ide_port_info cs5530_chipset __devinitdata = {
29288 +static const struct ide_port_info cs5530_chipset __devinitconst = {
29289 .name = DRV_NAME,
29290 .init_chipset = init_chipset_cs5530,
29291 .init_hwif = init_hwif_cs5530,
29292 diff --git a/drivers/ide/cs5535.c b/drivers/ide/cs5535.c
29293 index 5059faf..18d4c85 100644
29294 --- a/drivers/ide/cs5535.c
29295 +++ b/drivers/ide/cs5535.c
29296 @@ -170,7 +170,7 @@ static const struct ide_port_ops cs5535_port_ops = {
29297 .cable_detect = cs5535_cable_detect,
29298 };
29299
29300 -static const struct ide_port_info cs5535_chipset __devinitdata = {
29301 +static const struct ide_port_info cs5535_chipset __devinitconst = {
29302 .name = DRV_NAME,
29303 .port_ops = &cs5535_port_ops,
29304 .host_flags = IDE_HFLAG_SINGLE | IDE_HFLAG_POST_SET_MODE,
29305 diff --git a/drivers/ide/cy82c693.c b/drivers/ide/cy82c693.c
29306 index 847553f..3ffb49d 100644
29307 --- a/drivers/ide/cy82c693.c
29308 +++ b/drivers/ide/cy82c693.c
29309 @@ -163,7 +163,7 @@ static const struct ide_port_ops cy82c693_port_ops = {
29310 .set_dma_mode = cy82c693_set_dma_mode,
29311 };
29312
29313 -static const struct ide_port_info cy82c693_chipset __devinitdata = {
29314 +static const struct ide_port_info cy82c693_chipset __devinitconst = {
29315 .name = DRV_NAME,
29316 .init_iops = init_iops_cy82c693,
29317 .port_ops = &cy82c693_port_ops,
29318 diff --git a/drivers/ide/hpt366.c b/drivers/ide/hpt366.c
29319 index 58c51cd..4aec3b8 100644
29320 --- a/drivers/ide/hpt366.c
29321 +++ b/drivers/ide/hpt366.c
29322 @@ -443,7 +443,7 @@ static struct hpt_timings hpt37x_timings = {
29323 }
29324 };
29325
29326 -static const struct hpt_info hpt36x __devinitdata = {
29327 +static const struct hpt_info hpt36x __devinitconst = {
29328 .chip_name = "HPT36x",
29329 .chip_type = HPT36x,
29330 .udma_mask = HPT366_ALLOW_ATA66_3 ? (HPT366_ALLOW_ATA66_4 ? ATA_UDMA4 : ATA_UDMA3) : ATA_UDMA2,
29331 @@ -451,7 +451,7 @@ static const struct hpt_info hpt36x __devinitdata = {
29332 .timings = &hpt36x_timings
29333 };
29334
29335 -static const struct hpt_info hpt370 __devinitdata = {
29336 +static const struct hpt_info hpt370 __devinitconst = {
29337 .chip_name = "HPT370",
29338 .chip_type = HPT370,
29339 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
29340 @@ -459,7 +459,7 @@ static const struct hpt_info hpt370 __devinitdata = {
29341 .timings = &hpt37x_timings
29342 };
29343
29344 -static const struct hpt_info hpt370a __devinitdata = {
29345 +static const struct hpt_info hpt370a __devinitconst = {
29346 .chip_name = "HPT370A",
29347 .chip_type = HPT370A,
29348 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
29349 @@ -467,7 +467,7 @@ static const struct hpt_info hpt370a __devinitdata = {
29350 .timings = &hpt37x_timings
29351 };
29352
29353 -static const struct hpt_info hpt374 __devinitdata = {
29354 +static const struct hpt_info hpt374 __devinitconst = {
29355 .chip_name = "HPT374",
29356 .chip_type = HPT374,
29357 .udma_mask = ATA_UDMA5,
29358 @@ -475,7 +475,7 @@ static const struct hpt_info hpt374 __devinitdata = {
29359 .timings = &hpt37x_timings
29360 };
29361
29362 -static const struct hpt_info hpt372 __devinitdata = {
29363 +static const struct hpt_info hpt372 __devinitconst = {
29364 .chip_name = "HPT372",
29365 .chip_type = HPT372,
29366 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
29367 @@ -483,7 +483,7 @@ static const struct hpt_info hpt372 __devinitdata = {
29368 .timings = &hpt37x_timings
29369 };
29370
29371 -static const struct hpt_info hpt372a __devinitdata = {
29372 +static const struct hpt_info hpt372a __devinitconst = {
29373 .chip_name = "HPT372A",
29374 .chip_type = HPT372A,
29375 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
29376 @@ -491,7 +491,7 @@ static const struct hpt_info hpt372a __devinitdata = {
29377 .timings = &hpt37x_timings
29378 };
29379
29380 -static const struct hpt_info hpt302 __devinitdata = {
29381 +static const struct hpt_info hpt302 __devinitconst = {
29382 .chip_name = "HPT302",
29383 .chip_type = HPT302,
29384 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
29385 @@ -499,7 +499,7 @@ static const struct hpt_info hpt302 __devinitdata = {
29386 .timings = &hpt37x_timings
29387 };
29388
29389 -static const struct hpt_info hpt371 __devinitdata = {
29390 +static const struct hpt_info hpt371 __devinitconst = {
29391 .chip_name = "HPT371",
29392 .chip_type = HPT371,
29393 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
29394 @@ -507,7 +507,7 @@ static const struct hpt_info hpt371 __devinitdata = {
29395 .timings = &hpt37x_timings
29396 };
29397
29398 -static const struct hpt_info hpt372n __devinitdata = {
29399 +static const struct hpt_info hpt372n __devinitconst = {
29400 .chip_name = "HPT372N",
29401 .chip_type = HPT372N,
29402 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
29403 @@ -515,7 +515,7 @@ static const struct hpt_info hpt372n __devinitdata = {
29404 .timings = &hpt37x_timings
29405 };
29406
29407 -static const struct hpt_info hpt302n __devinitdata = {
29408 +static const struct hpt_info hpt302n __devinitconst = {
29409 .chip_name = "HPT302N",
29410 .chip_type = HPT302N,
29411 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
29412 @@ -523,7 +523,7 @@ static const struct hpt_info hpt302n __devinitdata = {
29413 .timings = &hpt37x_timings
29414 };
29415
29416 -static const struct hpt_info hpt371n __devinitdata = {
29417 +static const struct hpt_info hpt371n __devinitconst = {
29418 .chip_name = "HPT371N",
29419 .chip_type = HPT371N,
29420 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
29421 @@ -1361,7 +1361,7 @@ static const struct ide_dma_ops hpt36x_dma_ops = {
29422 .dma_sff_read_status = ide_dma_sff_read_status,
29423 };
29424
29425 -static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
29426 +static const struct ide_port_info hpt366_chipsets[] __devinitconst = {
29427 { /* 0: HPT36x */
29428 .name = DRV_NAME,
29429 .init_chipset = init_chipset_hpt366,
29430 diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
29431 index 8126824..55a2798 100644
29432 --- a/drivers/ide/ide-cd.c
29433 +++ b/drivers/ide/ide-cd.c
29434 @@ -768,7 +768,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
29435 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
29436 if ((unsigned long)buf & alignment
29437 || blk_rq_bytes(rq) & q->dma_pad_mask
29438 - || object_is_on_stack(buf))
29439 + || object_starts_on_stack(buf))
29440 drive->dma = 0;
29441 }
29442 }
29443 diff --git a/drivers/ide/ide-pci-generic.c b/drivers/ide/ide-pci-generic.c
29444 index a743e68..1cfd674 100644
29445 --- a/drivers/ide/ide-pci-generic.c
29446 +++ b/drivers/ide/ide-pci-generic.c
29447 @@ -53,7 +53,7 @@ static const struct ide_port_ops netcell_port_ops = {
29448 .udma_mask = ATA_UDMA6, \
29449 }
29450
29451 -static const struct ide_port_info generic_chipsets[] __devinitdata = {
29452 +static const struct ide_port_info generic_chipsets[] __devinitconst = {
29453 /* 0: Unknown */
29454 DECLARE_GENERIC_PCI_DEV(0),
29455
29456 diff --git a/drivers/ide/it8172.c b/drivers/ide/it8172.c
29457 index 560e66d..d5dd180 100644
29458 --- a/drivers/ide/it8172.c
29459 +++ b/drivers/ide/it8172.c
29460 @@ -115,7 +115,7 @@ static const struct ide_port_ops it8172_port_ops = {
29461 .set_dma_mode = it8172_set_dma_mode,
29462 };
29463
29464 -static const struct ide_port_info it8172_port_info __devinitdata = {
29465 +static const struct ide_port_info it8172_port_info __devinitconst = {
29466 .name = DRV_NAME,
29467 .port_ops = &it8172_port_ops,
29468 .enablebits = { {0x41, 0x80, 0x80}, {0x00, 0x00, 0x00} },
29469 diff --git a/drivers/ide/it8213.c b/drivers/ide/it8213.c
29470 index 46816ba..1847aeb 100644
29471 --- a/drivers/ide/it8213.c
29472 +++ b/drivers/ide/it8213.c
29473 @@ -156,7 +156,7 @@ static const struct ide_port_ops it8213_port_ops = {
29474 .cable_detect = it8213_cable_detect,
29475 };
29476
29477 -static const struct ide_port_info it8213_chipset __devinitdata = {
29478 +static const struct ide_port_info it8213_chipset __devinitconst = {
29479 .name = DRV_NAME,
29480 .enablebits = { {0x41, 0x80, 0x80} },
29481 .port_ops = &it8213_port_ops,
29482 diff --git a/drivers/ide/it821x.c b/drivers/ide/it821x.c
29483 index 2e3169f..c5611db 100644
29484 --- a/drivers/ide/it821x.c
29485 +++ b/drivers/ide/it821x.c
29486 @@ -630,7 +630,7 @@ static const struct ide_port_ops it821x_port_ops = {
29487 .cable_detect = it821x_cable_detect,
29488 };
29489
29490 -static const struct ide_port_info it821x_chipset __devinitdata = {
29491 +static const struct ide_port_info it821x_chipset __devinitconst = {
29492 .name = DRV_NAME,
29493 .init_chipset = init_chipset_it821x,
29494 .init_hwif = init_hwif_it821x,
29495 diff --git a/drivers/ide/jmicron.c b/drivers/ide/jmicron.c
29496 index 74c2c4a..efddd7d 100644
29497 --- a/drivers/ide/jmicron.c
29498 +++ b/drivers/ide/jmicron.c
29499 @@ -102,7 +102,7 @@ static const struct ide_port_ops jmicron_port_ops = {
29500 .cable_detect = jmicron_cable_detect,
29501 };
29502
29503 -static const struct ide_port_info jmicron_chipset __devinitdata = {
29504 +static const struct ide_port_info jmicron_chipset __devinitconst = {
29505 .name = DRV_NAME,
29506 .enablebits = { { 0x40, 0x01, 0x01 }, { 0x40, 0x10, 0x10 } },
29507 .port_ops = &jmicron_port_ops,
29508 diff --git a/drivers/ide/ns87415.c b/drivers/ide/ns87415.c
29509 index 95327a2..73f78d8 100644
29510 --- a/drivers/ide/ns87415.c
29511 +++ b/drivers/ide/ns87415.c
29512 @@ -293,7 +293,7 @@ static const struct ide_dma_ops ns87415_dma_ops = {
29513 .dma_sff_read_status = superio_dma_sff_read_status,
29514 };
29515
29516 -static const struct ide_port_info ns87415_chipset __devinitdata = {
29517 +static const struct ide_port_info ns87415_chipset __devinitconst = {
29518 .name = DRV_NAME,
29519 .init_hwif = init_hwif_ns87415,
29520 .tp_ops = &ns87415_tp_ops,
29521 diff --git a/drivers/ide/opti621.c b/drivers/ide/opti621.c
29522 index 1a53a4c..39edc66 100644
29523 --- a/drivers/ide/opti621.c
29524 +++ b/drivers/ide/opti621.c
29525 @@ -131,7 +131,7 @@ static const struct ide_port_ops opti621_port_ops = {
29526 .set_pio_mode = opti621_set_pio_mode,
29527 };
29528
29529 -static const struct ide_port_info opti621_chipset __devinitdata = {
29530 +static const struct ide_port_info opti621_chipset __devinitconst = {
29531 .name = DRV_NAME,
29532 .enablebits = { {0x45, 0x80, 0x00}, {0x40, 0x08, 0x00} },
29533 .port_ops = &opti621_port_ops,
29534 diff --git a/drivers/ide/pdc202xx_new.c b/drivers/ide/pdc202xx_new.c
29535 index 9546fe2..2e5ceb6 100644
29536 --- a/drivers/ide/pdc202xx_new.c
29537 +++ b/drivers/ide/pdc202xx_new.c
29538 @@ -465,7 +465,7 @@ static const struct ide_port_ops pdcnew_port_ops = {
29539 .udma_mask = udma, \
29540 }
29541
29542 -static const struct ide_port_info pdcnew_chipsets[] __devinitdata = {
29543 +static const struct ide_port_info pdcnew_chipsets[] __devinitconst = {
29544 /* 0: PDC202{68,70} */ DECLARE_PDCNEW_DEV(ATA_UDMA5),
29545 /* 1: PDC202{69,71,75,76,77} */ DECLARE_PDCNEW_DEV(ATA_UDMA6),
29546 };
29547 diff --git a/drivers/ide/pdc202xx_old.c b/drivers/ide/pdc202xx_old.c
29548 index 3a35ec6..5634510 100644
29549 --- a/drivers/ide/pdc202xx_old.c
29550 +++ b/drivers/ide/pdc202xx_old.c
29551 @@ -270,7 +270,7 @@ static const struct ide_dma_ops pdc2026x_dma_ops = {
29552 .max_sectors = sectors, \
29553 }
29554
29555 -static const struct ide_port_info pdc202xx_chipsets[] __devinitdata = {
29556 +static const struct ide_port_info pdc202xx_chipsets[] __devinitconst = {
29557 { /* 0: PDC20246 */
29558 .name = DRV_NAME,
29559 .init_chipset = init_chipset_pdc202xx,
29560 diff --git a/drivers/ide/piix.c b/drivers/ide/piix.c
29561 index 1892e81..fe0fd60 100644
29562 --- a/drivers/ide/piix.c
29563 +++ b/drivers/ide/piix.c
29564 @@ -344,7 +344,7 @@ static const struct ide_port_ops ich_port_ops = {
29565 .udma_mask = udma, \
29566 }
29567
29568 -static const struct ide_port_info piix_pci_info[] __devinitdata = {
29569 +static const struct ide_port_info piix_pci_info[] __devinitconst = {
29570 /* 0: MPIIX */
29571 { /*
29572 * MPIIX actually has only a single IDE channel mapped to
29573 diff --git a/drivers/ide/rz1000.c b/drivers/ide/rz1000.c
29574 index a6414a8..c04173e 100644
29575 --- a/drivers/ide/rz1000.c
29576 +++ b/drivers/ide/rz1000.c
29577 @@ -38,7 +38,7 @@ static int __devinit rz1000_disable_readahead(struct pci_dev *dev)
29578 }
29579 }
29580
29581 -static const struct ide_port_info rz1000_chipset __devinitdata = {
29582 +static const struct ide_port_info rz1000_chipset __devinitconst = {
29583 .name = DRV_NAME,
29584 .host_flags = IDE_HFLAG_NO_DMA,
29585 };
29586 diff --git a/drivers/ide/sc1200.c b/drivers/ide/sc1200.c
29587 index 356b9b5..d4758eb 100644
29588 --- a/drivers/ide/sc1200.c
29589 +++ b/drivers/ide/sc1200.c
29590 @@ -291,7 +291,7 @@ static const struct ide_dma_ops sc1200_dma_ops = {
29591 .dma_sff_read_status = ide_dma_sff_read_status,
29592 };
29593
29594 -static const struct ide_port_info sc1200_chipset __devinitdata = {
29595 +static const struct ide_port_info sc1200_chipset __devinitconst = {
29596 .name = DRV_NAME,
29597 .port_ops = &sc1200_port_ops,
29598 .dma_ops = &sc1200_dma_ops,
29599 diff --git a/drivers/ide/scc_pata.c b/drivers/ide/scc_pata.c
29600 index b7f5b0c..9701038 100644
29601 --- a/drivers/ide/scc_pata.c
29602 +++ b/drivers/ide/scc_pata.c
29603 @@ -811,7 +811,7 @@ static const struct ide_dma_ops scc_dma_ops = {
29604 .dma_sff_read_status = scc_dma_sff_read_status,
29605 };
29606
29607 -static const struct ide_port_info scc_chipset __devinitdata = {
29608 +static const struct ide_port_info scc_chipset __devinitconst = {
29609 .name = "sccIDE",
29610 .init_iops = init_iops_scc,
29611 .init_dma = scc_init_dma,
29612 diff --git a/drivers/ide/serverworks.c b/drivers/ide/serverworks.c
29613 index 35fb8da..24d72ef 100644
29614 --- a/drivers/ide/serverworks.c
29615 +++ b/drivers/ide/serverworks.c
29616 @@ -337,7 +337,7 @@ static const struct ide_port_ops svwks_port_ops = {
29617 .cable_detect = svwks_cable_detect,
29618 };
29619
29620 -static const struct ide_port_info serverworks_chipsets[] __devinitdata = {
29621 +static const struct ide_port_info serverworks_chipsets[] __devinitconst = {
29622 { /* 0: OSB4 */
29623 .name = DRV_NAME,
29624 .init_chipset = init_chipset_svwks,
29625 diff --git a/drivers/ide/siimage.c b/drivers/ide/siimage.c
29626 index ddeda44..46f7e30 100644
29627 --- a/drivers/ide/siimage.c
29628 +++ b/drivers/ide/siimage.c
29629 @@ -719,7 +719,7 @@ static const struct ide_dma_ops sil_dma_ops = {
29630 .udma_mask = ATA_UDMA6, \
29631 }
29632
29633 -static const struct ide_port_info siimage_chipsets[] __devinitdata = {
29634 +static const struct ide_port_info siimage_chipsets[] __devinitconst = {
29635 /* 0: SiI680 */ DECLARE_SII_DEV(&sil_pata_port_ops),
29636 /* 1: SiI3112 */ DECLARE_SII_DEV(&sil_sata_port_ops)
29637 };
29638 diff --git a/drivers/ide/sis5513.c b/drivers/ide/sis5513.c
29639 index 4a00225..09e61b4 100644
29640 --- a/drivers/ide/sis5513.c
29641 +++ b/drivers/ide/sis5513.c
29642 @@ -563,7 +563,7 @@ static const struct ide_port_ops sis_ata133_port_ops = {
29643 .cable_detect = sis_cable_detect,
29644 };
29645
29646 -static const struct ide_port_info sis5513_chipset __devinitdata = {
29647 +static const struct ide_port_info sis5513_chipset __devinitconst = {
29648 .name = DRV_NAME,
29649 .init_chipset = init_chipset_sis5513,
29650 .enablebits = { {0x4a, 0x02, 0x02}, {0x4a, 0x04, 0x04} },
29651 diff --git a/drivers/ide/sl82c105.c b/drivers/ide/sl82c105.c
29652 index f21dc2a..d051cd2 100644
29653 --- a/drivers/ide/sl82c105.c
29654 +++ b/drivers/ide/sl82c105.c
29655 @@ -299,7 +299,7 @@ static const struct ide_dma_ops sl82c105_dma_ops = {
29656 .dma_sff_read_status = ide_dma_sff_read_status,
29657 };
29658
29659 -static const struct ide_port_info sl82c105_chipset __devinitdata = {
29660 +static const struct ide_port_info sl82c105_chipset __devinitconst = {
29661 .name = DRV_NAME,
29662 .init_chipset = init_chipset_sl82c105,
29663 .enablebits = {{0x40,0x01,0x01}, {0x40,0x10,0x10}},
29664 diff --git a/drivers/ide/slc90e66.c b/drivers/ide/slc90e66.c
29665 index 864ffe0..863a5e9 100644
29666 --- a/drivers/ide/slc90e66.c
29667 +++ b/drivers/ide/slc90e66.c
29668 @@ -132,7 +132,7 @@ static const struct ide_port_ops slc90e66_port_ops = {
29669 .cable_detect = slc90e66_cable_detect,
29670 };
29671
29672 -static const struct ide_port_info slc90e66_chipset __devinitdata = {
29673 +static const struct ide_port_info slc90e66_chipset __devinitconst = {
29674 .name = DRV_NAME,
29675 .enablebits = { {0x41, 0x80, 0x80}, {0x43, 0x80, 0x80} },
29676 .port_ops = &slc90e66_port_ops,
29677 diff --git a/drivers/ide/tc86c001.c b/drivers/ide/tc86c001.c
29678 index 4799d5c..1794678 100644
29679 --- a/drivers/ide/tc86c001.c
29680 +++ b/drivers/ide/tc86c001.c
29681 @@ -192,7 +192,7 @@ static const struct ide_dma_ops tc86c001_dma_ops = {
29682 .dma_sff_read_status = ide_dma_sff_read_status,
29683 };
29684
29685 -static const struct ide_port_info tc86c001_chipset __devinitdata = {
29686 +static const struct ide_port_info tc86c001_chipset __devinitconst = {
29687 .name = DRV_NAME,
29688 .init_hwif = init_hwif_tc86c001,
29689 .port_ops = &tc86c001_port_ops,
29690 diff --git a/drivers/ide/triflex.c b/drivers/ide/triflex.c
29691 index 281c914..55ce1b8 100644
29692 --- a/drivers/ide/triflex.c
29693 +++ b/drivers/ide/triflex.c
29694 @@ -92,7 +92,7 @@ static const struct ide_port_ops triflex_port_ops = {
29695 .set_dma_mode = triflex_set_mode,
29696 };
29697
29698 -static const struct ide_port_info triflex_device __devinitdata = {
29699 +static const struct ide_port_info triflex_device __devinitconst = {
29700 .name = DRV_NAME,
29701 .enablebits = {{0x80, 0x01, 0x01}, {0x80, 0x02, 0x02}},
29702 .port_ops = &triflex_port_ops,
29703 diff --git a/drivers/ide/trm290.c b/drivers/ide/trm290.c
29704 index 4b42ca0..e494a98 100644
29705 --- a/drivers/ide/trm290.c
29706 +++ b/drivers/ide/trm290.c
29707 @@ -324,7 +324,7 @@ static struct ide_dma_ops trm290_dma_ops = {
29708 .dma_check = trm290_dma_check,
29709 };
29710
29711 -static const struct ide_port_info trm290_chipset __devinitdata = {
29712 +static const struct ide_port_info trm290_chipset __devinitconst = {
29713 .name = DRV_NAME,
29714 .init_hwif = init_hwif_trm290,
29715 .tp_ops = &trm290_tp_ops,
29716 diff --git a/drivers/ide/via82cxxx.c b/drivers/ide/via82cxxx.c
29717 index f46f49c..eb77678 100644
29718 --- a/drivers/ide/via82cxxx.c
29719 +++ b/drivers/ide/via82cxxx.c
29720 @@ -403,7 +403,7 @@ static const struct ide_port_ops via_port_ops = {
29721 .cable_detect = via82cxxx_cable_detect,
29722 };
29723
29724 -static const struct ide_port_info via82cxxx_chipset __devinitdata = {
29725 +static const struct ide_port_info via82cxxx_chipset __devinitconst = {
29726 .name = DRV_NAME,
29727 .init_chipset = init_chipset_via82cxxx,
29728 .enablebits = { { 0x40, 0x02, 0x02 }, { 0x40, 0x01, 0x01 } },
29729 diff --git a/drivers/ieee802154/fakehard.c b/drivers/ieee802154/fakehard.c
29730 index eb0e2cc..14241c7 100644
29731 --- a/drivers/ieee802154/fakehard.c
29732 +++ b/drivers/ieee802154/fakehard.c
29733 @@ -386,7 +386,7 @@ static int __devinit ieee802154fake_probe(struct platform_device *pdev)
29734 phy->transmit_power = 0xbf;
29735
29736 dev->netdev_ops = &fake_ops;
29737 - dev->ml_priv = &fake_mlme;
29738 + dev->ml_priv = (void *)&fake_mlme;
29739
29740 priv = netdev_priv(dev);
29741 priv->phy = phy;
29742 diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
29743 index 8b72f39..55df4c8 100644
29744 --- a/drivers/infiniband/core/cm.c
29745 +++ b/drivers/infiniband/core/cm.c
29746 @@ -114,7 +114,7 @@ static char const counter_group_names[CM_COUNTER_GROUPS]
29747
29748 struct cm_counter_group {
29749 struct kobject obj;
29750 - atomic_long_t counter[CM_ATTR_COUNT];
29751 + atomic_long_unchecked_t counter[CM_ATTR_COUNT];
29752 };
29753
29754 struct cm_counter_attribute {
29755 @@ -1394,7 +1394,7 @@ static void cm_dup_req_handler(struct cm_work *work,
29756 struct ib_mad_send_buf *msg = NULL;
29757 int ret;
29758
29759 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29760 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29761 counter[CM_REQ_COUNTER]);
29762
29763 /* Quick state check to discard duplicate REQs. */
29764 @@ -1778,7 +1778,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
29765 if (!cm_id_priv)
29766 return;
29767
29768 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29769 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29770 counter[CM_REP_COUNTER]);
29771 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
29772 if (ret)
29773 @@ -1945,7 +1945,7 @@ static int cm_rtu_handler(struct cm_work *work)
29774 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
29775 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
29776 spin_unlock_irq(&cm_id_priv->lock);
29777 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29778 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29779 counter[CM_RTU_COUNTER]);
29780 goto out;
29781 }
29782 @@ -2128,7 +2128,7 @@ static int cm_dreq_handler(struct cm_work *work)
29783 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
29784 dreq_msg->local_comm_id);
29785 if (!cm_id_priv) {
29786 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29787 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29788 counter[CM_DREQ_COUNTER]);
29789 cm_issue_drep(work->port, work->mad_recv_wc);
29790 return -EINVAL;
29791 @@ -2153,7 +2153,7 @@ static int cm_dreq_handler(struct cm_work *work)
29792 case IB_CM_MRA_REP_RCVD:
29793 break;
29794 case IB_CM_TIMEWAIT:
29795 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29796 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29797 counter[CM_DREQ_COUNTER]);
29798 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
29799 goto unlock;
29800 @@ -2167,7 +2167,7 @@ static int cm_dreq_handler(struct cm_work *work)
29801 cm_free_msg(msg);
29802 goto deref;
29803 case IB_CM_DREQ_RCVD:
29804 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29805 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29806 counter[CM_DREQ_COUNTER]);
29807 goto unlock;
29808 default:
29809 @@ -2534,7 +2534,7 @@ static int cm_mra_handler(struct cm_work *work)
29810 ib_modify_mad(cm_id_priv->av.port->mad_agent,
29811 cm_id_priv->msg, timeout)) {
29812 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
29813 - atomic_long_inc(&work->port->
29814 + atomic_long_inc_unchecked(&work->port->
29815 counter_group[CM_RECV_DUPLICATES].
29816 counter[CM_MRA_COUNTER]);
29817 goto out;
29818 @@ -2543,7 +2543,7 @@ static int cm_mra_handler(struct cm_work *work)
29819 break;
29820 case IB_CM_MRA_REQ_RCVD:
29821 case IB_CM_MRA_REP_RCVD:
29822 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29823 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29824 counter[CM_MRA_COUNTER]);
29825 /* fall through */
29826 default:
29827 @@ -2705,7 +2705,7 @@ static int cm_lap_handler(struct cm_work *work)
29828 case IB_CM_LAP_IDLE:
29829 break;
29830 case IB_CM_MRA_LAP_SENT:
29831 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29832 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29833 counter[CM_LAP_COUNTER]);
29834 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
29835 goto unlock;
29836 @@ -2721,7 +2721,7 @@ static int cm_lap_handler(struct cm_work *work)
29837 cm_free_msg(msg);
29838 goto deref;
29839 case IB_CM_LAP_RCVD:
29840 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29841 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29842 counter[CM_LAP_COUNTER]);
29843 goto unlock;
29844 default:
29845 @@ -3005,7 +3005,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
29846 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
29847 if (cur_cm_id_priv) {
29848 spin_unlock_irq(&cm.lock);
29849 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29850 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29851 counter[CM_SIDR_REQ_COUNTER]);
29852 goto out; /* Duplicate message. */
29853 }
29854 @@ -3217,10 +3217,10 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
29855 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
29856 msg->retries = 1;
29857
29858 - atomic_long_add(1 + msg->retries,
29859 + atomic_long_add_unchecked(1 + msg->retries,
29860 &port->counter_group[CM_XMIT].counter[attr_index]);
29861 if (msg->retries)
29862 - atomic_long_add(msg->retries,
29863 + atomic_long_add_unchecked(msg->retries,
29864 &port->counter_group[CM_XMIT_RETRIES].
29865 counter[attr_index]);
29866
29867 @@ -3430,7 +3430,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
29868 }
29869
29870 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
29871 - atomic_long_inc(&port->counter_group[CM_RECV].
29872 + atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
29873 counter[attr_id - CM_ATTR_ID_OFFSET]);
29874
29875 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
29876 @@ -3635,7 +3635,7 @@ static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
29877 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
29878
29879 return sprintf(buf, "%ld\n",
29880 - atomic_long_read(&group->counter[cm_attr->index]));
29881 + atomic_long_read_unchecked(&group->counter[cm_attr->index]));
29882 }
29883
29884 static const struct sysfs_ops cm_counter_ops = {
29885 diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
29886 index 176c8f9..2627b62 100644
29887 --- a/drivers/infiniband/core/fmr_pool.c
29888 +++ b/drivers/infiniband/core/fmr_pool.c
29889 @@ -98,8 +98,8 @@ struct ib_fmr_pool {
29890
29891 struct task_struct *thread;
29892
29893 - atomic_t req_ser;
29894 - atomic_t flush_ser;
29895 + atomic_unchecked_t req_ser;
29896 + atomic_unchecked_t flush_ser;
29897
29898 wait_queue_head_t force_wait;
29899 };
29900 @@ -180,10 +180,10 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
29901 struct ib_fmr_pool *pool = pool_ptr;
29902
29903 do {
29904 - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
29905 + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
29906 ib_fmr_batch_release(pool);
29907
29908 - atomic_inc(&pool->flush_ser);
29909 + atomic_inc_unchecked(&pool->flush_ser);
29910 wake_up_interruptible(&pool->force_wait);
29911
29912 if (pool->flush_function)
29913 @@ -191,7 +191,7 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
29914 }
29915
29916 set_current_state(TASK_INTERRUPTIBLE);
29917 - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
29918 + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
29919 !kthread_should_stop())
29920 schedule();
29921 __set_current_state(TASK_RUNNING);
29922 @@ -283,8 +283,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
29923 pool->dirty_watermark = params->dirty_watermark;
29924 pool->dirty_len = 0;
29925 spin_lock_init(&pool->pool_lock);
29926 - atomic_set(&pool->req_ser, 0);
29927 - atomic_set(&pool->flush_ser, 0);
29928 + atomic_set_unchecked(&pool->req_ser, 0);
29929 + atomic_set_unchecked(&pool->flush_ser, 0);
29930 init_waitqueue_head(&pool->force_wait);
29931
29932 pool->thread = kthread_run(ib_fmr_cleanup_thread,
29933 @@ -412,11 +412,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
29934 }
29935 spin_unlock_irq(&pool->pool_lock);
29936
29937 - serial = atomic_inc_return(&pool->req_ser);
29938 + serial = atomic_inc_return_unchecked(&pool->req_ser);
29939 wake_up_process(pool->thread);
29940
29941 if (wait_event_interruptible(pool->force_wait,
29942 - atomic_read(&pool->flush_ser) - serial >= 0))
29943 + atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
29944 return -EINTR;
29945
29946 return 0;
29947 @@ -526,7 +526,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
29948 } else {
29949 list_add_tail(&fmr->list, &pool->dirty_list);
29950 if (++pool->dirty_len >= pool->dirty_watermark) {
29951 - atomic_inc(&pool->req_ser);
29952 + atomic_inc_unchecked(&pool->req_ser);
29953 wake_up_process(pool->thread);
29954 }
29955 }
29956 diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
29957 index 40c8353..946b0e4 100644
29958 --- a/drivers/infiniband/hw/cxgb4/mem.c
29959 +++ b/drivers/infiniband/hw/cxgb4/mem.c
29960 @@ -122,7 +122,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
29961 int err;
29962 struct fw_ri_tpte tpt;
29963 u32 stag_idx;
29964 - static atomic_t key;
29965 + static atomic_unchecked_t key;
29966
29967 if (c4iw_fatal_error(rdev))
29968 return -EIO;
29969 @@ -135,7 +135,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
29970 &rdev->resource.tpt_fifo_lock);
29971 if (!stag_idx)
29972 return -ENOMEM;
29973 - *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
29974 + *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff);
29975 }
29976 PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
29977 __func__, stag_state, type, pdid, stag_idx);
29978 diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
29979 index 79b3dbc..96e5fcc 100644
29980 --- a/drivers/infiniband/hw/ipath/ipath_rc.c
29981 +++ b/drivers/infiniband/hw/ipath/ipath_rc.c
29982 @@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
29983 struct ib_atomic_eth *ateth;
29984 struct ipath_ack_entry *e;
29985 u64 vaddr;
29986 - atomic64_t *maddr;
29987 + atomic64_unchecked_t *maddr;
29988 u64 sdata;
29989 u32 rkey;
29990 u8 next;
29991 @@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
29992 IB_ACCESS_REMOTE_ATOMIC)))
29993 goto nack_acc_unlck;
29994 /* Perform atomic OP and save result. */
29995 - maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
29996 + maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
29997 sdata = be64_to_cpu(ateth->swap_data);
29998 e = &qp->s_ack_queue[qp->r_head_ack_queue];
29999 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
30000 - (u64) atomic64_add_return(sdata, maddr) - sdata :
30001 + (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
30002 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
30003 be64_to_cpu(ateth->compare_data),
30004 sdata);
30005 diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
30006 index 1f95bba..9530f87 100644
30007 --- a/drivers/infiniband/hw/ipath/ipath_ruc.c
30008 +++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
30009 @@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ipath_qp *sqp)
30010 unsigned long flags;
30011 struct ib_wc wc;
30012 u64 sdata;
30013 - atomic64_t *maddr;
30014 + atomic64_unchecked_t *maddr;
30015 enum ib_wc_status send_status;
30016
30017 /*
30018 @@ -382,11 +382,11 @@ again:
30019 IB_ACCESS_REMOTE_ATOMIC)))
30020 goto acc_err;
30021 /* Perform atomic OP and save result. */
30022 - maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
30023 + maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
30024 sdata = wqe->wr.wr.atomic.compare_add;
30025 *(u64 *) sqp->s_sge.sge.vaddr =
30026 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
30027 - (u64) atomic64_add_return(sdata, maddr) - sdata :
30028 + (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
30029 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
30030 sdata, wqe->wr.wr.atomic.swap);
30031 goto send_comp;
30032 diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
30033 index 5965b3d..16817fb 100644
30034 --- a/drivers/infiniband/hw/nes/nes.c
30035 +++ b/drivers/infiniband/hw/nes/nes.c
30036 @@ -103,7 +103,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes");
30037 LIST_HEAD(nes_adapter_list);
30038 static LIST_HEAD(nes_dev_list);
30039
30040 -atomic_t qps_destroyed;
30041 +atomic_unchecked_t qps_destroyed;
30042
30043 static unsigned int ee_flsh_adapter;
30044 static unsigned int sysfs_nonidx_addr;
30045 @@ -272,7 +272,7 @@ static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_r
30046 struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
30047 struct nes_adapter *nesadapter = nesdev->nesadapter;
30048
30049 - atomic_inc(&qps_destroyed);
30050 + atomic_inc_unchecked(&qps_destroyed);
30051
30052 /* Free the control structures */
30053
30054 diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
30055 index 568b4f1..5ea3eff 100644
30056 --- a/drivers/infiniband/hw/nes/nes.h
30057 +++ b/drivers/infiniband/hw/nes/nes.h
30058 @@ -178,17 +178,17 @@ extern unsigned int nes_debug_level;
30059 extern unsigned int wqm_quanta;
30060 extern struct list_head nes_adapter_list;
30061
30062 -extern atomic_t cm_connects;
30063 -extern atomic_t cm_accepts;
30064 -extern atomic_t cm_disconnects;
30065 -extern atomic_t cm_closes;
30066 -extern atomic_t cm_connecteds;
30067 -extern atomic_t cm_connect_reqs;
30068 -extern atomic_t cm_rejects;
30069 -extern atomic_t mod_qp_timouts;
30070 -extern atomic_t qps_created;
30071 -extern atomic_t qps_destroyed;
30072 -extern atomic_t sw_qps_destroyed;
30073 +extern atomic_unchecked_t cm_connects;
30074 +extern atomic_unchecked_t cm_accepts;
30075 +extern atomic_unchecked_t cm_disconnects;
30076 +extern atomic_unchecked_t cm_closes;
30077 +extern atomic_unchecked_t cm_connecteds;
30078 +extern atomic_unchecked_t cm_connect_reqs;
30079 +extern atomic_unchecked_t cm_rejects;
30080 +extern atomic_unchecked_t mod_qp_timouts;
30081 +extern atomic_unchecked_t qps_created;
30082 +extern atomic_unchecked_t qps_destroyed;
30083 +extern atomic_unchecked_t sw_qps_destroyed;
30084 extern u32 mh_detected;
30085 extern u32 mh_pauses_sent;
30086 extern u32 cm_packets_sent;
30087 @@ -197,16 +197,16 @@ extern u32 cm_packets_created;
30088 extern u32 cm_packets_received;
30089 extern u32 cm_packets_dropped;
30090 extern u32 cm_packets_retrans;
30091 -extern atomic_t cm_listens_created;
30092 -extern atomic_t cm_listens_destroyed;
30093 +extern atomic_unchecked_t cm_listens_created;
30094 +extern atomic_unchecked_t cm_listens_destroyed;
30095 extern u32 cm_backlog_drops;
30096 -extern atomic_t cm_loopbacks;
30097 -extern atomic_t cm_nodes_created;
30098 -extern atomic_t cm_nodes_destroyed;
30099 -extern atomic_t cm_accel_dropped_pkts;
30100 -extern atomic_t cm_resets_recvd;
30101 -extern atomic_t pau_qps_created;
30102 -extern atomic_t pau_qps_destroyed;
30103 +extern atomic_unchecked_t cm_loopbacks;
30104 +extern atomic_unchecked_t cm_nodes_created;
30105 +extern atomic_unchecked_t cm_nodes_destroyed;
30106 +extern atomic_unchecked_t cm_accel_dropped_pkts;
30107 +extern atomic_unchecked_t cm_resets_recvd;
30108 +extern atomic_unchecked_t pau_qps_created;
30109 +extern atomic_unchecked_t pau_qps_destroyed;
30110
30111 extern u32 int_mod_timer_init;
30112 extern u32 int_mod_cq_depth_256;
30113 diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
30114 index 0a52d72..0642f36 100644
30115 --- a/drivers/infiniband/hw/nes/nes_cm.c
30116 +++ b/drivers/infiniband/hw/nes/nes_cm.c
30117 @@ -68,14 +68,14 @@ u32 cm_packets_dropped;
30118 u32 cm_packets_retrans;
30119 u32 cm_packets_created;
30120 u32 cm_packets_received;
30121 -atomic_t cm_listens_created;
30122 -atomic_t cm_listens_destroyed;
30123 +atomic_unchecked_t cm_listens_created;
30124 +atomic_unchecked_t cm_listens_destroyed;
30125 u32 cm_backlog_drops;
30126 -atomic_t cm_loopbacks;
30127 -atomic_t cm_nodes_created;
30128 -atomic_t cm_nodes_destroyed;
30129 -atomic_t cm_accel_dropped_pkts;
30130 -atomic_t cm_resets_recvd;
30131 +atomic_unchecked_t cm_loopbacks;
30132 +atomic_unchecked_t cm_nodes_created;
30133 +atomic_unchecked_t cm_nodes_destroyed;
30134 +atomic_unchecked_t cm_accel_dropped_pkts;
30135 +atomic_unchecked_t cm_resets_recvd;
30136
30137 static inline int mini_cm_accelerated(struct nes_cm_core *, struct nes_cm_node *);
30138 static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *, struct nes_vnic *, struct nes_cm_info *);
30139 @@ -148,13 +148,13 @@ static struct nes_cm_ops nes_cm_api = {
30140
30141 static struct nes_cm_core *g_cm_core;
30142
30143 -atomic_t cm_connects;
30144 -atomic_t cm_accepts;
30145 -atomic_t cm_disconnects;
30146 -atomic_t cm_closes;
30147 -atomic_t cm_connecteds;
30148 -atomic_t cm_connect_reqs;
30149 -atomic_t cm_rejects;
30150 +atomic_unchecked_t cm_connects;
30151 +atomic_unchecked_t cm_accepts;
30152 +atomic_unchecked_t cm_disconnects;
30153 +atomic_unchecked_t cm_closes;
30154 +atomic_unchecked_t cm_connecteds;
30155 +atomic_unchecked_t cm_connect_reqs;
30156 +atomic_unchecked_t cm_rejects;
30157
30158 int nes_add_ref_cm_node(struct nes_cm_node *cm_node)
30159 {
30160 @@ -1271,7 +1271,7 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
30161 kfree(listener);
30162 listener = NULL;
30163 ret = 0;
30164 - atomic_inc(&cm_listens_destroyed);
30165 + atomic_inc_unchecked(&cm_listens_destroyed);
30166 } else {
30167 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
30168 }
30169 @@ -1473,7 +1473,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
30170 cm_node->rem_mac);
30171
30172 add_hte_node(cm_core, cm_node);
30173 - atomic_inc(&cm_nodes_created);
30174 + atomic_inc_unchecked(&cm_nodes_created);
30175
30176 return cm_node;
30177 }
30178 @@ -1531,7 +1531,7 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
30179 }
30180
30181 atomic_dec(&cm_core->node_cnt);
30182 - atomic_inc(&cm_nodes_destroyed);
30183 + atomic_inc_unchecked(&cm_nodes_destroyed);
30184 nesqp = cm_node->nesqp;
30185 if (nesqp) {
30186 nesqp->cm_node = NULL;
30187 @@ -1595,7 +1595,7 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc,
30188
30189 static void drop_packet(struct sk_buff *skb)
30190 {
30191 - atomic_inc(&cm_accel_dropped_pkts);
30192 + atomic_inc_unchecked(&cm_accel_dropped_pkts);
30193 dev_kfree_skb_any(skb);
30194 }
30195
30196 @@ -1658,7 +1658,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
30197 {
30198
30199 int reset = 0; /* whether to send reset in case of err.. */
30200 - atomic_inc(&cm_resets_recvd);
30201 + atomic_inc_unchecked(&cm_resets_recvd);
30202 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
30203 " refcnt=%d\n", cm_node, cm_node->state,
30204 atomic_read(&cm_node->ref_count));
30205 @@ -2299,7 +2299,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
30206 rem_ref_cm_node(cm_node->cm_core, cm_node);
30207 return NULL;
30208 }
30209 - atomic_inc(&cm_loopbacks);
30210 + atomic_inc_unchecked(&cm_loopbacks);
30211 loopbackremotenode->loopbackpartner = cm_node;
30212 loopbackremotenode->tcp_cntxt.rcv_wscale =
30213 NES_CM_DEFAULT_RCV_WND_SCALE;
30214 @@ -2574,7 +2574,7 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
30215 nes_queue_mgt_skbs(skb, nesvnic, cm_node->nesqp);
30216 else {
30217 rem_ref_cm_node(cm_core, cm_node);
30218 - atomic_inc(&cm_accel_dropped_pkts);
30219 + atomic_inc_unchecked(&cm_accel_dropped_pkts);
30220 dev_kfree_skb_any(skb);
30221 }
30222 break;
30223 @@ -2880,7 +2880,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
30224
30225 if ((cm_id) && (cm_id->event_handler)) {
30226 if (issue_disconn) {
30227 - atomic_inc(&cm_disconnects);
30228 + atomic_inc_unchecked(&cm_disconnects);
30229 cm_event.event = IW_CM_EVENT_DISCONNECT;
30230 cm_event.status = disconn_status;
30231 cm_event.local_addr = cm_id->local_addr;
30232 @@ -2902,7 +2902,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
30233 }
30234
30235 if (issue_close) {
30236 - atomic_inc(&cm_closes);
30237 + atomic_inc_unchecked(&cm_closes);
30238 nes_disconnect(nesqp, 1);
30239
30240 cm_id->provider_data = nesqp;
30241 @@ -3038,7 +3038,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
30242
30243 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
30244 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
30245 - atomic_inc(&cm_accepts);
30246 + atomic_inc_unchecked(&cm_accepts);
30247
30248 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
30249 netdev_refcnt_read(nesvnic->netdev));
30250 @@ -3240,7 +3240,7 @@ int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
30251 struct nes_cm_core *cm_core;
30252 u8 *start_buff;
30253
30254 - atomic_inc(&cm_rejects);
30255 + atomic_inc_unchecked(&cm_rejects);
30256 cm_node = (struct nes_cm_node *)cm_id->provider_data;
30257 loopback = cm_node->loopbackpartner;
30258 cm_core = cm_node->cm_core;
30259 @@ -3300,7 +3300,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
30260 ntohl(cm_id->local_addr.sin_addr.s_addr),
30261 ntohs(cm_id->local_addr.sin_port));
30262
30263 - atomic_inc(&cm_connects);
30264 + atomic_inc_unchecked(&cm_connects);
30265 nesqp->active_conn = 1;
30266
30267 /* cache the cm_id in the qp */
30268 @@ -3406,7 +3406,7 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
30269 g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
30270 return err;
30271 }
30272 - atomic_inc(&cm_listens_created);
30273 + atomic_inc_unchecked(&cm_listens_created);
30274 }
30275
30276 cm_id->add_ref(cm_id);
30277 @@ -3507,7 +3507,7 @@ static void cm_event_connected(struct nes_cm_event *event)
30278
30279 if (nesqp->destroyed)
30280 return;
30281 - atomic_inc(&cm_connecteds);
30282 + atomic_inc_unchecked(&cm_connecteds);
30283 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
30284 " local port 0x%04X. jiffies = %lu.\n",
30285 nesqp->hwqp.qp_id,
30286 @@ -3694,7 +3694,7 @@ static void cm_event_reset(struct nes_cm_event *event)
30287
30288 cm_id->add_ref(cm_id);
30289 ret = cm_id->event_handler(cm_id, &cm_event);
30290 - atomic_inc(&cm_closes);
30291 + atomic_inc_unchecked(&cm_closes);
30292 cm_event.event = IW_CM_EVENT_CLOSE;
30293 cm_event.status = 0;
30294 cm_event.provider_data = cm_id->provider_data;
30295 @@ -3730,7 +3730,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event)
30296 return;
30297 cm_id = cm_node->cm_id;
30298
30299 - atomic_inc(&cm_connect_reqs);
30300 + atomic_inc_unchecked(&cm_connect_reqs);
30301 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
30302 cm_node, cm_id, jiffies);
30303
30304 @@ -3770,7 +3770,7 @@ static void cm_event_mpa_reject(struct nes_cm_event *event)
30305 return;
30306 cm_id = cm_node->cm_id;
30307
30308 - atomic_inc(&cm_connect_reqs);
30309 + atomic_inc_unchecked(&cm_connect_reqs);
30310 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
30311 cm_node, cm_id, jiffies);
30312
30313 diff --git a/drivers/infiniband/hw/nes/nes_mgt.c b/drivers/infiniband/hw/nes/nes_mgt.c
30314 index b3b2a24..7bfaf1e 100644
30315 --- a/drivers/infiniband/hw/nes/nes_mgt.c
30316 +++ b/drivers/infiniband/hw/nes/nes_mgt.c
30317 @@ -40,8 +40,8 @@
30318 #include "nes.h"
30319 #include "nes_mgt.h"
30320
30321 -atomic_t pau_qps_created;
30322 -atomic_t pau_qps_destroyed;
30323 +atomic_unchecked_t pau_qps_created;
30324 +atomic_unchecked_t pau_qps_destroyed;
30325
30326 static void nes_replenish_mgt_rq(struct nes_vnic_mgt *mgtvnic)
30327 {
30328 @@ -621,7 +621,7 @@ void nes_destroy_pau_qp(struct nes_device *nesdev, struct nes_qp *nesqp)
30329 {
30330 struct sk_buff *skb;
30331 unsigned long flags;
30332 - atomic_inc(&pau_qps_destroyed);
30333 + atomic_inc_unchecked(&pau_qps_destroyed);
30334
30335 /* Free packets that have not yet been forwarded */
30336 /* Lock is acquired by skb_dequeue when removing the skb */
30337 @@ -812,7 +812,7 @@ static void nes_mgt_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *
30338 cq->cq_vbase[head].cqe_words[NES_NIC_CQE_HASH_RCVNXT]);
30339 skb_queue_head_init(&nesqp->pau_list);
30340 spin_lock_init(&nesqp->pau_lock);
30341 - atomic_inc(&pau_qps_created);
30342 + atomic_inc_unchecked(&pau_qps_created);
30343 nes_change_quad_hash(nesdev, mgtvnic->nesvnic, nesqp);
30344 }
30345
30346 diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
30347 index c00d2f3..8834298 100644
30348 --- a/drivers/infiniband/hw/nes/nes_nic.c
30349 +++ b/drivers/infiniband/hw/nes/nes_nic.c
30350 @@ -1277,39 +1277,39 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
30351 target_stat_values[++index] = mh_detected;
30352 target_stat_values[++index] = mh_pauses_sent;
30353 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
30354 - target_stat_values[++index] = atomic_read(&cm_connects);
30355 - target_stat_values[++index] = atomic_read(&cm_accepts);
30356 - target_stat_values[++index] = atomic_read(&cm_disconnects);
30357 - target_stat_values[++index] = atomic_read(&cm_connecteds);
30358 - target_stat_values[++index] = atomic_read(&cm_connect_reqs);
30359 - target_stat_values[++index] = atomic_read(&cm_rejects);
30360 - target_stat_values[++index] = atomic_read(&mod_qp_timouts);
30361 - target_stat_values[++index] = atomic_read(&qps_created);
30362 - target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
30363 - target_stat_values[++index] = atomic_read(&qps_destroyed);
30364 - target_stat_values[++index] = atomic_read(&cm_closes);
30365 + target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
30366 + target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
30367 + target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
30368 + target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
30369 + target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
30370 + target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
30371 + target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
30372 + target_stat_values[++index] = atomic_read_unchecked(&qps_created);
30373 + target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
30374 + target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
30375 + target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
30376 target_stat_values[++index] = cm_packets_sent;
30377 target_stat_values[++index] = cm_packets_bounced;
30378 target_stat_values[++index] = cm_packets_created;
30379 target_stat_values[++index] = cm_packets_received;
30380 target_stat_values[++index] = cm_packets_dropped;
30381 target_stat_values[++index] = cm_packets_retrans;
30382 - target_stat_values[++index] = atomic_read(&cm_listens_created);
30383 - target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
30384 + target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created);
30385 + target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed);
30386 target_stat_values[++index] = cm_backlog_drops;
30387 - target_stat_values[++index] = atomic_read(&cm_loopbacks);
30388 - target_stat_values[++index] = atomic_read(&cm_nodes_created);
30389 - target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
30390 - target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
30391 - target_stat_values[++index] = atomic_read(&cm_resets_recvd);
30392 + target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
30393 + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
30394 + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
30395 + target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
30396 + target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
30397 target_stat_values[++index] = nesadapter->free_4kpbl;
30398 target_stat_values[++index] = nesadapter->free_256pbl;
30399 target_stat_values[++index] = int_mod_timer_init;
30400 target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated;
30401 target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed;
30402 target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc;
30403 - target_stat_values[++index] = atomic_read(&pau_qps_created);
30404 - target_stat_values[++index] = atomic_read(&pau_qps_destroyed);
30405 + target_stat_values[++index] = atomic_read_unchecked(&pau_qps_created);
30406 + target_stat_values[++index] = atomic_read_unchecked(&pau_qps_destroyed);
30407 }
30408
30409 /**
30410 diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
30411 index 5095bc4..41e8fff 100644
30412 --- a/drivers/infiniband/hw/nes/nes_verbs.c
30413 +++ b/drivers/infiniband/hw/nes/nes_verbs.c
30414 @@ -46,9 +46,9 @@
30415
30416 #include <rdma/ib_umem.h>
30417
30418 -atomic_t mod_qp_timouts;
30419 -atomic_t qps_created;
30420 -atomic_t sw_qps_destroyed;
30421 +atomic_unchecked_t mod_qp_timouts;
30422 +atomic_unchecked_t qps_created;
30423 +atomic_unchecked_t sw_qps_destroyed;
30424
30425 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
30426
30427 @@ -1131,7 +1131,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
30428 if (init_attr->create_flags)
30429 return ERR_PTR(-EINVAL);
30430
30431 - atomic_inc(&qps_created);
30432 + atomic_inc_unchecked(&qps_created);
30433 switch (init_attr->qp_type) {
30434 case IB_QPT_RC:
30435 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
30436 @@ -1460,7 +1460,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
30437 struct iw_cm_event cm_event;
30438 int ret = 0;
30439
30440 - atomic_inc(&sw_qps_destroyed);
30441 + atomic_inc_unchecked(&sw_qps_destroyed);
30442 nesqp->destroyed = 1;
30443
30444 /* Blow away the connection if it exists. */
30445 diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
30446 index b881bdc..c2e360c 100644
30447 --- a/drivers/infiniband/hw/qib/qib.h
30448 +++ b/drivers/infiniband/hw/qib/qib.h
30449 @@ -51,6 +51,7 @@
30450 #include <linux/completion.h>
30451 #include <linux/kref.h>
30452 #include <linux/sched.h>
30453 +#include <linux/slab.h>
30454
30455 #include "qib_common.h"
30456 #include "qib_verbs.h"
30457 diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
30458 index c351aa4..e6967c2 100644
30459 --- a/drivers/input/gameport/gameport.c
30460 +++ b/drivers/input/gameport/gameport.c
30461 @@ -488,14 +488,14 @@ EXPORT_SYMBOL(gameport_set_phys);
30462 */
30463 static void gameport_init_port(struct gameport *gameport)
30464 {
30465 - static atomic_t gameport_no = ATOMIC_INIT(0);
30466 + static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
30467
30468 __module_get(THIS_MODULE);
30469
30470 mutex_init(&gameport->drv_mutex);
30471 device_initialize(&gameport->dev);
30472 dev_set_name(&gameport->dev, "gameport%lu",
30473 - (unsigned long)atomic_inc_return(&gameport_no) - 1);
30474 + (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
30475 gameport->dev.bus = &gameport_bus;
30476 gameport->dev.release = gameport_release_port;
30477 if (gameport->parent)
30478 diff --git a/drivers/input/input.c b/drivers/input/input.c
30479 index da38d97..2aa0b79 100644
30480 --- a/drivers/input/input.c
30481 +++ b/drivers/input/input.c
30482 @@ -1814,7 +1814,7 @@ static void input_cleanse_bitmasks(struct input_dev *dev)
30483 */
30484 int input_register_device(struct input_dev *dev)
30485 {
30486 - static atomic_t input_no = ATOMIC_INIT(0);
30487 + static atomic_unchecked_t input_no = ATOMIC_INIT(0);
30488 struct input_handler *handler;
30489 const char *path;
30490 int error;
30491 @@ -1851,7 +1851,7 @@ int input_register_device(struct input_dev *dev)
30492 dev->setkeycode = input_default_setkeycode;
30493
30494 dev_set_name(&dev->dev, "input%ld",
30495 - (unsigned long) atomic_inc_return(&input_no) - 1);
30496 + (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
30497
30498 error = device_add(&dev->dev);
30499 if (error)
30500 diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
30501 index b8d8611..7a4a04b 100644
30502 --- a/drivers/input/joystick/sidewinder.c
30503 +++ b/drivers/input/joystick/sidewinder.c
30504 @@ -30,6 +30,7 @@
30505 #include <linux/kernel.h>
30506 #include <linux/module.h>
30507 #include <linux/slab.h>
30508 +#include <linux/sched.h>
30509 #include <linux/init.h>
30510 #include <linux/input.h>
30511 #include <linux/gameport.h>
30512 diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
30513 index d728875..844c89b 100644
30514 --- a/drivers/input/joystick/xpad.c
30515 +++ b/drivers/input/joystick/xpad.c
30516 @@ -710,7 +710,7 @@ static void xpad_led_set(struct led_classdev *led_cdev,
30517
30518 static int xpad_led_probe(struct usb_xpad *xpad)
30519 {
30520 - static atomic_t led_seq = ATOMIC_INIT(0);
30521 + static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
30522 long led_no;
30523 struct xpad_led *led;
30524 struct led_classdev *led_cdev;
30525 @@ -723,7 +723,7 @@ static int xpad_led_probe(struct usb_xpad *xpad)
30526 if (!led)
30527 return -ENOMEM;
30528
30529 - led_no = (long)atomic_inc_return(&led_seq) - 1;
30530 + led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
30531
30532 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
30533 led->xpad = xpad;
30534 diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
30535 index 0110b5a..d3ad144 100644
30536 --- a/drivers/input/mousedev.c
30537 +++ b/drivers/input/mousedev.c
30538 @@ -763,7 +763,7 @@ static ssize_t mousedev_read(struct file *file, char __user *buffer,
30539
30540 spin_unlock_irq(&client->packet_lock);
30541
30542 - if (copy_to_user(buffer, data, count))
30543 + if (count > sizeof(data) || copy_to_user(buffer, data, count))
30544 return -EFAULT;
30545
30546 return count;
30547 diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
30548 index ba70058..571d25d 100644
30549 --- a/drivers/input/serio/serio.c
30550 +++ b/drivers/input/serio/serio.c
30551 @@ -497,7 +497,7 @@ static void serio_release_port(struct device *dev)
30552 */
30553 static void serio_init_port(struct serio *serio)
30554 {
30555 - static atomic_t serio_no = ATOMIC_INIT(0);
30556 + static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
30557
30558 __module_get(THIS_MODULE);
30559
30560 @@ -508,7 +508,7 @@ static void serio_init_port(struct serio *serio)
30561 mutex_init(&serio->drv_mutex);
30562 device_initialize(&serio->dev);
30563 dev_set_name(&serio->dev, "serio%ld",
30564 - (long)atomic_inc_return(&serio_no) - 1);
30565 + (long)atomic_inc_return_unchecked(&serio_no) - 1);
30566 serio->dev.bus = &serio_bus;
30567 serio->dev.release = serio_release_port;
30568 serio->dev.groups = serio_device_attr_groups;
30569 diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
30570 index e44933d..9ba484a 100644
30571 --- a/drivers/isdn/capi/capi.c
30572 +++ b/drivers/isdn/capi/capi.c
30573 @@ -83,8 +83,8 @@ struct capiminor {
30574
30575 struct capi20_appl *ap;
30576 u32 ncci;
30577 - atomic_t datahandle;
30578 - atomic_t msgid;
30579 + atomic_unchecked_t datahandle;
30580 + atomic_unchecked_t msgid;
30581
30582 struct tty_port port;
30583 int ttyinstop;
30584 @@ -397,7 +397,7 @@ gen_data_b3_resp_for(struct capiminor *mp, struct sk_buff *skb)
30585 capimsg_setu16(s, 2, mp->ap->applid);
30586 capimsg_setu8 (s, 4, CAPI_DATA_B3);
30587 capimsg_setu8 (s, 5, CAPI_RESP);
30588 - capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
30589 + capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid));
30590 capimsg_setu32(s, 8, mp->ncci);
30591 capimsg_setu16(s, 12, datahandle);
30592 }
30593 @@ -518,14 +518,14 @@ static void handle_minor_send(struct capiminor *mp)
30594 mp->outbytes -= len;
30595 spin_unlock_bh(&mp->outlock);
30596
30597 - datahandle = atomic_inc_return(&mp->datahandle);
30598 + datahandle = atomic_inc_return_unchecked(&mp->datahandle);
30599 skb_push(skb, CAPI_DATA_B3_REQ_LEN);
30600 memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
30601 capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
30602 capimsg_setu16(skb->data, 2, mp->ap->applid);
30603 capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
30604 capimsg_setu8 (skb->data, 5, CAPI_REQ);
30605 - capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
30606 + capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid));
30607 capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
30608 capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
30609 capimsg_setu16(skb->data, 16, len); /* Data length */
30610 diff --git a/drivers/isdn/gigaset/common.c b/drivers/isdn/gigaset/common.c
30611 index db621db..825ea1a 100644
30612 --- a/drivers/isdn/gigaset/common.c
30613 +++ b/drivers/isdn/gigaset/common.c
30614 @@ -723,7 +723,7 @@ struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels,
30615 cs->commands_pending = 0;
30616 cs->cur_at_seq = 0;
30617 cs->gotfwver = -1;
30618 - cs->open_count = 0;
30619 + local_set(&cs->open_count, 0);
30620 cs->dev = NULL;
30621 cs->tty = NULL;
30622 cs->tty_dev = NULL;
30623 diff --git a/drivers/isdn/gigaset/gigaset.h b/drivers/isdn/gigaset/gigaset.h
30624 index 212efaf..f187c6b 100644
30625 --- a/drivers/isdn/gigaset/gigaset.h
30626 +++ b/drivers/isdn/gigaset/gigaset.h
30627 @@ -35,6 +35,7 @@
30628 #include <linux/tty_driver.h>
30629 #include <linux/list.h>
30630 #include <linux/atomic.h>
30631 +#include <asm/local.h>
30632
30633 #define GIG_VERSION {0, 5, 0, 0}
30634 #define GIG_COMPAT {0, 4, 0, 0}
30635 @@ -433,7 +434,7 @@ struct cardstate {
30636 spinlock_t cmdlock;
30637 unsigned curlen, cmdbytes;
30638
30639 - unsigned open_count;
30640 + local_t open_count;
30641 struct tty_struct *tty;
30642 struct tasklet_struct if_wake_tasklet;
30643 unsigned control_state;
30644 diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
30645 index ee0a549..a7c9798 100644
30646 --- a/drivers/isdn/gigaset/interface.c
30647 +++ b/drivers/isdn/gigaset/interface.c
30648 @@ -163,9 +163,7 @@ static int if_open(struct tty_struct *tty, struct file *filp)
30649 }
30650 tty->driver_data = cs;
30651
30652 - ++cs->open_count;
30653 -
30654 - if (cs->open_count == 1) {
30655 + if (local_inc_return(&cs->open_count) == 1) {
30656 spin_lock_irqsave(&cs->lock, flags);
30657 cs->tty = tty;
30658 spin_unlock_irqrestore(&cs->lock, flags);
30659 @@ -193,10 +191,10 @@ static void if_close(struct tty_struct *tty, struct file *filp)
30660
30661 if (!cs->connected)
30662 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
30663 - else if (!cs->open_count)
30664 + else if (!local_read(&cs->open_count))
30665 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30666 else {
30667 - if (!--cs->open_count) {
30668 + if (!local_dec_return(&cs->open_count)) {
30669 spin_lock_irqsave(&cs->lock, flags);
30670 cs->tty = NULL;
30671 spin_unlock_irqrestore(&cs->lock, flags);
30672 @@ -231,7 +229,7 @@ static int if_ioctl(struct tty_struct *tty,
30673 if (!cs->connected) {
30674 gig_dbg(DEBUG_IF, "not connected");
30675 retval = -ENODEV;
30676 - } else if (!cs->open_count)
30677 + } else if (!local_read(&cs->open_count))
30678 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30679 else {
30680 retval = 0;
30681 @@ -361,7 +359,7 @@ static int if_write(struct tty_struct *tty, const unsigned char *buf, int count)
30682 retval = -ENODEV;
30683 goto done;
30684 }
30685 - if (!cs->open_count) {
30686 + if (!local_read(&cs->open_count)) {
30687 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30688 retval = -ENODEV;
30689 goto done;
30690 @@ -414,7 +412,7 @@ static int if_write_room(struct tty_struct *tty)
30691 if (!cs->connected) {
30692 gig_dbg(DEBUG_IF, "not connected");
30693 retval = -ENODEV;
30694 - } else if (!cs->open_count)
30695 + } else if (!local_read(&cs->open_count))
30696 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30697 else if (cs->mstate != MS_LOCKED) {
30698 dev_warn(cs->dev, "can't write to unlocked device\n");
30699 @@ -444,7 +442,7 @@ static int if_chars_in_buffer(struct tty_struct *tty)
30700
30701 if (!cs->connected)
30702 gig_dbg(DEBUG_IF, "not connected");
30703 - else if (!cs->open_count)
30704 + else if (!local_read(&cs->open_count))
30705 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30706 else if (cs->mstate != MS_LOCKED)
30707 dev_warn(cs->dev, "can't write to unlocked device\n");
30708 @@ -472,7 +470,7 @@ static void if_throttle(struct tty_struct *tty)
30709
30710 if (!cs->connected)
30711 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
30712 - else if (!cs->open_count)
30713 + else if (!local_read(&cs->open_count))
30714 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30715 else
30716 gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
30717 @@ -496,7 +494,7 @@ static void if_unthrottle(struct tty_struct *tty)
30718
30719 if (!cs->connected)
30720 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
30721 - else if (!cs->open_count)
30722 + else if (!local_read(&cs->open_count))
30723 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30724 else
30725 gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
30726 @@ -527,7 +525,7 @@ static void if_set_termios(struct tty_struct *tty, struct ktermios *old)
30727 goto out;
30728 }
30729
30730 - if (!cs->open_count) {
30731 + if (!local_read(&cs->open_count)) {
30732 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30733 goto out;
30734 }
30735 diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
30736 index 2a57da59..e7a12ed 100644
30737 --- a/drivers/isdn/hardware/avm/b1.c
30738 +++ b/drivers/isdn/hardware/avm/b1.c
30739 @@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capiloaddatapart * t4file)
30740 }
30741 if (left) {
30742 if (t4file->user) {
30743 - if (copy_from_user(buf, dp, left))
30744 + if (left > sizeof buf || copy_from_user(buf, dp, left))
30745 return -EFAULT;
30746 } else {
30747 memcpy(buf, dp, left);
30748 @@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capiloaddatapart * config)
30749 }
30750 if (left) {
30751 if (config->user) {
30752 - if (copy_from_user(buf, dp, left))
30753 + if (left > sizeof buf || copy_from_user(buf, dp, left))
30754 return -EFAULT;
30755 } else {
30756 memcpy(buf, dp, left);
30757 diff --git a/drivers/isdn/hardware/eicon/divasync.h b/drivers/isdn/hardware/eicon/divasync.h
30758 index 85784a7..a19ca98 100644
30759 --- a/drivers/isdn/hardware/eicon/divasync.h
30760 +++ b/drivers/isdn/hardware/eicon/divasync.h
30761 @@ -146,7 +146,7 @@ typedef struct _diva_didd_add_adapter {
30762 } diva_didd_add_adapter_t;
30763 typedef struct _diva_didd_remove_adapter {
30764 IDI_CALL p_request;
30765 -} diva_didd_remove_adapter_t;
30766 +} __no_const diva_didd_remove_adapter_t;
30767 typedef struct _diva_didd_read_adapter_array {
30768 void * buffer;
30769 dword length;
30770 diff --git a/drivers/isdn/hardware/eicon/xdi_adapter.h b/drivers/isdn/hardware/eicon/xdi_adapter.h
30771 index a3bd163..8956575 100644
30772 --- a/drivers/isdn/hardware/eicon/xdi_adapter.h
30773 +++ b/drivers/isdn/hardware/eicon/xdi_adapter.h
30774 @@ -44,7 +44,7 @@ typedef struct _xdi_mbox_t {
30775 typedef struct _diva_os_idi_adapter_interface {
30776 diva_init_card_proc_t cleanup_adapter_proc;
30777 diva_cmd_card_proc_t cmd_proc;
30778 -} diva_os_idi_adapter_interface_t;
30779 +} __no_const diva_os_idi_adapter_interface_t;
30780
30781 typedef struct _diva_os_xdi_adapter {
30782 struct list_head link;
30783 diff --git a/drivers/isdn/i4l/isdn_net.c b/drivers/isdn/i4l/isdn_net.c
30784 index 2339d73..802ab87 100644
30785 --- a/drivers/isdn/i4l/isdn_net.c
30786 +++ b/drivers/isdn/i4l/isdn_net.c
30787 @@ -1901,7 +1901,7 @@ static int isdn_net_header(struct sk_buff *skb, struct net_device *dev,
30788 {
30789 isdn_net_local *lp = netdev_priv(dev);
30790 unsigned char *p;
30791 - ushort len = 0;
30792 + int len = 0;
30793
30794 switch (lp->p_encap) {
30795 case ISDN_NET_ENCAP_ETHER:
30796 diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
30797 index 1f355bb..43f1fea 100644
30798 --- a/drivers/isdn/icn/icn.c
30799 +++ b/drivers/isdn/icn/icn.c
30800 @@ -1045,7 +1045,7 @@ icn_writecmd(const u_char * buf, int len, int user, icn_card * card)
30801 if (count > len)
30802 count = len;
30803 if (user) {
30804 - if (copy_from_user(msg, buf, count))
30805 + if (count > sizeof msg || copy_from_user(msg, buf, count))
30806 return -EFAULT;
30807 } else
30808 memcpy(msg, buf, count);
30809 diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
30810 index b5fdcb7..5b6c59f 100644
30811 --- a/drivers/lguest/core.c
30812 +++ b/drivers/lguest/core.c
30813 @@ -92,9 +92,17 @@ static __init int map_switcher(void)
30814 * it's worked so far. The end address needs +1 because __get_vm_area
30815 * allocates an extra guard page, so we need space for that.
30816 */
30817 +
30818 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
30819 + switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
30820 + VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
30821 + + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
30822 +#else
30823 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
30824 VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
30825 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
30826 +#endif
30827 +
30828 if (!switcher_vma) {
30829 err = -ENOMEM;
30830 printk("lguest: could not map switcher pages high\n");
30831 @@ -119,7 +127,7 @@ static __init int map_switcher(void)
30832 * Now the Switcher is mapped at the right address, we can't fail!
30833 * Copy in the compiled-in Switcher code (from x86/switcher_32.S).
30834 */
30835 - memcpy(switcher_vma->addr, start_switcher_text,
30836 + memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
30837 end_switcher_text - start_switcher_text);
30838
30839 printk(KERN_INFO "lguest: mapped switcher at %p\n",
30840 diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
30841 index 65af42f..530c87a 100644
30842 --- a/drivers/lguest/x86/core.c
30843 +++ b/drivers/lguest/x86/core.c
30844 @@ -59,7 +59,7 @@ static struct {
30845 /* Offset from where switcher.S was compiled to where we've copied it */
30846 static unsigned long switcher_offset(void)
30847 {
30848 - return SWITCHER_ADDR - (unsigned long)start_switcher_text;
30849 + return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
30850 }
30851
30852 /* This cpu's struct lguest_pages. */
30853 @@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
30854 * These copies are pretty cheap, so we do them unconditionally: */
30855 /* Save the current Host top-level page directory.
30856 */
30857 +
30858 +#ifdef CONFIG_PAX_PER_CPU_PGD
30859 + pages->state.host_cr3 = read_cr3();
30860 +#else
30861 pages->state.host_cr3 = __pa(current->mm->pgd);
30862 +#endif
30863 +
30864 /*
30865 * Set up the Guest's page tables to see this CPU's pages (and no
30866 * other CPU's pages).
30867 @@ -472,7 +478,7 @@ void __init lguest_arch_host_init(void)
30868 * compiled-in switcher code and the high-mapped copy we just made.
30869 */
30870 for (i = 0; i < IDT_ENTRIES; i++)
30871 - default_idt_entries[i] += switcher_offset();
30872 + default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
30873
30874 /*
30875 * Set up the Switcher's per-cpu areas.
30876 @@ -555,7 +561,7 @@ void __init lguest_arch_host_init(void)
30877 * it will be undisturbed when we switch. To change %cs and jump we
30878 * need this structure to feed to Intel's "lcall" instruction.
30879 */
30880 - lguest_entry.offset = (long)switch_to_guest + switcher_offset();
30881 + lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
30882 lguest_entry.segment = LGUEST_CS;
30883
30884 /*
30885 diff --git a/drivers/lguest/x86/switcher_32.S b/drivers/lguest/x86/switcher_32.S
30886 index 40634b0..4f5855e 100644
30887 --- a/drivers/lguest/x86/switcher_32.S
30888 +++ b/drivers/lguest/x86/switcher_32.S
30889 @@ -87,6 +87,7 @@
30890 #include <asm/page.h>
30891 #include <asm/segment.h>
30892 #include <asm/lguest.h>
30893 +#include <asm/processor-flags.h>
30894
30895 // We mark the start of the code to copy
30896 // It's placed in .text tho it's never run here
30897 @@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
30898 // Changes type when we load it: damn Intel!
30899 // For after we switch over our page tables
30900 // That entry will be read-only: we'd crash.
30901 +
30902 +#ifdef CONFIG_PAX_KERNEXEC
30903 + mov %cr0, %edx
30904 + xor $X86_CR0_WP, %edx
30905 + mov %edx, %cr0
30906 +#endif
30907 +
30908 movl $(GDT_ENTRY_TSS*8), %edx
30909 ltr %dx
30910
30911 @@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
30912 // Let's clear it again for our return.
30913 // The GDT descriptor of the Host
30914 // Points to the table after two "size" bytes
30915 - movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
30916 + movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
30917 // Clear "used" from type field (byte 5, bit 2)
30918 - andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
30919 + andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
30920 +
30921 +#ifdef CONFIG_PAX_KERNEXEC
30922 + mov %cr0, %eax
30923 + xor $X86_CR0_WP, %eax
30924 + mov %eax, %cr0
30925 +#endif
30926
30927 // Once our page table's switched, the Guest is live!
30928 // The Host fades as we run this final step.
30929 @@ -295,13 +309,12 @@ deliver_to_host:
30930 // I consulted gcc, and it gave
30931 // These instructions, which I gladly credit:
30932 leal (%edx,%ebx,8), %eax
30933 - movzwl (%eax),%edx
30934 - movl 4(%eax), %eax
30935 - xorw %ax, %ax
30936 - orl %eax, %edx
30937 + movl 4(%eax), %edx
30938 + movw (%eax), %dx
30939 // Now the address of the handler's in %edx
30940 // We call it now: its "iret" drops us home.
30941 - jmp *%edx
30942 + ljmp $__KERNEL_CS, $1f
30943 +1: jmp *%edx
30944
30945 // Every interrupt can come to us here
30946 // But we must truly tell each apart.
30947 diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c
30948 index 4daf9e5..b8d1d0f 100644
30949 --- a/drivers/macintosh/macio_asic.c
30950 +++ b/drivers/macintosh/macio_asic.c
30951 @@ -748,7 +748,7 @@ static void __devexit macio_pci_remove(struct pci_dev* pdev)
30952 * MacIO is matched against any Apple ID, it's probe() function
30953 * will then decide wether it applies or not
30954 */
30955 -static const struct pci_device_id __devinitdata pci_ids [] = { {
30956 +static const struct pci_device_id __devinitconst pci_ids [] = { {
30957 .vendor = PCI_VENDOR_ID_APPLE,
30958 .device = PCI_ANY_ID,
30959 .subvendor = PCI_ANY_ID,
30960 diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
30961 index 31c2dc2..a2de7a6 100644
30962 --- a/drivers/md/dm-ioctl.c
30963 +++ b/drivers/md/dm-ioctl.c
30964 @@ -1589,7 +1589,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
30965 cmd == DM_LIST_VERSIONS_CMD)
30966 return 0;
30967
30968 - if ((cmd == DM_DEV_CREATE_CMD)) {
30969 + if (cmd == DM_DEV_CREATE_CMD) {
30970 if (!*param->name) {
30971 DMWARN("name not supplied when creating device");
30972 return -EINVAL;
30973 diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
30974 index 9bfd057..01180bc 100644
30975 --- a/drivers/md/dm-raid1.c
30976 +++ b/drivers/md/dm-raid1.c
30977 @@ -40,7 +40,7 @@ enum dm_raid1_error {
30978
30979 struct mirror {
30980 struct mirror_set *ms;
30981 - atomic_t error_count;
30982 + atomic_unchecked_t error_count;
30983 unsigned long error_type;
30984 struct dm_dev *dev;
30985 sector_t offset;
30986 @@ -185,7 +185,7 @@ static struct mirror *get_valid_mirror(struct mirror_set *ms)
30987 struct mirror *m;
30988
30989 for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
30990 - if (!atomic_read(&m->error_count))
30991 + if (!atomic_read_unchecked(&m->error_count))
30992 return m;
30993
30994 return NULL;
30995 @@ -217,7 +217,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
30996 * simple way to tell if a device has encountered
30997 * errors.
30998 */
30999 - atomic_inc(&m->error_count);
31000 + atomic_inc_unchecked(&m->error_count);
31001
31002 if (test_and_set_bit(error_type, &m->error_type))
31003 return;
31004 @@ -408,7 +408,7 @@ static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
31005 struct mirror *m = get_default_mirror(ms);
31006
31007 do {
31008 - if (likely(!atomic_read(&m->error_count)))
31009 + if (likely(!atomic_read_unchecked(&m->error_count)))
31010 return m;
31011
31012 if (m-- == ms->mirror)
31013 @@ -422,7 +422,7 @@ static int default_ok(struct mirror *m)
31014 {
31015 struct mirror *default_mirror = get_default_mirror(m->ms);
31016
31017 - return !atomic_read(&default_mirror->error_count);
31018 + return !atomic_read_unchecked(&default_mirror->error_count);
31019 }
31020
31021 static int mirror_available(struct mirror_set *ms, struct bio *bio)
31022 @@ -559,7 +559,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
31023 */
31024 if (likely(region_in_sync(ms, region, 1)))
31025 m = choose_mirror(ms, bio->bi_sector);
31026 - else if (m && atomic_read(&m->error_count))
31027 + else if (m && atomic_read_unchecked(&m->error_count))
31028 m = NULL;
31029
31030 if (likely(m))
31031 @@ -937,7 +937,7 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
31032 }
31033
31034 ms->mirror[mirror].ms = ms;
31035 - atomic_set(&(ms->mirror[mirror].error_count), 0);
31036 + atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
31037 ms->mirror[mirror].error_type = 0;
31038 ms->mirror[mirror].offset = offset;
31039
31040 @@ -1347,7 +1347,7 @@ static void mirror_resume(struct dm_target *ti)
31041 */
31042 static char device_status_char(struct mirror *m)
31043 {
31044 - if (!atomic_read(&(m->error_count)))
31045 + if (!atomic_read_unchecked(&(m->error_count)))
31046 return 'A';
31047
31048 return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
31049 diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
31050 index 3d80cf0..b77cc47 100644
31051 --- a/drivers/md/dm-stripe.c
31052 +++ b/drivers/md/dm-stripe.c
31053 @@ -20,7 +20,7 @@ struct stripe {
31054 struct dm_dev *dev;
31055 sector_t physical_start;
31056
31057 - atomic_t error_count;
31058 + atomic_unchecked_t error_count;
31059 };
31060
31061 struct stripe_c {
31062 @@ -192,7 +192,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
31063 kfree(sc);
31064 return r;
31065 }
31066 - atomic_set(&(sc->stripe[i].error_count), 0);
31067 + atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
31068 }
31069
31070 ti->private = sc;
31071 @@ -314,7 +314,7 @@ static int stripe_status(struct dm_target *ti,
31072 DMEMIT("%d ", sc->stripes);
31073 for (i = 0; i < sc->stripes; i++) {
31074 DMEMIT("%s ", sc->stripe[i].dev->name);
31075 - buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
31076 + buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
31077 'D' : 'A';
31078 }
31079 buffer[i] = '\0';
31080 @@ -361,8 +361,8 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio,
31081 */
31082 for (i = 0; i < sc->stripes; i++)
31083 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
31084 - atomic_inc(&(sc->stripe[i].error_count));
31085 - if (atomic_read(&(sc->stripe[i].error_count)) <
31086 + atomic_inc_unchecked(&(sc->stripe[i].error_count));
31087 + if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
31088 DM_IO_ERROR_THRESHOLD)
31089 schedule_work(&sc->trigger_event);
31090 }
31091 diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
31092 index 8e91321..fd17aef 100644
31093 --- a/drivers/md/dm-table.c
31094 +++ b/drivers/md/dm-table.c
31095 @@ -391,7 +391,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
31096 if (!dev_size)
31097 return 0;
31098
31099 - if ((start >= dev_size) || (start + len > dev_size)) {
31100 + if ((start >= dev_size) || (len > dev_size - start)) {
31101 DMWARN("%s: %s too small for target: "
31102 "start=%llu, len=%llu, dev_size=%llu",
31103 dm_device_name(ti->table->md), bdevname(bdev, b),
31104 diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
31105 index 59c4f04..4c7b661 100644
31106 --- a/drivers/md/dm-thin-metadata.c
31107 +++ b/drivers/md/dm-thin-metadata.c
31108 @@ -431,7 +431,7 @@ static int init_pmd(struct dm_pool_metadata *pmd,
31109
31110 pmd->info.tm = tm;
31111 pmd->info.levels = 2;
31112 - pmd->info.value_type.context = pmd->data_sm;
31113 + pmd->info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
31114 pmd->info.value_type.size = sizeof(__le64);
31115 pmd->info.value_type.inc = data_block_inc;
31116 pmd->info.value_type.dec = data_block_dec;
31117 @@ -450,7 +450,7 @@ static int init_pmd(struct dm_pool_metadata *pmd,
31118
31119 pmd->bl_info.tm = tm;
31120 pmd->bl_info.levels = 1;
31121 - pmd->bl_info.value_type.context = pmd->data_sm;
31122 + pmd->bl_info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
31123 pmd->bl_info.value_type.size = sizeof(__le64);
31124 pmd->bl_info.value_type.inc = data_block_inc;
31125 pmd->bl_info.value_type.dec = data_block_dec;
31126 diff --git a/drivers/md/dm.c b/drivers/md/dm.c
31127 index 4720f68..78d1df7 100644
31128 --- a/drivers/md/dm.c
31129 +++ b/drivers/md/dm.c
31130 @@ -177,9 +177,9 @@ struct mapped_device {
31131 /*
31132 * Event handling.
31133 */
31134 - atomic_t event_nr;
31135 + atomic_unchecked_t event_nr;
31136 wait_queue_head_t eventq;
31137 - atomic_t uevent_seq;
31138 + atomic_unchecked_t uevent_seq;
31139 struct list_head uevent_list;
31140 spinlock_t uevent_lock; /* Protect access to uevent_list */
31141
31142 @@ -1845,8 +1845,8 @@ static struct mapped_device *alloc_dev(int minor)
31143 rwlock_init(&md->map_lock);
31144 atomic_set(&md->holders, 1);
31145 atomic_set(&md->open_count, 0);
31146 - atomic_set(&md->event_nr, 0);
31147 - atomic_set(&md->uevent_seq, 0);
31148 + atomic_set_unchecked(&md->event_nr, 0);
31149 + atomic_set_unchecked(&md->uevent_seq, 0);
31150 INIT_LIST_HEAD(&md->uevent_list);
31151 spin_lock_init(&md->uevent_lock);
31152
31153 @@ -1980,7 +1980,7 @@ static void event_callback(void *context)
31154
31155 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
31156
31157 - atomic_inc(&md->event_nr);
31158 + atomic_inc_unchecked(&md->event_nr);
31159 wake_up(&md->eventq);
31160 }
31161
31162 @@ -2622,18 +2622,18 @@ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
31163
31164 uint32_t dm_next_uevent_seq(struct mapped_device *md)
31165 {
31166 - return atomic_add_return(1, &md->uevent_seq);
31167 + return atomic_add_return_unchecked(1, &md->uevent_seq);
31168 }
31169
31170 uint32_t dm_get_event_nr(struct mapped_device *md)
31171 {
31172 - return atomic_read(&md->event_nr);
31173 + return atomic_read_unchecked(&md->event_nr);
31174 }
31175
31176 int dm_wait_event(struct mapped_device *md, int event_nr)
31177 {
31178 return wait_event_interruptible(md->eventq,
31179 - (event_nr != atomic_read(&md->event_nr)));
31180 + (event_nr != atomic_read_unchecked(&md->event_nr)));
31181 }
31182
31183 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
31184 diff --git a/drivers/md/md.c b/drivers/md/md.c
31185 index f47f1f8..b7f559e 100644
31186 --- a/drivers/md/md.c
31187 +++ b/drivers/md/md.c
31188 @@ -278,10 +278,10 @@ EXPORT_SYMBOL_GPL(md_trim_bio);
31189 * start build, activate spare
31190 */
31191 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
31192 -static atomic_t md_event_count;
31193 +static atomic_unchecked_t md_event_count;
31194 void md_new_event(struct mddev *mddev)
31195 {
31196 - atomic_inc(&md_event_count);
31197 + atomic_inc_unchecked(&md_event_count);
31198 wake_up(&md_event_waiters);
31199 }
31200 EXPORT_SYMBOL_GPL(md_new_event);
31201 @@ -291,7 +291,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
31202 */
31203 static void md_new_event_inintr(struct mddev *mddev)
31204 {
31205 - atomic_inc(&md_event_count);
31206 + atomic_inc_unchecked(&md_event_count);
31207 wake_up(&md_event_waiters);
31208 }
31209
31210 @@ -1525,7 +1525,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
31211
31212 rdev->preferred_minor = 0xffff;
31213 rdev->data_offset = le64_to_cpu(sb->data_offset);
31214 - atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
31215 + atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
31216
31217 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
31218 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
31219 @@ -1742,7 +1742,7 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
31220 else
31221 sb->resync_offset = cpu_to_le64(0);
31222
31223 - sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
31224 + sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
31225
31226 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
31227 sb->size = cpu_to_le64(mddev->dev_sectors);
31228 @@ -2639,7 +2639,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
31229 static ssize_t
31230 errors_show(struct md_rdev *rdev, char *page)
31231 {
31232 - return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
31233 + return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
31234 }
31235
31236 static ssize_t
31237 @@ -2648,7 +2648,7 @@ errors_store(struct md_rdev *rdev, const char *buf, size_t len)
31238 char *e;
31239 unsigned long n = simple_strtoul(buf, &e, 10);
31240 if (*buf && (*e == 0 || *e == '\n')) {
31241 - atomic_set(&rdev->corrected_errors, n);
31242 + atomic_set_unchecked(&rdev->corrected_errors, n);
31243 return len;
31244 }
31245 return -EINVAL;
31246 @@ -3039,8 +3039,8 @@ int md_rdev_init(struct md_rdev *rdev)
31247 rdev->sb_loaded = 0;
31248 rdev->bb_page = NULL;
31249 atomic_set(&rdev->nr_pending, 0);
31250 - atomic_set(&rdev->read_errors, 0);
31251 - atomic_set(&rdev->corrected_errors, 0);
31252 + atomic_set_unchecked(&rdev->read_errors, 0);
31253 + atomic_set_unchecked(&rdev->corrected_errors, 0);
31254
31255 INIT_LIST_HEAD(&rdev->same_set);
31256 init_waitqueue_head(&rdev->blocked_wait);
31257 @@ -6683,7 +6683,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
31258
31259 spin_unlock(&pers_lock);
31260 seq_printf(seq, "\n");
31261 - seq->poll_event = atomic_read(&md_event_count);
31262 + seq->poll_event = atomic_read_unchecked(&md_event_count);
31263 return 0;
31264 }
31265 if (v == (void*)2) {
31266 @@ -6772,7 +6772,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
31267 chunk_kb ? "KB" : "B");
31268 if (bitmap->file) {
31269 seq_printf(seq, ", file: ");
31270 - seq_path(seq, &bitmap->file->f_path, " \t\n");
31271 + seq_path(seq, &bitmap->file->f_path, " \t\n\\");
31272 }
31273
31274 seq_printf(seq, "\n");
31275 @@ -6803,7 +6803,7 @@ static int md_seq_open(struct inode *inode, struct file *file)
31276 return error;
31277
31278 seq = file->private_data;
31279 - seq->poll_event = atomic_read(&md_event_count);
31280 + seq->poll_event = atomic_read_unchecked(&md_event_count);
31281 return error;
31282 }
31283
31284 @@ -6817,7 +6817,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
31285 /* always allow read */
31286 mask = POLLIN | POLLRDNORM;
31287
31288 - if (seq->poll_event != atomic_read(&md_event_count))
31289 + if (seq->poll_event != atomic_read_unchecked(&md_event_count))
31290 mask |= POLLERR | POLLPRI;
31291 return mask;
31292 }
31293 @@ -6861,7 +6861,7 @@ static int is_mddev_idle(struct mddev *mddev, int init)
31294 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
31295 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
31296 (int)part_stat_read(&disk->part0, sectors[1]) -
31297 - atomic_read(&disk->sync_io);
31298 + atomic_read_unchecked(&disk->sync_io);
31299 /* sync IO will cause sync_io to increase before the disk_stats
31300 * as sync_io is counted when a request starts, and
31301 * disk_stats is counted when it completes.
31302 diff --git a/drivers/md/md.h b/drivers/md/md.h
31303 index cf742d9..7c7c745 100644
31304 --- a/drivers/md/md.h
31305 +++ b/drivers/md/md.h
31306 @@ -120,13 +120,13 @@ struct md_rdev {
31307 * only maintained for arrays that
31308 * support hot removal
31309 */
31310 - atomic_t read_errors; /* number of consecutive read errors that
31311 + atomic_unchecked_t read_errors; /* number of consecutive read errors that
31312 * we have tried to ignore.
31313 */
31314 struct timespec last_read_error; /* monotonic time since our
31315 * last read error
31316 */
31317 - atomic_t corrected_errors; /* number of corrected read errors,
31318 + atomic_unchecked_t corrected_errors; /* number of corrected read errors,
31319 * for reporting to userspace and storing
31320 * in superblock.
31321 */
31322 @@ -410,7 +410,7 @@ static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
31323
31324 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
31325 {
31326 - atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
31327 + atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
31328 }
31329
31330 struct md_personality
31331 diff --git a/drivers/md/persistent-data/dm-space-map-checker.c b/drivers/md/persistent-data/dm-space-map-checker.c
31332 index 50ed53b..4f29d7d 100644
31333 --- a/drivers/md/persistent-data/dm-space-map-checker.c
31334 +++ b/drivers/md/persistent-data/dm-space-map-checker.c
31335 @@ -159,7 +159,7 @@ static void ca_destroy(struct count_array *ca)
31336 /*----------------------------------------------------------------*/
31337
31338 struct sm_checker {
31339 - struct dm_space_map sm;
31340 + dm_space_map_no_const sm;
31341
31342 struct count_array old_counts;
31343 struct count_array counts;
31344 diff --git a/drivers/md/persistent-data/dm-space-map-disk.c b/drivers/md/persistent-data/dm-space-map-disk.c
31345 index fc469ba..2d91555 100644
31346 --- a/drivers/md/persistent-data/dm-space-map-disk.c
31347 +++ b/drivers/md/persistent-data/dm-space-map-disk.c
31348 @@ -23,7 +23,7 @@
31349 * Space map interface.
31350 */
31351 struct sm_disk {
31352 - struct dm_space_map sm;
31353 + dm_space_map_no_const sm;
31354
31355 struct ll_disk ll;
31356 struct ll_disk old_ll;
31357 diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
31358 index e89ae5e..062e4c2 100644
31359 --- a/drivers/md/persistent-data/dm-space-map-metadata.c
31360 +++ b/drivers/md/persistent-data/dm-space-map-metadata.c
31361 @@ -43,7 +43,7 @@ struct block_op {
31362 };
31363
31364 struct sm_metadata {
31365 - struct dm_space_map sm;
31366 + dm_space_map_no_const sm;
31367
31368 struct ll_disk ll;
31369 struct ll_disk old_ll;
31370 diff --git a/drivers/md/persistent-data/dm-space-map.h b/drivers/md/persistent-data/dm-space-map.h
31371 index 1cbfc6b..56e1dbb 100644
31372 --- a/drivers/md/persistent-data/dm-space-map.h
31373 +++ b/drivers/md/persistent-data/dm-space-map.h
31374 @@ -60,6 +60,7 @@ struct dm_space_map {
31375 int (*root_size)(struct dm_space_map *sm, size_t *result);
31376 int (*copy_root)(struct dm_space_map *sm, void *copy_to_here_le, size_t len);
31377 };
31378 +typedef struct dm_space_map __no_const dm_space_map_no_const;
31379
31380 /*----------------------------------------------------------------*/
31381
31382 diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
31383 index 7d9e071..015b1d5 100644
31384 --- a/drivers/md/raid1.c
31385 +++ b/drivers/md/raid1.c
31386 @@ -1568,7 +1568,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
31387 if (r1_sync_page_io(rdev, sect, s,
31388 bio->bi_io_vec[idx].bv_page,
31389 READ) != 0)
31390 - atomic_add(s, &rdev->corrected_errors);
31391 + atomic_add_unchecked(s, &rdev->corrected_errors);
31392 }
31393 sectors -= s;
31394 sect += s;
31395 @@ -1781,7 +1781,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
31396 test_bit(In_sync, &rdev->flags)) {
31397 if (r1_sync_page_io(rdev, sect, s,
31398 conf->tmppage, READ)) {
31399 - atomic_add(s, &rdev->corrected_errors);
31400 + atomic_add_unchecked(s, &rdev->corrected_errors);
31401 printk(KERN_INFO
31402 "md/raid1:%s: read error corrected "
31403 "(%d sectors at %llu on %s)\n",
31404 diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
31405 index 685ddf3..955b087 100644
31406 --- a/drivers/md/raid10.c
31407 +++ b/drivers/md/raid10.c
31408 @@ -1440,7 +1440,7 @@ static void end_sync_read(struct bio *bio, int error)
31409 /* The write handler will notice the lack of
31410 * R10BIO_Uptodate and record any errors etc
31411 */
31412 - atomic_add(r10_bio->sectors,
31413 + atomic_add_unchecked(r10_bio->sectors,
31414 &conf->mirrors[d].rdev->corrected_errors);
31415
31416 /* for reconstruct, we always reschedule after a read.
31417 @@ -1740,7 +1740,7 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
31418 {
31419 struct timespec cur_time_mon;
31420 unsigned long hours_since_last;
31421 - unsigned int read_errors = atomic_read(&rdev->read_errors);
31422 + unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors);
31423
31424 ktime_get_ts(&cur_time_mon);
31425
31426 @@ -1762,9 +1762,9 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
31427 * overflowing the shift of read_errors by hours_since_last.
31428 */
31429 if (hours_since_last >= 8 * sizeof(read_errors))
31430 - atomic_set(&rdev->read_errors, 0);
31431 + atomic_set_unchecked(&rdev->read_errors, 0);
31432 else
31433 - atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
31434 + atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last);
31435 }
31436
31437 static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
31438 @@ -1814,8 +1814,8 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
31439 return;
31440
31441 check_decay_read_errors(mddev, rdev);
31442 - atomic_inc(&rdev->read_errors);
31443 - if (atomic_read(&rdev->read_errors) > max_read_errors) {
31444 + atomic_inc_unchecked(&rdev->read_errors);
31445 + if (atomic_read_unchecked(&rdev->read_errors) > max_read_errors) {
31446 char b[BDEVNAME_SIZE];
31447 bdevname(rdev->bdev, b);
31448
31449 @@ -1823,7 +1823,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
31450 "md/raid10:%s: %s: Raid device exceeded "
31451 "read_error threshold [cur %d:max %d]\n",
31452 mdname(mddev), b,
31453 - atomic_read(&rdev->read_errors), max_read_errors);
31454 + atomic_read_unchecked(&rdev->read_errors), max_read_errors);
31455 printk(KERN_NOTICE
31456 "md/raid10:%s: %s: Failing raid device\n",
31457 mdname(mddev), b);
31458 @@ -1968,7 +1968,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
31459 (unsigned long long)(
31460 sect + rdev->data_offset),
31461 bdevname(rdev->bdev, b));
31462 - atomic_add(s, &rdev->corrected_errors);
31463 + atomic_add_unchecked(s, &rdev->corrected_errors);
31464 }
31465
31466 rdev_dec_pending(rdev, mddev);
31467 diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
31468 index 858fdbb..b2dac95 100644
31469 --- a/drivers/md/raid5.c
31470 +++ b/drivers/md/raid5.c
31471 @@ -1610,19 +1610,19 @@ static void raid5_end_read_request(struct bio * bi, int error)
31472 (unsigned long long)(sh->sector
31473 + rdev->data_offset),
31474 bdevname(rdev->bdev, b));
31475 - atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
31476 + atomic_add_unchecked(STRIPE_SECTORS, &rdev->corrected_errors);
31477 clear_bit(R5_ReadError, &sh->dev[i].flags);
31478 clear_bit(R5_ReWrite, &sh->dev[i].flags);
31479 }
31480 - if (atomic_read(&conf->disks[i].rdev->read_errors))
31481 - atomic_set(&conf->disks[i].rdev->read_errors, 0);
31482 + if (atomic_read_unchecked(&conf->disks[i].rdev->read_errors))
31483 + atomic_set_unchecked(&conf->disks[i].rdev->read_errors, 0);
31484 } else {
31485 const char *bdn = bdevname(conf->disks[i].rdev->bdev, b);
31486 int retry = 0;
31487 rdev = conf->disks[i].rdev;
31488
31489 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
31490 - atomic_inc(&rdev->read_errors);
31491 + atomic_inc_unchecked(&rdev->read_errors);
31492 if (conf->mddev->degraded >= conf->max_degraded)
31493 printk_ratelimited(
31494 KERN_WARNING
31495 @@ -1642,7 +1642,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
31496 (unsigned long long)(sh->sector
31497 + rdev->data_offset),
31498 bdn);
31499 - else if (atomic_read(&rdev->read_errors)
31500 + else if (atomic_read_unchecked(&rdev->read_errors)
31501 > conf->max_nr_stripes)
31502 printk(KERN_WARNING
31503 "md/raid:%s: Too many read errors, failing device %s.\n",
31504 diff --git a/drivers/media/dvb/ddbridge/ddbridge-core.c b/drivers/media/dvb/ddbridge/ddbridge-core.c
31505 index ba9a643..e474ab5 100644
31506 --- a/drivers/media/dvb/ddbridge/ddbridge-core.c
31507 +++ b/drivers/media/dvb/ddbridge/ddbridge-core.c
31508 @@ -1678,7 +1678,7 @@ static struct ddb_info ddb_v6 = {
31509 .subvendor = _subvend, .subdevice = _subdev, \
31510 .driver_data = (unsigned long)&_driverdata }
31511
31512 -static const struct pci_device_id ddb_id_tbl[] __devinitdata = {
31513 +static const struct pci_device_id ddb_id_tbl[] __devinitconst = {
31514 DDB_ID(DDVID, 0x0002, DDVID, 0x0001, ddb_octopus),
31515 DDB_ID(DDVID, 0x0003, DDVID, 0x0001, ddb_octopus),
31516 DDB_ID(DDVID, 0x0003, DDVID, 0x0002, ddb_octopus_le),
31517 diff --git a/drivers/media/dvb/dvb-core/dvb_demux.h b/drivers/media/dvb/dvb-core/dvb_demux.h
31518 index a7d876f..8c21b61 100644
31519 --- a/drivers/media/dvb/dvb-core/dvb_demux.h
31520 +++ b/drivers/media/dvb/dvb-core/dvb_demux.h
31521 @@ -73,7 +73,7 @@ struct dvb_demux_feed {
31522 union {
31523 dmx_ts_cb ts;
31524 dmx_section_cb sec;
31525 - } cb;
31526 + } __no_const cb;
31527
31528 struct dvb_demux *demux;
31529 void *priv;
31530 diff --git a/drivers/media/dvb/dvb-core/dvbdev.c b/drivers/media/dvb/dvb-core/dvbdev.c
31531 index f732877..d38c35a 100644
31532 --- a/drivers/media/dvb/dvb-core/dvbdev.c
31533 +++ b/drivers/media/dvb/dvb-core/dvbdev.c
31534 @@ -192,7 +192,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
31535 const struct dvb_device *template, void *priv, int type)
31536 {
31537 struct dvb_device *dvbdev;
31538 - struct file_operations *dvbdevfops;
31539 + file_operations_no_const *dvbdevfops;
31540 struct device *clsdev;
31541 int minor;
31542 int id;
31543 diff --git a/drivers/media/dvb/dvb-usb/cxusb.c b/drivers/media/dvb/dvb-usb/cxusb.c
31544 index 9f2a02c..5920f88 100644
31545 --- a/drivers/media/dvb/dvb-usb/cxusb.c
31546 +++ b/drivers/media/dvb/dvb-usb/cxusb.c
31547 @@ -1069,7 +1069,7 @@ static struct dib0070_config dib7070p_dib0070_config = {
31548 struct dib0700_adapter_state {
31549 int (*set_param_save) (struct dvb_frontend *,
31550 struct dvb_frontend_parameters *);
31551 -};
31552 +} __no_const;
31553
31554 static int dib7070_set_param_override(struct dvb_frontend *fe,
31555 struct dvb_frontend_parameters *fep)
31556 diff --git a/drivers/media/dvb/dvb-usb/dw2102.c b/drivers/media/dvb/dvb-usb/dw2102.c
31557 index f103ec1..5e8968b 100644
31558 --- a/drivers/media/dvb/dvb-usb/dw2102.c
31559 +++ b/drivers/media/dvb/dvb-usb/dw2102.c
31560 @@ -95,7 +95,7 @@ struct su3000_state {
31561
31562 struct s6x0_state {
31563 int (*old_set_voltage)(struct dvb_frontend *f, fe_sec_voltage_t v);
31564 -};
31565 +} __no_const;
31566
31567 /* debug */
31568 static int dvb_usb_dw2102_debug;
31569 diff --git a/drivers/media/dvb/frontends/dib3000.h b/drivers/media/dvb/frontends/dib3000.h
31570 index 404f63a..4796533 100644
31571 --- a/drivers/media/dvb/frontends/dib3000.h
31572 +++ b/drivers/media/dvb/frontends/dib3000.h
31573 @@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
31574 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
31575 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
31576 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
31577 -};
31578 +} __no_const;
31579
31580 #if defined(CONFIG_DVB_DIB3000MB) || (defined(CONFIG_DVB_DIB3000MB_MODULE) && defined(MODULE))
31581 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
31582 diff --git a/drivers/media/dvb/frontends/ds3000.c b/drivers/media/dvb/frontends/ds3000.c
31583 index 90bf573..e8463da 100644
31584 --- a/drivers/media/dvb/frontends/ds3000.c
31585 +++ b/drivers/media/dvb/frontends/ds3000.c
31586 @@ -1210,7 +1210,7 @@ static int ds3000_set_frontend(struct dvb_frontend *fe,
31587
31588 for (i = 0; i < 30 ; i++) {
31589 ds3000_read_status(fe, &status);
31590 - if (status && FE_HAS_LOCK)
31591 + if (status & FE_HAS_LOCK)
31592 break;
31593
31594 msleep(10);
31595 diff --git a/drivers/media/dvb/ngene/ngene-cards.c b/drivers/media/dvb/ngene/ngene-cards.c
31596 index 0564192..75b16f5 100644
31597 --- a/drivers/media/dvb/ngene/ngene-cards.c
31598 +++ b/drivers/media/dvb/ngene/ngene-cards.c
31599 @@ -477,7 +477,7 @@ static struct ngene_info ngene_info_m780 = {
31600
31601 /****************************************************************************/
31602
31603 -static const struct pci_device_id ngene_id_tbl[] __devinitdata = {
31604 +static const struct pci_device_id ngene_id_tbl[] __devinitconst = {
31605 NGENE_ID(0x18c3, 0xabc3, ngene_info_cineS2),
31606 NGENE_ID(0x18c3, 0xabc4, ngene_info_cineS2),
31607 NGENE_ID(0x18c3, 0xdb01, ngene_info_satixS2),
31608 diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
31609 index 16a089f..ab1667d 100644
31610 --- a/drivers/media/radio/radio-cadet.c
31611 +++ b/drivers/media/radio/radio-cadet.c
31612 @@ -326,6 +326,8 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
31613 unsigned char readbuf[RDS_BUFFER];
31614 int i = 0;
31615
31616 + if (count > RDS_BUFFER)
31617 + return -EFAULT;
31618 mutex_lock(&dev->lock);
31619 if (dev->rdsstat == 0) {
31620 dev->rdsstat = 1;
31621 diff --git a/drivers/media/rc/redrat3.c b/drivers/media/rc/redrat3.c
31622 index 61287fc..8b08712 100644
31623 --- a/drivers/media/rc/redrat3.c
31624 +++ b/drivers/media/rc/redrat3.c
31625 @@ -905,7 +905,7 @@ static int redrat3_set_tx_carrier(struct rc_dev *dev, u32 carrier)
31626 return carrier;
31627 }
31628
31629 -static int redrat3_transmit_ir(struct rc_dev *rcdev, int *txbuf, u32 n)
31630 +static int redrat3_transmit_ir(struct rc_dev *rcdev, unsigned *txbuf, u32 n)
31631 {
31632 struct redrat3_dev *rr3 = rcdev->priv;
31633 struct device *dev = rr3->dev;
31634 diff --git a/drivers/media/video/au0828/au0828.h b/drivers/media/video/au0828/au0828.h
31635 index 9cde353..8c6a1c3 100644
31636 --- a/drivers/media/video/au0828/au0828.h
31637 +++ b/drivers/media/video/au0828/au0828.h
31638 @@ -191,7 +191,7 @@ struct au0828_dev {
31639
31640 /* I2C */
31641 struct i2c_adapter i2c_adap;
31642 - struct i2c_algorithm i2c_algo;
31643 + i2c_algorithm_no_const i2c_algo;
31644 struct i2c_client i2c_client;
31645 u32 i2c_rc;
31646
31647 diff --git a/drivers/media/video/cx88/cx88-alsa.c b/drivers/media/video/cx88/cx88-alsa.c
31648 index 68d1240..46b32eb 100644
31649 --- a/drivers/media/video/cx88/cx88-alsa.c
31650 +++ b/drivers/media/video/cx88/cx88-alsa.c
31651 @@ -766,7 +766,7 @@ static struct snd_kcontrol_new snd_cx88_alc_switch = {
31652 * Only boards with eeprom and byte 1 at eeprom=1 have it
31653 */
31654
31655 -static const struct pci_device_id const cx88_audio_pci_tbl[] __devinitdata = {
31656 +static const struct pci_device_id const cx88_audio_pci_tbl[] __devinitconst = {
31657 {0x14f1,0x8801,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
31658 {0x14f1,0x8811,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
31659 {0, }
31660 diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
31661 index 305e6aa..0143317 100644
31662 --- a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
31663 +++ b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
31664 @@ -196,7 +196,7 @@ struct pvr2_hdw {
31665
31666 /* I2C stuff */
31667 struct i2c_adapter i2c_adap;
31668 - struct i2c_algorithm i2c_algo;
31669 + i2c_algorithm_no_const i2c_algo;
31670 pvr2_i2c_func i2c_func[PVR2_I2C_FUNC_CNT];
31671 int i2c_cx25840_hack_state;
31672 int i2c_linked;
31673 diff --git a/drivers/media/video/timblogiw.c b/drivers/media/video/timblogiw.c
31674 index a0895bf..b7ebb1b 100644
31675 --- a/drivers/media/video/timblogiw.c
31676 +++ b/drivers/media/video/timblogiw.c
31677 @@ -745,7 +745,7 @@ static int timblogiw_mmap(struct file *file, struct vm_area_struct *vma)
31678
31679 /* Platform device functions */
31680
31681 -static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
31682 +static __devinitconst v4l2_ioctl_ops_no_const timblogiw_ioctl_ops = {
31683 .vidioc_querycap = timblogiw_querycap,
31684 .vidioc_enum_fmt_vid_cap = timblogiw_enum_fmt,
31685 .vidioc_g_fmt_vid_cap = timblogiw_g_fmt,
31686 @@ -767,7 +767,7 @@ static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
31687 .vidioc_enum_framesizes = timblogiw_enum_framesizes,
31688 };
31689
31690 -static __devinitconst struct v4l2_file_operations timblogiw_fops = {
31691 +static __devinitconst v4l2_file_operations_no_const timblogiw_fops = {
31692 .owner = THIS_MODULE,
31693 .open = timblogiw_open,
31694 .release = timblogiw_close,
31695 diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
31696 index e9c6a60..daf6a33 100644
31697 --- a/drivers/message/fusion/mptbase.c
31698 +++ b/drivers/message/fusion/mptbase.c
31699 @@ -6753,8 +6753,13 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
31700 seq_printf(m, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
31701 seq_printf(m, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
31702
31703 +#ifdef CONFIG_GRKERNSEC_HIDESYM
31704 + seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL);
31705 +#else
31706 seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
31707 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
31708 +#endif
31709 +
31710 /*
31711 * Rounding UP to nearest 4-kB boundary here...
31712 */
31713 diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
31714 index 9d95042..b808101 100644
31715 --- a/drivers/message/fusion/mptsas.c
31716 +++ b/drivers/message/fusion/mptsas.c
31717 @@ -446,6 +446,23 @@ mptsas_is_end_device(struct mptsas_devinfo * attached)
31718 return 0;
31719 }
31720
31721 +static inline void
31722 +mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
31723 +{
31724 + if (phy_info->port_details) {
31725 + phy_info->port_details->rphy = rphy;
31726 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
31727 + ioc->name, rphy));
31728 + }
31729 +
31730 + if (rphy) {
31731 + dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
31732 + &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
31733 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
31734 + ioc->name, rphy, rphy->dev.release));
31735 + }
31736 +}
31737 +
31738 /* no mutex */
31739 static void
31740 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
31741 @@ -484,23 +501,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *phy_info)
31742 return NULL;
31743 }
31744
31745 -static inline void
31746 -mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
31747 -{
31748 - if (phy_info->port_details) {
31749 - phy_info->port_details->rphy = rphy;
31750 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
31751 - ioc->name, rphy));
31752 - }
31753 -
31754 - if (rphy) {
31755 - dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
31756 - &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
31757 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
31758 - ioc->name, rphy, rphy->dev.release));
31759 - }
31760 -}
31761 -
31762 static inline struct sas_port *
31763 mptsas_get_port(struct mptsas_phyinfo *phy_info)
31764 {
31765 diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
31766 index 0c3ced7..1fe34ec 100644
31767 --- a/drivers/message/fusion/mptscsih.c
31768 +++ b/drivers/message/fusion/mptscsih.c
31769 @@ -1270,15 +1270,16 @@ mptscsih_info(struct Scsi_Host *SChost)
31770
31771 h = shost_priv(SChost);
31772
31773 - if (h) {
31774 - if (h->info_kbuf == NULL)
31775 - if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
31776 - return h->info_kbuf;
31777 - h->info_kbuf[0] = '\0';
31778 + if (!h)
31779 + return NULL;
31780
31781 - mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
31782 - h->info_kbuf[size-1] = '\0';
31783 - }
31784 + if (h->info_kbuf == NULL)
31785 + if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
31786 + return h->info_kbuf;
31787 + h->info_kbuf[0] = '\0';
31788 +
31789 + mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
31790 + h->info_kbuf[size-1] = '\0';
31791
31792 return h->info_kbuf;
31793 }
31794 diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
31795 index 07dbeaf..5533142 100644
31796 --- a/drivers/message/i2o/i2o_proc.c
31797 +++ b/drivers/message/i2o/i2o_proc.c
31798 @@ -255,13 +255,6 @@ static char *scsi_devices[] = {
31799 "Array Controller Device"
31800 };
31801
31802 -static char *chtostr(u8 * chars, int n)
31803 -{
31804 - char tmp[256];
31805 - tmp[0] = 0;
31806 - return strncat(tmp, (char *)chars, n);
31807 -}
31808 -
31809 static int i2o_report_query_status(struct seq_file *seq, int block_status,
31810 char *group)
31811 {
31812 @@ -838,8 +831,7 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
31813
31814 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
31815 seq_printf(seq, "%-#8x", ddm_table.module_id);
31816 - seq_printf(seq, "%-29s",
31817 - chtostr(ddm_table.module_name_version, 28));
31818 + seq_printf(seq, "%-.28s", ddm_table.module_name_version);
31819 seq_printf(seq, "%9d ", ddm_table.data_size);
31820 seq_printf(seq, "%8d", ddm_table.code_size);
31821
31822 @@ -940,8 +932,8 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
31823
31824 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
31825 seq_printf(seq, "%-#8x", dst->module_id);
31826 - seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28));
31827 - seq_printf(seq, "%-9s", chtostr(dst->date, 8));
31828 + seq_printf(seq, "%-.28s", dst->module_name_version);
31829 + seq_printf(seq, "%-.8s", dst->date);
31830 seq_printf(seq, "%8d ", dst->module_size);
31831 seq_printf(seq, "%8d ", dst->mpb_size);
31832 seq_printf(seq, "0x%04x", dst->module_flags);
31833 @@ -1272,14 +1264,10 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
31834 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
31835 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
31836 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
31837 - seq_printf(seq, "Vendor info : %s\n",
31838 - chtostr((u8 *) (work32 + 2), 16));
31839 - seq_printf(seq, "Product info : %s\n",
31840 - chtostr((u8 *) (work32 + 6), 16));
31841 - seq_printf(seq, "Description : %s\n",
31842 - chtostr((u8 *) (work32 + 10), 16));
31843 - seq_printf(seq, "Product rev. : %s\n",
31844 - chtostr((u8 *) (work32 + 14), 8));
31845 + seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
31846 + seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
31847 + seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
31848 + seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
31849
31850 seq_printf(seq, "Serial number : ");
31851 print_serial_number(seq, (u8 *) (work32 + 16),
31852 @@ -1324,10 +1312,8 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
31853 }
31854
31855 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
31856 - seq_printf(seq, "Module name : %s\n",
31857 - chtostr(result.module_name, 24));
31858 - seq_printf(seq, "Module revision : %s\n",
31859 - chtostr(result.module_rev, 8));
31860 + seq_printf(seq, "Module name : %.24s\n", result.module_name);
31861 + seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
31862
31863 seq_printf(seq, "Serial number : ");
31864 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
31865 @@ -1358,14 +1344,10 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
31866 return 0;
31867 }
31868
31869 - seq_printf(seq, "Device name : %s\n",
31870 - chtostr(result.device_name, 64));
31871 - seq_printf(seq, "Service name : %s\n",
31872 - chtostr(result.service_name, 64));
31873 - seq_printf(seq, "Physical name : %s\n",
31874 - chtostr(result.physical_location, 64));
31875 - seq_printf(seq, "Instance number : %s\n",
31876 - chtostr(result.instance_number, 4));
31877 + seq_printf(seq, "Device name : %.64s\n", result.device_name);
31878 + seq_printf(seq, "Service name : %.64s\n", result.service_name);
31879 + seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
31880 + seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
31881
31882 return 0;
31883 }
31884 diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
31885 index a8c08f3..155fe3d 100644
31886 --- a/drivers/message/i2o/iop.c
31887 +++ b/drivers/message/i2o/iop.c
31888 @@ -111,10 +111,10 @@ u32 i2o_cntxt_list_add(struct i2o_controller * c, void *ptr)
31889
31890 spin_lock_irqsave(&c->context_list_lock, flags);
31891
31892 - if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
31893 - atomic_inc(&c->context_list_counter);
31894 + if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
31895 + atomic_inc_unchecked(&c->context_list_counter);
31896
31897 - entry->context = atomic_read(&c->context_list_counter);
31898 + entry->context = atomic_read_unchecked(&c->context_list_counter);
31899
31900 list_add(&entry->list, &c->context_list);
31901
31902 @@ -1077,7 +1077,7 @@ struct i2o_controller *i2o_iop_alloc(void)
31903
31904 #if BITS_PER_LONG == 64
31905 spin_lock_init(&c->context_list_lock);
31906 - atomic_set(&c->context_list_counter, 0);
31907 + atomic_set_unchecked(&c->context_list_counter, 0);
31908 INIT_LIST_HEAD(&c->context_list);
31909 #endif
31910
31911 diff --git a/drivers/mfd/abx500-core.c b/drivers/mfd/abx500-core.c
31912 index 7ce65f4..e66e9bc 100644
31913 --- a/drivers/mfd/abx500-core.c
31914 +++ b/drivers/mfd/abx500-core.c
31915 @@ -15,7 +15,7 @@ static LIST_HEAD(abx500_list);
31916
31917 struct abx500_device_entry {
31918 struct list_head list;
31919 - struct abx500_ops ops;
31920 + abx500_ops_no_const ops;
31921 struct device *dev;
31922 };
31923
31924 diff --git a/drivers/mfd/janz-cmodio.c b/drivers/mfd/janz-cmodio.c
31925 index 5c2a06a..8fa077c 100644
31926 --- a/drivers/mfd/janz-cmodio.c
31927 +++ b/drivers/mfd/janz-cmodio.c
31928 @@ -13,6 +13,7 @@
31929
31930 #include <linux/kernel.h>
31931 #include <linux/module.h>
31932 +#include <linux/slab.h>
31933 #include <linux/init.h>
31934 #include <linux/pci.h>
31935 #include <linux/interrupt.h>
31936 diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
31937 index 29d12a7..f900ba4 100644
31938 --- a/drivers/misc/lis3lv02d/lis3lv02d.c
31939 +++ b/drivers/misc/lis3lv02d/lis3lv02d.c
31940 @@ -464,7 +464,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *data)
31941 * the lid is closed. This leads to interrupts as soon as a little move
31942 * is done.
31943 */
31944 - atomic_inc(&lis3->count);
31945 + atomic_inc_unchecked(&lis3->count);
31946
31947 wake_up_interruptible(&lis3->misc_wait);
31948 kill_fasync(&lis3->async_queue, SIGIO, POLL_IN);
31949 @@ -550,7 +550,7 @@ static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
31950 if (lis3->pm_dev)
31951 pm_runtime_get_sync(lis3->pm_dev);
31952
31953 - atomic_set(&lis3->count, 0);
31954 + atomic_set_unchecked(&lis3->count, 0);
31955 return 0;
31956 }
31957
31958 @@ -583,7 +583,7 @@ static ssize_t lis3lv02d_misc_read(struct file *file, char __user *buf,
31959 add_wait_queue(&lis3->misc_wait, &wait);
31960 while (true) {
31961 set_current_state(TASK_INTERRUPTIBLE);
31962 - data = atomic_xchg(&lis3->count, 0);
31963 + data = atomic_xchg_unchecked(&lis3->count, 0);
31964 if (data)
31965 break;
31966
31967 @@ -624,7 +624,7 @@ static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
31968 struct lis3lv02d, miscdev);
31969
31970 poll_wait(file, &lis3->misc_wait, wait);
31971 - if (atomic_read(&lis3->count))
31972 + if (atomic_read_unchecked(&lis3->count))
31973 return POLLIN | POLLRDNORM;
31974 return 0;
31975 }
31976 diff --git a/drivers/misc/lis3lv02d/lis3lv02d.h b/drivers/misc/lis3lv02d/lis3lv02d.h
31977 index 2b1482a..5d33616 100644
31978 --- a/drivers/misc/lis3lv02d/lis3lv02d.h
31979 +++ b/drivers/misc/lis3lv02d/lis3lv02d.h
31980 @@ -266,7 +266,7 @@ struct lis3lv02d {
31981 struct input_polled_dev *idev; /* input device */
31982 struct platform_device *pdev; /* platform device */
31983 struct regulator_bulk_data regulators[2];
31984 - atomic_t count; /* interrupt count after last read */
31985 + atomic_unchecked_t count; /* interrupt count after last read */
31986 union axis_conversion ac; /* hw -> logical axis */
31987 int mapped_btns[3];
31988
31989 diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c
31990 index 2f30bad..c4c13d0 100644
31991 --- a/drivers/misc/sgi-gru/gruhandles.c
31992 +++ b/drivers/misc/sgi-gru/gruhandles.c
31993 @@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op op, unsigned long clks)
31994 unsigned long nsec;
31995
31996 nsec = CLKS2NSEC(clks);
31997 - atomic_long_inc(&mcs_op_statistics[op].count);
31998 - atomic_long_add(nsec, &mcs_op_statistics[op].total);
31999 + atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
32000 + atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total);
32001 if (mcs_op_statistics[op].max < nsec)
32002 mcs_op_statistics[op].max = nsec;
32003 }
32004 diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
32005 index 7768b87..f8aac38 100644
32006 --- a/drivers/misc/sgi-gru/gruprocfs.c
32007 +++ b/drivers/misc/sgi-gru/gruprocfs.c
32008 @@ -32,9 +32,9 @@
32009
32010 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
32011
32012 -static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
32013 +static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
32014 {
32015 - unsigned long val = atomic_long_read(v);
32016 + unsigned long val = atomic_long_read_unchecked(v);
32017
32018 seq_printf(s, "%16lu %s\n", val, id);
32019 }
32020 @@ -134,8 +134,8 @@ static int mcs_statistics_show(struct seq_file *s, void *p)
32021
32022 seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
32023 for (op = 0; op < mcsop_last; op++) {
32024 - count = atomic_long_read(&mcs_op_statistics[op].count);
32025 - total = atomic_long_read(&mcs_op_statistics[op].total);
32026 + count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
32027 + total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
32028 max = mcs_op_statistics[op].max;
32029 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
32030 count ? total / count : 0, max);
32031 diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
32032 index 5c3ce24..4915ccb 100644
32033 --- a/drivers/misc/sgi-gru/grutables.h
32034 +++ b/drivers/misc/sgi-gru/grutables.h
32035 @@ -167,82 +167,82 @@ extern unsigned int gru_max_gids;
32036 * GRU statistics.
32037 */
32038 struct gru_stats_s {
32039 - atomic_long_t vdata_alloc;
32040 - atomic_long_t vdata_free;
32041 - atomic_long_t gts_alloc;
32042 - atomic_long_t gts_free;
32043 - atomic_long_t gms_alloc;
32044 - atomic_long_t gms_free;
32045 - atomic_long_t gts_double_allocate;
32046 - atomic_long_t assign_context;
32047 - atomic_long_t assign_context_failed;
32048 - atomic_long_t free_context;
32049 - atomic_long_t load_user_context;
32050 - atomic_long_t load_kernel_context;
32051 - atomic_long_t lock_kernel_context;
32052 - atomic_long_t unlock_kernel_context;
32053 - atomic_long_t steal_user_context;
32054 - atomic_long_t steal_kernel_context;
32055 - atomic_long_t steal_context_failed;
32056 - atomic_long_t nopfn;
32057 - atomic_long_t asid_new;
32058 - atomic_long_t asid_next;
32059 - atomic_long_t asid_wrap;
32060 - atomic_long_t asid_reuse;
32061 - atomic_long_t intr;
32062 - atomic_long_t intr_cbr;
32063 - atomic_long_t intr_tfh;
32064 - atomic_long_t intr_spurious;
32065 - atomic_long_t intr_mm_lock_failed;
32066 - atomic_long_t call_os;
32067 - atomic_long_t call_os_wait_queue;
32068 - atomic_long_t user_flush_tlb;
32069 - atomic_long_t user_unload_context;
32070 - atomic_long_t user_exception;
32071 - atomic_long_t set_context_option;
32072 - atomic_long_t check_context_retarget_intr;
32073 - atomic_long_t check_context_unload;
32074 - atomic_long_t tlb_dropin;
32075 - atomic_long_t tlb_preload_page;
32076 - atomic_long_t tlb_dropin_fail_no_asid;
32077 - atomic_long_t tlb_dropin_fail_upm;
32078 - atomic_long_t tlb_dropin_fail_invalid;
32079 - atomic_long_t tlb_dropin_fail_range_active;
32080 - atomic_long_t tlb_dropin_fail_idle;
32081 - atomic_long_t tlb_dropin_fail_fmm;
32082 - atomic_long_t tlb_dropin_fail_no_exception;
32083 - atomic_long_t tfh_stale_on_fault;
32084 - atomic_long_t mmu_invalidate_range;
32085 - atomic_long_t mmu_invalidate_page;
32086 - atomic_long_t flush_tlb;
32087 - atomic_long_t flush_tlb_gru;
32088 - atomic_long_t flush_tlb_gru_tgh;
32089 - atomic_long_t flush_tlb_gru_zero_asid;
32090 + atomic_long_unchecked_t vdata_alloc;
32091 + atomic_long_unchecked_t vdata_free;
32092 + atomic_long_unchecked_t gts_alloc;
32093 + atomic_long_unchecked_t gts_free;
32094 + atomic_long_unchecked_t gms_alloc;
32095 + atomic_long_unchecked_t gms_free;
32096 + atomic_long_unchecked_t gts_double_allocate;
32097 + atomic_long_unchecked_t assign_context;
32098 + atomic_long_unchecked_t assign_context_failed;
32099 + atomic_long_unchecked_t free_context;
32100 + atomic_long_unchecked_t load_user_context;
32101 + atomic_long_unchecked_t load_kernel_context;
32102 + atomic_long_unchecked_t lock_kernel_context;
32103 + atomic_long_unchecked_t unlock_kernel_context;
32104 + atomic_long_unchecked_t steal_user_context;
32105 + atomic_long_unchecked_t steal_kernel_context;
32106 + atomic_long_unchecked_t steal_context_failed;
32107 + atomic_long_unchecked_t nopfn;
32108 + atomic_long_unchecked_t asid_new;
32109 + atomic_long_unchecked_t asid_next;
32110 + atomic_long_unchecked_t asid_wrap;
32111 + atomic_long_unchecked_t asid_reuse;
32112 + atomic_long_unchecked_t intr;
32113 + atomic_long_unchecked_t intr_cbr;
32114 + atomic_long_unchecked_t intr_tfh;
32115 + atomic_long_unchecked_t intr_spurious;
32116 + atomic_long_unchecked_t intr_mm_lock_failed;
32117 + atomic_long_unchecked_t call_os;
32118 + atomic_long_unchecked_t call_os_wait_queue;
32119 + atomic_long_unchecked_t user_flush_tlb;
32120 + atomic_long_unchecked_t user_unload_context;
32121 + atomic_long_unchecked_t user_exception;
32122 + atomic_long_unchecked_t set_context_option;
32123 + atomic_long_unchecked_t check_context_retarget_intr;
32124 + atomic_long_unchecked_t check_context_unload;
32125 + atomic_long_unchecked_t tlb_dropin;
32126 + atomic_long_unchecked_t tlb_preload_page;
32127 + atomic_long_unchecked_t tlb_dropin_fail_no_asid;
32128 + atomic_long_unchecked_t tlb_dropin_fail_upm;
32129 + atomic_long_unchecked_t tlb_dropin_fail_invalid;
32130 + atomic_long_unchecked_t tlb_dropin_fail_range_active;
32131 + atomic_long_unchecked_t tlb_dropin_fail_idle;
32132 + atomic_long_unchecked_t tlb_dropin_fail_fmm;
32133 + atomic_long_unchecked_t tlb_dropin_fail_no_exception;
32134 + atomic_long_unchecked_t tfh_stale_on_fault;
32135 + atomic_long_unchecked_t mmu_invalidate_range;
32136 + atomic_long_unchecked_t mmu_invalidate_page;
32137 + atomic_long_unchecked_t flush_tlb;
32138 + atomic_long_unchecked_t flush_tlb_gru;
32139 + atomic_long_unchecked_t flush_tlb_gru_tgh;
32140 + atomic_long_unchecked_t flush_tlb_gru_zero_asid;
32141
32142 - atomic_long_t copy_gpa;
32143 - atomic_long_t read_gpa;
32144 + atomic_long_unchecked_t copy_gpa;
32145 + atomic_long_unchecked_t read_gpa;
32146
32147 - atomic_long_t mesq_receive;
32148 - atomic_long_t mesq_receive_none;
32149 - atomic_long_t mesq_send;
32150 - atomic_long_t mesq_send_failed;
32151 - atomic_long_t mesq_noop;
32152 - atomic_long_t mesq_send_unexpected_error;
32153 - atomic_long_t mesq_send_lb_overflow;
32154 - atomic_long_t mesq_send_qlimit_reached;
32155 - atomic_long_t mesq_send_amo_nacked;
32156 - atomic_long_t mesq_send_put_nacked;
32157 - atomic_long_t mesq_page_overflow;
32158 - atomic_long_t mesq_qf_locked;
32159 - atomic_long_t mesq_qf_noop_not_full;
32160 - atomic_long_t mesq_qf_switch_head_failed;
32161 - atomic_long_t mesq_qf_unexpected_error;
32162 - atomic_long_t mesq_noop_unexpected_error;
32163 - atomic_long_t mesq_noop_lb_overflow;
32164 - atomic_long_t mesq_noop_qlimit_reached;
32165 - atomic_long_t mesq_noop_amo_nacked;
32166 - atomic_long_t mesq_noop_put_nacked;
32167 - atomic_long_t mesq_noop_page_overflow;
32168 + atomic_long_unchecked_t mesq_receive;
32169 + atomic_long_unchecked_t mesq_receive_none;
32170 + atomic_long_unchecked_t mesq_send;
32171 + atomic_long_unchecked_t mesq_send_failed;
32172 + atomic_long_unchecked_t mesq_noop;
32173 + atomic_long_unchecked_t mesq_send_unexpected_error;
32174 + atomic_long_unchecked_t mesq_send_lb_overflow;
32175 + atomic_long_unchecked_t mesq_send_qlimit_reached;
32176 + atomic_long_unchecked_t mesq_send_amo_nacked;
32177 + atomic_long_unchecked_t mesq_send_put_nacked;
32178 + atomic_long_unchecked_t mesq_page_overflow;
32179 + atomic_long_unchecked_t mesq_qf_locked;
32180 + atomic_long_unchecked_t mesq_qf_noop_not_full;
32181 + atomic_long_unchecked_t mesq_qf_switch_head_failed;
32182 + atomic_long_unchecked_t mesq_qf_unexpected_error;
32183 + atomic_long_unchecked_t mesq_noop_unexpected_error;
32184 + atomic_long_unchecked_t mesq_noop_lb_overflow;
32185 + atomic_long_unchecked_t mesq_noop_qlimit_reached;
32186 + atomic_long_unchecked_t mesq_noop_amo_nacked;
32187 + atomic_long_unchecked_t mesq_noop_put_nacked;
32188 + atomic_long_unchecked_t mesq_noop_page_overflow;
32189
32190 };
32191
32192 @@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync,
32193 tghop_invalidate, mcsop_last};
32194
32195 struct mcs_op_statistic {
32196 - atomic_long_t count;
32197 - atomic_long_t total;
32198 + atomic_long_unchecked_t count;
32199 + atomic_long_unchecked_t total;
32200 unsigned long max;
32201 };
32202
32203 @@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
32204
32205 #define STAT(id) do { \
32206 if (gru_options & OPT_STATS) \
32207 - atomic_long_inc(&gru_stats.id); \
32208 + atomic_long_inc_unchecked(&gru_stats.id); \
32209 } while (0)
32210
32211 #ifdef CONFIG_SGI_GRU_DEBUG
32212 diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
32213 index 851b2f2..a4ec097 100644
32214 --- a/drivers/misc/sgi-xp/xp.h
32215 +++ b/drivers/misc/sgi-xp/xp.h
32216 @@ -289,7 +289,7 @@ struct xpc_interface {
32217 xpc_notify_func, void *);
32218 void (*received) (short, int, void *);
32219 enum xp_retval (*partid_to_nasids) (short, void *);
32220 -};
32221 +} __no_const;
32222
32223 extern struct xpc_interface xpc_interface;
32224
32225 diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
32226 index b94d5f7..7f494c5 100644
32227 --- a/drivers/misc/sgi-xp/xpc.h
32228 +++ b/drivers/misc/sgi-xp/xpc.h
32229 @@ -835,6 +835,7 @@ struct xpc_arch_operations {
32230 void (*received_payload) (struct xpc_channel *, void *);
32231 void (*notify_senders_of_disconnect) (struct xpc_channel *);
32232 };
32233 +typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const;
32234
32235 /* struct xpc_partition act_state values (for XPC HB) */
32236
32237 @@ -876,7 +877,7 @@ extern struct xpc_registration xpc_registrations[];
32238 /* found in xpc_main.c */
32239 extern struct device *xpc_part;
32240 extern struct device *xpc_chan;
32241 -extern struct xpc_arch_operations xpc_arch_ops;
32242 +extern xpc_arch_operations_no_const xpc_arch_ops;
32243 extern int xpc_disengage_timelimit;
32244 extern int xpc_disengage_timedout;
32245 extern int xpc_activate_IRQ_rcvd;
32246 diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
32247 index 8d082b4..aa749ae 100644
32248 --- a/drivers/misc/sgi-xp/xpc_main.c
32249 +++ b/drivers/misc/sgi-xp/xpc_main.c
32250 @@ -162,7 +162,7 @@ static struct notifier_block xpc_die_notifier = {
32251 .notifier_call = xpc_system_die,
32252 };
32253
32254 -struct xpc_arch_operations xpc_arch_ops;
32255 +xpc_arch_operations_no_const xpc_arch_ops;
32256
32257 /*
32258 * Timer function to enforce the timelimit on the partition disengage.
32259 diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
32260 index 6878a94..fe5c5f1 100644
32261 --- a/drivers/mmc/host/sdhci-pci.c
32262 +++ b/drivers/mmc/host/sdhci-pci.c
32263 @@ -673,7 +673,7 @@ static const struct sdhci_pci_fixes sdhci_via = {
32264 .probe = via_probe,
32265 };
32266
32267 -static const struct pci_device_id pci_ids[] __devinitdata = {
32268 +static const struct pci_device_id pci_ids[] __devinitconst = {
32269 {
32270 .vendor = PCI_VENDOR_ID_RICOH,
32271 .device = PCI_DEVICE_ID_RICOH_R5C822,
32272 diff --git a/drivers/mtd/devices/doc2000.c b/drivers/mtd/devices/doc2000.c
32273 index e9fad91..0a7a16a 100644
32274 --- a/drivers/mtd/devices/doc2000.c
32275 +++ b/drivers/mtd/devices/doc2000.c
32276 @@ -773,7 +773,7 @@ static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
32277
32278 /* The ECC will not be calculated correctly if less than 512 is written */
32279 /* DBB-
32280 - if (len != 0x200 && eccbuf)
32281 + if (len != 0x200)
32282 printk(KERN_WARNING
32283 "ECC needs a full sector write (adr: %lx size %lx)\n",
32284 (long) to, (long) len);
32285 diff --git a/drivers/mtd/devices/doc2001.c b/drivers/mtd/devices/doc2001.c
32286 index a3f7a27..234016e 100644
32287 --- a/drivers/mtd/devices/doc2001.c
32288 +++ b/drivers/mtd/devices/doc2001.c
32289 @@ -392,7 +392,7 @@ static int doc_read (struct mtd_info *mtd, loff_t from, size_t len,
32290 struct Nand *mychip = &this->chips[from >> (this->chipshift)];
32291
32292 /* Don't allow read past end of device */
32293 - if (from >= this->totlen)
32294 + if (from >= this->totlen || !len)
32295 return -EINVAL;
32296
32297 /* Don't allow a single read to cross a 512-byte block boundary */
32298 diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
32299 index 3984d48..28aa897 100644
32300 --- a/drivers/mtd/nand/denali.c
32301 +++ b/drivers/mtd/nand/denali.c
32302 @@ -26,6 +26,7 @@
32303 #include <linux/pci.h>
32304 #include <linux/mtd/mtd.h>
32305 #include <linux/module.h>
32306 +#include <linux/slab.h>
32307
32308 #include "denali.h"
32309
32310 diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
32311 index ac40925..483b753 100644
32312 --- a/drivers/mtd/nftlmount.c
32313 +++ b/drivers/mtd/nftlmount.c
32314 @@ -24,6 +24,7 @@
32315 #include <asm/errno.h>
32316 #include <linux/delay.h>
32317 #include <linux/slab.h>
32318 +#include <linux/sched.h>
32319 #include <linux/mtd/mtd.h>
32320 #include <linux/mtd/nand.h>
32321 #include <linux/mtd/nftl.h>
32322 diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
32323 index 6c3fb5a..c542a81 100644
32324 --- a/drivers/mtd/ubi/build.c
32325 +++ b/drivers/mtd/ubi/build.c
32326 @@ -1311,7 +1311,7 @@ module_exit(ubi_exit);
32327 static int __init bytes_str_to_int(const char *str)
32328 {
32329 char *endp;
32330 - unsigned long result;
32331 + unsigned long result, scale = 1;
32332
32333 result = simple_strtoul(str, &endp, 0);
32334 if (str == endp || result >= INT_MAX) {
32335 @@ -1322,11 +1322,11 @@ static int __init bytes_str_to_int(const char *str)
32336
32337 switch (*endp) {
32338 case 'G':
32339 - result *= 1024;
32340 + scale *= 1024;
32341 case 'M':
32342 - result *= 1024;
32343 + scale *= 1024;
32344 case 'K':
32345 - result *= 1024;
32346 + scale *= 1024;
32347 if (endp[1] == 'i' && endp[2] == 'B')
32348 endp += 2;
32349 case '\0':
32350 @@ -1337,7 +1337,13 @@ static int __init bytes_str_to_int(const char *str)
32351 return -EINVAL;
32352 }
32353
32354 - return result;
32355 + if ((intoverflow_t)result*scale >= INT_MAX) {
32356 + printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
32357 + str);
32358 + return -EINVAL;
32359 + }
32360 +
32361 + return result*scale;
32362 }
32363
32364 /**
32365 diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
32366 index 1feae59..c2a61d2 100644
32367 --- a/drivers/net/ethernet/atheros/atlx/atl2.c
32368 +++ b/drivers/net/ethernet/atheros/atlx/atl2.c
32369 @@ -2857,7 +2857,7 @@ static void atl2_force_ps(struct atl2_hw *hw)
32370 */
32371
32372 #define ATL2_PARAM(X, desc) \
32373 - static const int __devinitdata X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
32374 + static const int __devinitconst X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
32375 MODULE_PARM(X, "1-" __MODULE_STRING(ATL2_MAX_NIC) "i"); \
32376 MODULE_PARM_DESC(X, desc);
32377 #else
32378 diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
32379 index 9a517c2..a50cfcb 100644
32380 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
32381 +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
32382 @@ -449,7 +449,7 @@ struct bnx2x_rx_mode_obj {
32383
32384 int (*wait_comp)(struct bnx2x *bp,
32385 struct bnx2x_rx_mode_ramrod_params *p);
32386 -};
32387 +} __no_const;
32388
32389 /********************** Set multicast group ***********************************/
32390
32391 diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
32392 index 94b4bd0..73c02de 100644
32393 --- a/drivers/net/ethernet/broadcom/tg3.h
32394 +++ b/drivers/net/ethernet/broadcom/tg3.h
32395 @@ -134,6 +134,7 @@
32396 #define CHIPREV_ID_5750_A0 0x4000
32397 #define CHIPREV_ID_5750_A1 0x4001
32398 #define CHIPREV_ID_5750_A3 0x4003
32399 +#define CHIPREV_ID_5750_C1 0x4201
32400 #define CHIPREV_ID_5750_C2 0x4202
32401 #define CHIPREV_ID_5752_A0_HW 0x5000
32402 #define CHIPREV_ID_5752_A0 0x6000
32403 diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
32404 index c5f5479..2e8c260 100644
32405 --- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h
32406 +++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
32407 @@ -87,7 +87,7 @@ typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
32408 */
32409 struct l2t_skb_cb {
32410 arp_failure_handler_func arp_failure_handler;
32411 -};
32412 +} __no_const;
32413
32414 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
32415
32416 diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
32417 index 871bcaa..4043505 100644
32418 --- a/drivers/net/ethernet/dec/tulip/de4x5.c
32419 +++ b/drivers/net/ethernet/dec/tulip/de4x5.c
32420 @@ -5397,7 +5397,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
32421 for (i=0; i<ETH_ALEN; i++) {
32422 tmp.addr[i] = dev->dev_addr[i];
32423 }
32424 - if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
32425 + if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
32426 break;
32427
32428 case DE4X5_SET_HWADDR: /* Set the hardware address */
32429 @@ -5437,7 +5437,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
32430 spin_lock_irqsave(&lp->lock, flags);
32431 memcpy(&statbuf, &lp->pktStats, ioc->len);
32432 spin_unlock_irqrestore(&lp->lock, flags);
32433 - if (copy_to_user(ioc->data, &statbuf, ioc->len))
32434 + if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
32435 return -EFAULT;
32436 break;
32437 }
32438 diff --git a/drivers/net/ethernet/dec/tulip/eeprom.c b/drivers/net/ethernet/dec/tulip/eeprom.c
32439 index 14d5b61..1398636 100644
32440 --- a/drivers/net/ethernet/dec/tulip/eeprom.c
32441 +++ b/drivers/net/ethernet/dec/tulip/eeprom.c
32442 @@ -79,7 +79,7 @@ static struct eeprom_fixup eeprom_fixups[] __devinitdata = {
32443 {NULL}};
32444
32445
32446 -static const char *block_name[] __devinitdata = {
32447 +static const char *block_name[] __devinitconst = {
32448 "21140 non-MII",
32449 "21140 MII PHY",
32450 "21142 Serial PHY",
32451 diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c
32452 index 4d01219..b58d26d 100644
32453 --- a/drivers/net/ethernet/dec/tulip/winbond-840.c
32454 +++ b/drivers/net/ethernet/dec/tulip/winbond-840.c
32455 @@ -236,7 +236,7 @@ struct pci_id_info {
32456 int drv_flags; /* Driver use, intended as capability flags. */
32457 };
32458
32459 -static const struct pci_id_info pci_id_tbl[] __devinitdata = {
32460 +static const struct pci_id_info pci_id_tbl[] __devinitconst = {
32461 { /* Sometime a Level-One switch card. */
32462 "Winbond W89c840", CanHaveMII | HasBrokenTx | FDXOnNoMII},
32463 { "Winbond W89c840", CanHaveMII | HasBrokenTx},
32464 diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c
32465 index dcd7f7a..ecb7fb3 100644
32466 --- a/drivers/net/ethernet/dlink/sundance.c
32467 +++ b/drivers/net/ethernet/dlink/sundance.c
32468 @@ -218,7 +218,7 @@ enum {
32469 struct pci_id_info {
32470 const char *name;
32471 };
32472 -static const struct pci_id_info pci_id_tbl[] __devinitdata = {
32473 +static const struct pci_id_info pci_id_tbl[] __devinitconst = {
32474 {"D-Link DFE-550TX FAST Ethernet Adapter"},
32475 {"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
32476 {"D-Link DFE-580TX 4 port Server Adapter"},
32477 diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
32478 index bf266a0..e024af7 100644
32479 --- a/drivers/net/ethernet/emulex/benet/be_main.c
32480 +++ b/drivers/net/ethernet/emulex/benet/be_main.c
32481 @@ -397,7 +397,7 @@ static void accumulate_16bit_val(u32 *acc, u16 val)
32482
32483 if (wrapped)
32484 newacc += 65536;
32485 - ACCESS_ONCE(*acc) = newacc;
32486 + ACCESS_ONCE_RW(*acc) = newacc;
32487 }
32488
32489 void be_parse_stats(struct be_adapter *adapter)
32490 diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c
32491 index 61d2bdd..7f1154a 100644
32492 --- a/drivers/net/ethernet/fealnx.c
32493 +++ b/drivers/net/ethernet/fealnx.c
32494 @@ -150,7 +150,7 @@ struct chip_info {
32495 int flags;
32496 };
32497
32498 -static const struct chip_info skel_netdrv_tbl[] __devinitdata = {
32499 +static const struct chip_info skel_netdrv_tbl[] __devinitconst = {
32500 { "100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
32501 { "100/10M Ethernet PCI Adapter", HAS_CHIP_XCVR },
32502 { "1000/100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
32503 diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.c b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
32504 index e1159e5..e18684d 100644
32505 --- a/drivers/net/ethernet/intel/e1000e/80003es2lan.c
32506 +++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
32507 @@ -205,7 +205,7 @@ static s32 e1000_init_mac_params_80003es2lan(struct e1000_adapter *adapter)
32508 {
32509 struct e1000_hw *hw = &adapter->hw;
32510 struct e1000_mac_info *mac = &hw->mac;
32511 - struct e1000_mac_operations *func = &mac->ops;
32512 + e1000_mac_operations_no_const *func = &mac->ops;
32513
32514 /* Set media type */
32515 switch (adapter->pdev->device) {
32516 diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c
32517 index a3e65fd..f451444 100644
32518 --- a/drivers/net/ethernet/intel/e1000e/82571.c
32519 +++ b/drivers/net/ethernet/intel/e1000e/82571.c
32520 @@ -239,7 +239,7 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
32521 {
32522 struct e1000_hw *hw = &adapter->hw;
32523 struct e1000_mac_info *mac = &hw->mac;
32524 - struct e1000_mac_operations *func = &mac->ops;
32525 + e1000_mac_operations_no_const *func = &mac->ops;
32526 u32 swsm = 0;
32527 u32 swsm2 = 0;
32528 bool force_clear_smbi = false;
32529 diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h
32530 index 2967039..ca8c40c 100644
32531 --- a/drivers/net/ethernet/intel/e1000e/hw.h
32532 +++ b/drivers/net/ethernet/intel/e1000e/hw.h
32533 @@ -778,6 +778,7 @@ struct e1000_mac_operations {
32534 void (*write_vfta)(struct e1000_hw *, u32, u32);
32535 s32 (*read_mac_addr)(struct e1000_hw *);
32536 };
32537 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
32538
32539 /*
32540 * When to use various PHY register access functions:
32541 @@ -818,6 +819,7 @@ struct e1000_phy_operations {
32542 void (*power_up)(struct e1000_hw *);
32543 void (*power_down)(struct e1000_hw *);
32544 };
32545 +typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
32546
32547 /* Function pointers for the NVM. */
32548 struct e1000_nvm_operations {
32549 @@ -829,9 +831,10 @@ struct e1000_nvm_operations {
32550 s32 (*validate)(struct e1000_hw *);
32551 s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
32552 };
32553 +typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
32554
32555 struct e1000_mac_info {
32556 - struct e1000_mac_operations ops;
32557 + e1000_mac_operations_no_const ops;
32558 u8 addr[ETH_ALEN];
32559 u8 perm_addr[ETH_ALEN];
32560
32561 @@ -872,7 +875,7 @@ struct e1000_mac_info {
32562 };
32563
32564 struct e1000_phy_info {
32565 - struct e1000_phy_operations ops;
32566 + e1000_phy_operations_no_const ops;
32567
32568 enum e1000_phy_type type;
32569
32570 @@ -906,7 +909,7 @@ struct e1000_phy_info {
32571 };
32572
32573 struct e1000_nvm_info {
32574 - struct e1000_nvm_operations ops;
32575 + e1000_nvm_operations_no_const ops;
32576
32577 enum e1000_nvm_type type;
32578 enum e1000_nvm_override override;
32579 diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h
32580 index 4519a13..f97fcd0 100644
32581 --- a/drivers/net/ethernet/intel/igb/e1000_hw.h
32582 +++ b/drivers/net/ethernet/intel/igb/e1000_hw.h
32583 @@ -314,6 +314,7 @@ struct e1000_mac_operations {
32584 s32 (*read_mac_addr)(struct e1000_hw *);
32585 s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
32586 };
32587 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
32588
32589 struct e1000_phy_operations {
32590 s32 (*acquire)(struct e1000_hw *);
32591 @@ -330,6 +331,7 @@ struct e1000_phy_operations {
32592 s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
32593 s32 (*write_reg)(struct e1000_hw *, u32, u16);
32594 };
32595 +typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
32596
32597 struct e1000_nvm_operations {
32598 s32 (*acquire)(struct e1000_hw *);
32599 @@ -339,6 +341,7 @@ struct e1000_nvm_operations {
32600 s32 (*update)(struct e1000_hw *);
32601 s32 (*validate)(struct e1000_hw *);
32602 };
32603 +typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
32604
32605 struct e1000_info {
32606 s32 (*get_invariants)(struct e1000_hw *);
32607 @@ -350,7 +353,7 @@ struct e1000_info {
32608 extern const struct e1000_info e1000_82575_info;
32609
32610 struct e1000_mac_info {
32611 - struct e1000_mac_operations ops;
32612 + e1000_mac_operations_no_const ops;
32613
32614 u8 addr[6];
32615 u8 perm_addr[6];
32616 @@ -388,7 +391,7 @@ struct e1000_mac_info {
32617 };
32618
32619 struct e1000_phy_info {
32620 - struct e1000_phy_operations ops;
32621 + e1000_phy_operations_no_const ops;
32622
32623 enum e1000_phy_type type;
32624
32625 @@ -423,7 +426,7 @@ struct e1000_phy_info {
32626 };
32627
32628 struct e1000_nvm_info {
32629 - struct e1000_nvm_operations ops;
32630 + e1000_nvm_operations_no_const ops;
32631 enum e1000_nvm_type type;
32632 enum e1000_nvm_override override;
32633
32634 @@ -468,6 +471,7 @@ struct e1000_mbx_operations {
32635 s32 (*check_for_ack)(struct e1000_hw *, u16);
32636 s32 (*check_for_rst)(struct e1000_hw *, u16);
32637 };
32638 +typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
32639
32640 struct e1000_mbx_stats {
32641 u32 msgs_tx;
32642 @@ -479,7 +483,7 @@ struct e1000_mbx_stats {
32643 };
32644
32645 struct e1000_mbx_info {
32646 - struct e1000_mbx_operations ops;
32647 + e1000_mbx_operations_no_const ops;
32648 struct e1000_mbx_stats stats;
32649 u32 timeout;
32650 u32 usec_delay;
32651 diff --git a/drivers/net/ethernet/intel/igbvf/vf.h b/drivers/net/ethernet/intel/igbvf/vf.h
32652 index d7ed58f..64cde36 100644
32653 --- a/drivers/net/ethernet/intel/igbvf/vf.h
32654 +++ b/drivers/net/ethernet/intel/igbvf/vf.h
32655 @@ -189,9 +189,10 @@ struct e1000_mac_operations {
32656 s32 (*read_mac_addr)(struct e1000_hw *);
32657 s32 (*set_vfta)(struct e1000_hw *, u16, bool);
32658 };
32659 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
32660
32661 struct e1000_mac_info {
32662 - struct e1000_mac_operations ops;
32663 + e1000_mac_operations_no_const ops;
32664 u8 addr[6];
32665 u8 perm_addr[6];
32666
32667 @@ -213,6 +214,7 @@ struct e1000_mbx_operations {
32668 s32 (*check_for_ack)(struct e1000_hw *);
32669 s32 (*check_for_rst)(struct e1000_hw *);
32670 };
32671 +typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
32672
32673 struct e1000_mbx_stats {
32674 u32 msgs_tx;
32675 @@ -224,7 +226,7 @@ struct e1000_mbx_stats {
32676 };
32677
32678 struct e1000_mbx_info {
32679 - struct e1000_mbx_operations ops;
32680 + e1000_mbx_operations_no_const ops;
32681 struct e1000_mbx_stats stats;
32682 u32 timeout;
32683 u32 usec_delay;
32684 diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
32685 index 6c5cca8..de8ef63 100644
32686 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
32687 +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
32688 @@ -2708,6 +2708,7 @@ struct ixgbe_eeprom_operations {
32689 s32 (*update_checksum)(struct ixgbe_hw *);
32690 u16 (*calc_checksum)(struct ixgbe_hw *);
32691 };
32692 +typedef struct ixgbe_eeprom_operations __no_const ixgbe_eeprom_operations_no_const;
32693
32694 struct ixgbe_mac_operations {
32695 s32 (*init_hw)(struct ixgbe_hw *);
32696 @@ -2769,6 +2770,7 @@ struct ixgbe_mac_operations {
32697 /* Manageability interface */
32698 s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8);
32699 };
32700 +typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
32701
32702 struct ixgbe_phy_operations {
32703 s32 (*identify)(struct ixgbe_hw *);
32704 @@ -2788,9 +2790,10 @@ struct ixgbe_phy_operations {
32705 s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
32706 s32 (*check_overtemp)(struct ixgbe_hw *);
32707 };
32708 +typedef struct ixgbe_phy_operations __no_const ixgbe_phy_operations_no_const;
32709
32710 struct ixgbe_eeprom_info {
32711 - struct ixgbe_eeprom_operations ops;
32712 + ixgbe_eeprom_operations_no_const ops;
32713 enum ixgbe_eeprom_type type;
32714 u32 semaphore_delay;
32715 u16 word_size;
32716 @@ -2800,7 +2803,7 @@ struct ixgbe_eeprom_info {
32717
32718 #define IXGBE_FLAGS_DOUBLE_RESET_REQUIRED 0x01
32719 struct ixgbe_mac_info {
32720 - struct ixgbe_mac_operations ops;
32721 + ixgbe_mac_operations_no_const ops;
32722 enum ixgbe_mac_type type;
32723 u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
32724 u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
32725 @@ -2828,7 +2831,7 @@ struct ixgbe_mac_info {
32726 };
32727
32728 struct ixgbe_phy_info {
32729 - struct ixgbe_phy_operations ops;
32730 + ixgbe_phy_operations_no_const ops;
32731 struct mdio_if_info mdio;
32732 enum ixgbe_phy_type type;
32733 u32 id;
32734 @@ -2856,6 +2859,7 @@ struct ixgbe_mbx_operations {
32735 s32 (*check_for_ack)(struct ixgbe_hw *, u16);
32736 s32 (*check_for_rst)(struct ixgbe_hw *, u16);
32737 };
32738 +typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
32739
32740 struct ixgbe_mbx_stats {
32741 u32 msgs_tx;
32742 @@ -2867,7 +2871,7 @@ struct ixgbe_mbx_stats {
32743 };
32744
32745 struct ixgbe_mbx_info {
32746 - struct ixgbe_mbx_operations ops;
32747 + ixgbe_mbx_operations_no_const ops;
32748 struct ixgbe_mbx_stats stats;
32749 u32 timeout;
32750 u32 usec_delay;
32751 diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h
32752 index 10306b4..28df758 100644
32753 --- a/drivers/net/ethernet/intel/ixgbevf/vf.h
32754 +++ b/drivers/net/ethernet/intel/ixgbevf/vf.h
32755 @@ -70,6 +70,7 @@ struct ixgbe_mac_operations {
32756 s32 (*clear_vfta)(struct ixgbe_hw *);
32757 s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool);
32758 };
32759 +typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
32760
32761 enum ixgbe_mac_type {
32762 ixgbe_mac_unknown = 0,
32763 @@ -79,7 +80,7 @@ enum ixgbe_mac_type {
32764 };
32765
32766 struct ixgbe_mac_info {
32767 - struct ixgbe_mac_operations ops;
32768 + ixgbe_mac_operations_no_const ops;
32769 u8 addr[6];
32770 u8 perm_addr[6];
32771
32772 @@ -103,6 +104,7 @@ struct ixgbe_mbx_operations {
32773 s32 (*check_for_ack)(struct ixgbe_hw *);
32774 s32 (*check_for_rst)(struct ixgbe_hw *);
32775 };
32776 +typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
32777
32778 struct ixgbe_mbx_stats {
32779 u32 msgs_tx;
32780 @@ -114,7 +116,7 @@ struct ixgbe_mbx_stats {
32781 };
32782
32783 struct ixgbe_mbx_info {
32784 - struct ixgbe_mbx_operations ops;
32785 + ixgbe_mbx_operations_no_const ops;
32786 struct ixgbe_mbx_stats stats;
32787 u32 timeout;
32788 u32 udelay;
32789 diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
32790 index 94bbc85..78c12e6 100644
32791 --- a/drivers/net/ethernet/mellanox/mlx4/main.c
32792 +++ b/drivers/net/ethernet/mellanox/mlx4/main.c
32793 @@ -40,6 +40,7 @@
32794 #include <linux/dma-mapping.h>
32795 #include <linux/slab.h>
32796 #include <linux/io-mapping.h>
32797 +#include <linux/sched.h>
32798
32799 #include <linux/mlx4/device.h>
32800 #include <linux/mlx4/doorbell.h>
32801 diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.h b/drivers/net/ethernet/neterion/vxge/vxge-config.h
32802 index 5046a64..71ca936 100644
32803 --- a/drivers/net/ethernet/neterion/vxge/vxge-config.h
32804 +++ b/drivers/net/ethernet/neterion/vxge/vxge-config.h
32805 @@ -514,7 +514,7 @@ struct vxge_hw_uld_cbs {
32806 void (*link_down)(struct __vxge_hw_device *devh);
32807 void (*crit_err)(struct __vxge_hw_device *devh,
32808 enum vxge_hw_event type, u64 ext_data);
32809 -};
32810 +} __no_const;
32811
32812 /*
32813 * struct __vxge_hw_blockpool_entry - Block private data structure
32814 diff --git a/drivers/net/ethernet/neterion/vxge/vxge-traffic.h b/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
32815 index 4a518a3..936b334 100644
32816 --- a/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
32817 +++ b/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
32818 @@ -2088,7 +2088,7 @@ struct vxge_hw_mempool_cbs {
32819 struct vxge_hw_mempool_dma *dma_object,
32820 u32 index,
32821 u32 is_last);
32822 -};
32823 +} __no_const;
32824
32825 #define VXGE_HW_VIRTUAL_PATH_HANDLE(vpath) \
32826 ((struct __vxge_hw_vpath_handle *)(vpath)->vpath_handles.next)
32827 diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
32828 index c8f47f1..5da9840 100644
32829 --- a/drivers/net/ethernet/realtek/r8169.c
32830 +++ b/drivers/net/ethernet/realtek/r8169.c
32831 @@ -698,17 +698,17 @@ struct rtl8169_private {
32832 struct mdio_ops {
32833 void (*write)(void __iomem *, int, int);
32834 int (*read)(void __iomem *, int);
32835 - } mdio_ops;
32836 + } __no_const mdio_ops;
32837
32838 struct pll_power_ops {
32839 void (*down)(struct rtl8169_private *);
32840 void (*up)(struct rtl8169_private *);
32841 - } pll_power_ops;
32842 + } __no_const pll_power_ops;
32843
32844 struct jumbo_ops {
32845 void (*enable)(struct rtl8169_private *);
32846 void (*disable)(struct rtl8169_private *);
32847 - } jumbo_ops;
32848 + } __no_const jumbo_ops;
32849
32850 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
32851 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
32852 diff --git a/drivers/net/ethernet/sis/sis190.c b/drivers/net/ethernet/sis/sis190.c
32853 index 1b4658c..a30dabb 100644
32854 --- a/drivers/net/ethernet/sis/sis190.c
32855 +++ b/drivers/net/ethernet/sis/sis190.c
32856 @@ -1624,7 +1624,7 @@ static int __devinit sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev,
32857 static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
32858 struct net_device *dev)
32859 {
32860 - static const u16 __devinitdata ids[] = { 0x0965, 0x0966, 0x0968 };
32861 + static const u16 __devinitconst ids[] = { 0x0965, 0x0966, 0x0968 };
32862 struct sis190_private *tp = netdev_priv(dev);
32863 struct pci_dev *isa_bridge;
32864 u8 reg, tmp8;
32865 diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
32866 index edfa15d..002bfa9 100644
32867 --- a/drivers/net/ppp/ppp_generic.c
32868 +++ b/drivers/net/ppp/ppp_generic.c
32869 @@ -987,7 +987,6 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
32870 void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
32871 struct ppp_stats stats;
32872 struct ppp_comp_stats cstats;
32873 - char *vers;
32874
32875 switch (cmd) {
32876 case SIOCGPPPSTATS:
32877 @@ -1009,8 +1008,7 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
32878 break;
32879
32880 case SIOCGPPPVER:
32881 - vers = PPP_VERSION;
32882 - if (copy_to_user(addr, vers, strlen(vers) + 1))
32883 + if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION)))
32884 break;
32885 err = 0;
32886 break;
32887 diff --git a/drivers/net/tokenring/abyss.c b/drivers/net/tokenring/abyss.c
32888 index 515f122..41dd273 100644
32889 --- a/drivers/net/tokenring/abyss.c
32890 +++ b/drivers/net/tokenring/abyss.c
32891 @@ -451,10 +451,12 @@ static struct pci_driver abyss_driver = {
32892
32893 static int __init abyss_init (void)
32894 {
32895 - abyss_netdev_ops = tms380tr_netdev_ops;
32896 + pax_open_kernel();
32897 + memcpy((void *)&abyss_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
32898
32899 - abyss_netdev_ops.ndo_open = abyss_open;
32900 - abyss_netdev_ops.ndo_stop = abyss_close;
32901 + *(void **)&abyss_netdev_ops.ndo_open = abyss_open;
32902 + *(void **)&abyss_netdev_ops.ndo_stop = abyss_close;
32903 + pax_close_kernel();
32904
32905 return pci_register_driver(&abyss_driver);
32906 }
32907 diff --git a/drivers/net/tokenring/madgemc.c b/drivers/net/tokenring/madgemc.c
32908 index 6153cfd..cf69c1c 100644
32909 --- a/drivers/net/tokenring/madgemc.c
32910 +++ b/drivers/net/tokenring/madgemc.c
32911 @@ -744,9 +744,11 @@ static struct mca_driver madgemc_driver = {
32912
32913 static int __init madgemc_init (void)
32914 {
32915 - madgemc_netdev_ops = tms380tr_netdev_ops;
32916 - madgemc_netdev_ops.ndo_open = madgemc_open;
32917 - madgemc_netdev_ops.ndo_stop = madgemc_close;
32918 + pax_open_kernel();
32919 + memcpy((void *)&madgemc_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
32920 + *(void **)&madgemc_netdev_ops.ndo_open = madgemc_open;
32921 + *(void **)&madgemc_netdev_ops.ndo_stop = madgemc_close;
32922 + pax_close_kernel();
32923
32924 return mca_register_driver (&madgemc_driver);
32925 }
32926 diff --git a/drivers/net/tokenring/proteon.c b/drivers/net/tokenring/proteon.c
32927 index 8d362e6..f91cc52 100644
32928 --- a/drivers/net/tokenring/proteon.c
32929 +++ b/drivers/net/tokenring/proteon.c
32930 @@ -353,9 +353,11 @@ static int __init proteon_init(void)
32931 struct platform_device *pdev;
32932 int i, num = 0, err = 0;
32933
32934 - proteon_netdev_ops = tms380tr_netdev_ops;
32935 - proteon_netdev_ops.ndo_open = proteon_open;
32936 - proteon_netdev_ops.ndo_stop = tms380tr_close;
32937 + pax_open_kernel();
32938 + memcpy((void *)&proteon_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
32939 + *(void **)&proteon_netdev_ops.ndo_open = proteon_open;
32940 + *(void **)&proteon_netdev_ops.ndo_stop = tms380tr_close;
32941 + pax_close_kernel();
32942
32943 err = platform_driver_register(&proteon_driver);
32944 if (err)
32945 diff --git a/drivers/net/tokenring/skisa.c b/drivers/net/tokenring/skisa.c
32946 index 46db5c5..37c1536 100644
32947 --- a/drivers/net/tokenring/skisa.c
32948 +++ b/drivers/net/tokenring/skisa.c
32949 @@ -363,9 +363,11 @@ static int __init sk_isa_init(void)
32950 struct platform_device *pdev;
32951 int i, num = 0, err = 0;
32952
32953 - sk_isa_netdev_ops = tms380tr_netdev_ops;
32954 - sk_isa_netdev_ops.ndo_open = sk_isa_open;
32955 - sk_isa_netdev_ops.ndo_stop = tms380tr_close;
32956 + pax_open_kernel();
32957 + memcpy((void *)&sk_isa_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
32958 + *(void **)&sk_isa_netdev_ops.ndo_open = sk_isa_open;
32959 + *(void **)&sk_isa_netdev_ops.ndo_stop = tms380tr_close;
32960 + pax_close_kernel();
32961
32962 err = platform_driver_register(&sk_isa_driver);
32963 if (err)
32964 diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
32965 index 304fe78..db112fa 100644
32966 --- a/drivers/net/usb/hso.c
32967 +++ b/drivers/net/usb/hso.c
32968 @@ -71,7 +71,7 @@
32969 #include <asm/byteorder.h>
32970 #include <linux/serial_core.h>
32971 #include <linux/serial.h>
32972 -
32973 +#include <asm/local.h>
32974
32975 #define MOD_AUTHOR "Option Wireless"
32976 #define MOD_DESCRIPTION "USB High Speed Option driver"
32977 @@ -257,7 +257,7 @@ struct hso_serial {
32978
32979 /* from usb_serial_port */
32980 struct tty_struct *tty;
32981 - int open_count;
32982 + local_t open_count;
32983 spinlock_t serial_lock;
32984
32985 int (*write_data) (struct hso_serial *serial);
32986 @@ -1190,7 +1190,7 @@ static void put_rxbuf_data_and_resubmit_ctrl_urb(struct hso_serial *serial)
32987 struct urb *urb;
32988
32989 urb = serial->rx_urb[0];
32990 - if (serial->open_count > 0) {
32991 + if (local_read(&serial->open_count) > 0) {
32992 count = put_rxbuf_data(urb, serial);
32993 if (count == -1)
32994 return;
32995 @@ -1226,7 +1226,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
32996 DUMP1(urb->transfer_buffer, urb->actual_length);
32997
32998 /* Anyone listening? */
32999 - if (serial->open_count == 0)
33000 + if (local_read(&serial->open_count) == 0)
33001 return;
33002
33003 if (status == 0) {
33004 @@ -1311,8 +1311,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
33005 spin_unlock_irq(&serial->serial_lock);
33006
33007 /* check for port already opened, if not set the termios */
33008 - serial->open_count++;
33009 - if (serial->open_count == 1) {
33010 + if (local_inc_return(&serial->open_count) == 1) {
33011 serial->rx_state = RX_IDLE;
33012 /* Force default termio settings */
33013 _hso_serial_set_termios(tty, NULL);
33014 @@ -1324,7 +1323,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
33015 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
33016 if (result) {
33017 hso_stop_serial_device(serial->parent);
33018 - serial->open_count--;
33019 + local_dec(&serial->open_count);
33020 kref_put(&serial->parent->ref, hso_serial_ref_free);
33021 }
33022 } else {
33023 @@ -1361,10 +1360,10 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
33024
33025 /* reset the rts and dtr */
33026 /* do the actual close */
33027 - serial->open_count--;
33028 + local_dec(&serial->open_count);
33029
33030 - if (serial->open_count <= 0) {
33031 - serial->open_count = 0;
33032 + if (local_read(&serial->open_count) <= 0) {
33033 + local_set(&serial->open_count, 0);
33034 spin_lock_irq(&serial->serial_lock);
33035 if (serial->tty == tty) {
33036 serial->tty->driver_data = NULL;
33037 @@ -1446,7 +1445,7 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
33038
33039 /* the actual setup */
33040 spin_lock_irqsave(&serial->serial_lock, flags);
33041 - if (serial->open_count)
33042 + if (local_read(&serial->open_count))
33043 _hso_serial_set_termios(tty, old);
33044 else
33045 tty->termios = old;
33046 @@ -1905,7 +1904,7 @@ static void intr_callback(struct urb *urb)
33047 D1("Pending read interrupt on port %d\n", i);
33048 spin_lock(&serial->serial_lock);
33049 if (serial->rx_state == RX_IDLE &&
33050 - serial->open_count > 0) {
33051 + local_read(&serial->open_count) > 0) {
33052 /* Setup and send a ctrl req read on
33053 * port i */
33054 if (!serial->rx_urb_filled[0]) {
33055 @@ -3098,7 +3097,7 @@ static int hso_resume(struct usb_interface *iface)
33056 /* Start all serial ports */
33057 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
33058 if (serial_table[i] && (serial_table[i]->interface == iface)) {
33059 - if (dev2ser(serial_table[i])->open_count) {
33060 + if (local_read(&dev2ser(serial_table[i])->open_count)) {
33061 result =
33062 hso_start_serial_device(serial_table[i], GFP_NOIO);
33063 hso_kick_transmit(dev2ser(serial_table[i]));
33064 diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
33065 index e662cbc..8d4a102 100644
33066 --- a/drivers/net/vmxnet3/vmxnet3_ethtool.c
33067 +++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
33068 @@ -601,8 +601,7 @@ vmxnet3_set_rss_indir(struct net_device *netdev,
33069 * Return with error code if any of the queue indices
33070 * is out of range
33071 */
33072 - if (p->ring_index[i] < 0 ||
33073 - p->ring_index[i] >= adapter->num_rx_queues)
33074 + if (p->ring_index[i] >= adapter->num_rx_queues)
33075 return -EINVAL;
33076 }
33077
33078 diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
33079 index 0f9ee46..e2d6e65 100644
33080 --- a/drivers/net/wireless/ath/ath.h
33081 +++ b/drivers/net/wireless/ath/ath.h
33082 @@ -119,6 +119,7 @@ struct ath_ops {
33083 void (*write_flush) (void *);
33084 u32 (*rmw)(void *, u32 reg_offset, u32 set, u32 clr);
33085 };
33086 +typedef struct ath_ops __no_const ath_ops_no_const;
33087
33088 struct ath_common;
33089 struct ath_bus_ops;
33090 diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
33091 index b592016..fe47870 100644
33092 --- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
33093 +++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
33094 @@ -183,8 +183,8 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
33095 ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
33096 ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
33097
33098 - ACCESS_ONCE(ads->ds_link) = i->link;
33099 - ACCESS_ONCE(ads->ds_data) = i->buf_addr[0];
33100 + ACCESS_ONCE_RW(ads->ds_link) = i->link;
33101 + ACCESS_ONCE_RW(ads->ds_data) = i->buf_addr[0];
33102
33103 ctl1 = i->buf_len[0] | (i->is_last ? 0 : AR_TxMore);
33104 ctl6 = SM(i->keytype, AR_EncrType);
33105 @@ -198,26 +198,26 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
33106
33107 if ((i->is_first || i->is_last) &&
33108 i->aggr != AGGR_BUF_MIDDLE && i->aggr != AGGR_BUF_LAST) {
33109 - ACCESS_ONCE(ads->ds_ctl2) = set11nTries(i->rates, 0)
33110 + ACCESS_ONCE_RW(ads->ds_ctl2) = set11nTries(i->rates, 0)
33111 | set11nTries(i->rates, 1)
33112 | set11nTries(i->rates, 2)
33113 | set11nTries(i->rates, 3)
33114 | (i->dur_update ? AR_DurUpdateEna : 0)
33115 | SM(0, AR_BurstDur);
33116
33117 - ACCESS_ONCE(ads->ds_ctl3) = set11nRate(i->rates, 0)
33118 + ACCESS_ONCE_RW(ads->ds_ctl3) = set11nRate(i->rates, 0)
33119 | set11nRate(i->rates, 1)
33120 | set11nRate(i->rates, 2)
33121 | set11nRate(i->rates, 3);
33122 } else {
33123 - ACCESS_ONCE(ads->ds_ctl2) = 0;
33124 - ACCESS_ONCE(ads->ds_ctl3) = 0;
33125 + ACCESS_ONCE_RW(ads->ds_ctl2) = 0;
33126 + ACCESS_ONCE_RW(ads->ds_ctl3) = 0;
33127 }
33128
33129 if (!i->is_first) {
33130 - ACCESS_ONCE(ads->ds_ctl0) = 0;
33131 - ACCESS_ONCE(ads->ds_ctl1) = ctl1;
33132 - ACCESS_ONCE(ads->ds_ctl6) = ctl6;
33133 + ACCESS_ONCE_RW(ads->ds_ctl0) = 0;
33134 + ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
33135 + ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
33136 return;
33137 }
33138
33139 @@ -242,7 +242,7 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
33140 break;
33141 }
33142
33143 - ACCESS_ONCE(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
33144 + ACCESS_ONCE_RW(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
33145 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
33146 | SM(i->txpower, AR_XmitPower)
33147 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
33148 @@ -252,19 +252,19 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
33149 | (i->flags & ATH9K_TXDESC_RTSENA ? AR_RTSEnable :
33150 (i->flags & ATH9K_TXDESC_CTSENA ? AR_CTSEnable : 0));
33151
33152 - ACCESS_ONCE(ads->ds_ctl1) = ctl1;
33153 - ACCESS_ONCE(ads->ds_ctl6) = ctl6;
33154 + ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
33155 + ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
33156
33157 if (i->aggr == AGGR_BUF_MIDDLE || i->aggr == AGGR_BUF_LAST)
33158 return;
33159
33160 - ACCESS_ONCE(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
33161 + ACCESS_ONCE_RW(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
33162 | set11nPktDurRTSCTS(i->rates, 1);
33163
33164 - ACCESS_ONCE(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
33165 + ACCESS_ONCE_RW(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
33166 | set11nPktDurRTSCTS(i->rates, 3);
33167
33168 - ACCESS_ONCE(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
33169 + ACCESS_ONCE_RW(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
33170 | set11nRateFlags(i->rates, 1)
33171 | set11nRateFlags(i->rates, 2)
33172 | set11nRateFlags(i->rates, 3)
33173 diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
33174 index f5ae3c6..7936af3 100644
33175 --- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
33176 +++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
33177 @@ -35,47 +35,47 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
33178 (i->qcu << AR_TxQcuNum_S) | 0x17;
33179
33180 checksum += val;
33181 - ACCESS_ONCE(ads->info) = val;
33182 + ACCESS_ONCE_RW(ads->info) = val;
33183
33184 checksum += i->link;
33185 - ACCESS_ONCE(ads->link) = i->link;
33186 + ACCESS_ONCE_RW(ads->link) = i->link;
33187
33188 checksum += i->buf_addr[0];
33189 - ACCESS_ONCE(ads->data0) = i->buf_addr[0];
33190 + ACCESS_ONCE_RW(ads->data0) = i->buf_addr[0];
33191 checksum += i->buf_addr[1];
33192 - ACCESS_ONCE(ads->data1) = i->buf_addr[1];
33193 + ACCESS_ONCE_RW(ads->data1) = i->buf_addr[1];
33194 checksum += i->buf_addr[2];
33195 - ACCESS_ONCE(ads->data2) = i->buf_addr[2];
33196 + ACCESS_ONCE_RW(ads->data2) = i->buf_addr[2];
33197 checksum += i->buf_addr[3];
33198 - ACCESS_ONCE(ads->data3) = i->buf_addr[3];
33199 + ACCESS_ONCE_RW(ads->data3) = i->buf_addr[3];
33200
33201 checksum += (val = (i->buf_len[0] << AR_BufLen_S) & AR_BufLen);
33202 - ACCESS_ONCE(ads->ctl3) = val;
33203 + ACCESS_ONCE_RW(ads->ctl3) = val;
33204 checksum += (val = (i->buf_len[1] << AR_BufLen_S) & AR_BufLen);
33205 - ACCESS_ONCE(ads->ctl5) = val;
33206 + ACCESS_ONCE_RW(ads->ctl5) = val;
33207 checksum += (val = (i->buf_len[2] << AR_BufLen_S) & AR_BufLen);
33208 - ACCESS_ONCE(ads->ctl7) = val;
33209 + ACCESS_ONCE_RW(ads->ctl7) = val;
33210 checksum += (val = (i->buf_len[3] << AR_BufLen_S) & AR_BufLen);
33211 - ACCESS_ONCE(ads->ctl9) = val;
33212 + ACCESS_ONCE_RW(ads->ctl9) = val;
33213
33214 checksum = (u16) (((checksum & 0xffff) + (checksum >> 16)) & 0xffff);
33215 - ACCESS_ONCE(ads->ctl10) = checksum;
33216 + ACCESS_ONCE_RW(ads->ctl10) = checksum;
33217
33218 if (i->is_first || i->is_last) {
33219 - ACCESS_ONCE(ads->ctl13) = set11nTries(i->rates, 0)
33220 + ACCESS_ONCE_RW(ads->ctl13) = set11nTries(i->rates, 0)
33221 | set11nTries(i->rates, 1)
33222 | set11nTries(i->rates, 2)
33223 | set11nTries(i->rates, 3)
33224 | (i->dur_update ? AR_DurUpdateEna : 0)
33225 | SM(0, AR_BurstDur);
33226
33227 - ACCESS_ONCE(ads->ctl14) = set11nRate(i->rates, 0)
33228 + ACCESS_ONCE_RW(ads->ctl14) = set11nRate(i->rates, 0)
33229 | set11nRate(i->rates, 1)
33230 | set11nRate(i->rates, 2)
33231 | set11nRate(i->rates, 3);
33232 } else {
33233 - ACCESS_ONCE(ads->ctl13) = 0;
33234 - ACCESS_ONCE(ads->ctl14) = 0;
33235 + ACCESS_ONCE_RW(ads->ctl13) = 0;
33236 + ACCESS_ONCE_RW(ads->ctl14) = 0;
33237 }
33238
33239 ads->ctl20 = 0;
33240 @@ -84,17 +84,17 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
33241
33242 ctl17 = SM(i->keytype, AR_EncrType);
33243 if (!i->is_first) {
33244 - ACCESS_ONCE(ads->ctl11) = 0;
33245 - ACCESS_ONCE(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
33246 - ACCESS_ONCE(ads->ctl15) = 0;
33247 - ACCESS_ONCE(ads->ctl16) = 0;
33248 - ACCESS_ONCE(ads->ctl17) = ctl17;
33249 - ACCESS_ONCE(ads->ctl18) = 0;
33250 - ACCESS_ONCE(ads->ctl19) = 0;
33251 + ACCESS_ONCE_RW(ads->ctl11) = 0;
33252 + ACCESS_ONCE_RW(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
33253 + ACCESS_ONCE_RW(ads->ctl15) = 0;
33254 + ACCESS_ONCE_RW(ads->ctl16) = 0;
33255 + ACCESS_ONCE_RW(ads->ctl17) = ctl17;
33256 + ACCESS_ONCE_RW(ads->ctl18) = 0;
33257 + ACCESS_ONCE_RW(ads->ctl19) = 0;
33258 return;
33259 }
33260
33261 - ACCESS_ONCE(ads->ctl11) = (i->pkt_len & AR_FrameLen)
33262 + ACCESS_ONCE_RW(ads->ctl11) = (i->pkt_len & AR_FrameLen)
33263 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
33264 | SM(i->txpower, AR_XmitPower)
33265 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
33266 @@ -130,22 +130,22 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
33267 val = (i->flags & ATH9K_TXDESC_PAPRD) >> ATH9K_TXDESC_PAPRD_S;
33268 ctl12 |= SM(val, AR_PAPRDChainMask);
33269
33270 - ACCESS_ONCE(ads->ctl12) = ctl12;
33271 - ACCESS_ONCE(ads->ctl17) = ctl17;
33272 + ACCESS_ONCE_RW(ads->ctl12) = ctl12;
33273 + ACCESS_ONCE_RW(ads->ctl17) = ctl17;
33274
33275 - ACCESS_ONCE(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
33276 + ACCESS_ONCE_RW(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
33277 | set11nPktDurRTSCTS(i->rates, 1);
33278
33279 - ACCESS_ONCE(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
33280 + ACCESS_ONCE_RW(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
33281 | set11nPktDurRTSCTS(i->rates, 3);
33282
33283 - ACCESS_ONCE(ads->ctl18) = set11nRateFlags(i->rates, 0)
33284 + ACCESS_ONCE_RW(ads->ctl18) = set11nRateFlags(i->rates, 0)
33285 | set11nRateFlags(i->rates, 1)
33286 | set11nRateFlags(i->rates, 2)
33287 | set11nRateFlags(i->rates, 3)
33288 | SM(i->rtscts_rate, AR_RTSCTSRate);
33289
33290 - ACCESS_ONCE(ads->ctl19) = AR_Not_Sounding;
33291 + ACCESS_ONCE_RW(ads->ctl19) = AR_Not_Sounding;
33292 }
33293
33294 static u16 ar9003_calc_ptr_chksum(struct ar9003_txc *ads)
33295 diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
33296 index f389b3c..7359e18 100644
33297 --- a/drivers/net/wireless/ath/ath9k/hw.h
33298 +++ b/drivers/net/wireless/ath/ath9k/hw.h
33299 @@ -605,7 +605,7 @@ struct ath_hw_private_ops {
33300
33301 /* ANI */
33302 void (*ani_cache_ini_regs)(struct ath_hw *ah);
33303 -};
33304 +} __no_const;
33305
33306 /**
33307 * struct ath_hw_ops - callbacks used by hardware code and driver code
33308 @@ -635,7 +635,7 @@ struct ath_hw_ops {
33309 void (*antdiv_comb_conf_set)(struct ath_hw *ah,
33310 struct ath_hw_antcomb_conf *antconf);
33311
33312 -};
33313 +} __no_const;
33314
33315 struct ath_nf_limits {
33316 s16 max;
33317 @@ -655,7 +655,7 @@ enum ath_cal_list {
33318 #define AH_FASTCC 0x4
33319
33320 struct ath_hw {
33321 - struct ath_ops reg_ops;
33322 + ath_ops_no_const reg_ops;
33323
33324 struct ieee80211_hw *hw;
33325 struct ath_common common;
33326 diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
33327 index bea8524..c677c06 100644
33328 --- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
33329 +++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
33330 @@ -547,7 +547,7 @@ struct phy_func_ptr {
33331 void (*carrsuppr)(struct brcms_phy *);
33332 s32 (*rxsigpwr)(struct brcms_phy *, s32);
33333 void (*detach)(struct brcms_phy *);
33334 -};
33335 +} __no_const;
33336
33337 struct brcms_phy {
33338 struct brcms_phy_pub pubpi_ro;
33339 diff --git a/drivers/net/wireless/iwlegacy/iwl3945-base.c b/drivers/net/wireless/iwlegacy/iwl3945-base.c
33340 index 05f2ad1..ae00eea 100644
33341 --- a/drivers/net/wireless/iwlegacy/iwl3945-base.c
33342 +++ b/drivers/net/wireless/iwlegacy/iwl3945-base.c
33343 @@ -3685,7 +3685,9 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
33344 */
33345 if (iwl3945_mod_params.disable_hw_scan) {
33346 IWL_DEBUG_INFO(priv, "Disabling hw_scan\n");
33347 - iwl3945_hw_ops.hw_scan = NULL;
33348 + pax_open_kernel();
33349 + *(void **)&iwl3945_hw_ops.hw_scan = NULL;
33350 + pax_close_kernel();
33351 }
33352
33353 IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n");
33354 diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h
33355 index 69a77e2..552b42c 100644
33356 --- a/drivers/net/wireless/iwlwifi/iwl-debug.h
33357 +++ b/drivers/net/wireless/iwlwifi/iwl-debug.h
33358 @@ -71,8 +71,8 @@ do { \
33359 } while (0)
33360
33361 #else
33362 -#define IWL_DEBUG(m, level, fmt, args...)
33363 -#define IWL_DEBUG_LIMIT(m, level, fmt, args...)
33364 +#define IWL_DEBUG(m, level, fmt, args...) do {} while (0)
33365 +#define IWL_DEBUG_LIMIT(m, level, fmt, args...) do {} while (0)
33366 #define iwl_print_hex_dump(m, level, p, len)
33367 #endif /* CONFIG_IWLWIFI_DEBUG */
33368
33369 diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
33370 index 523ad55..f8c5dc5 100644
33371 --- a/drivers/net/wireless/mac80211_hwsim.c
33372 +++ b/drivers/net/wireless/mac80211_hwsim.c
33373 @@ -1678,9 +1678,11 @@ static int __init init_mac80211_hwsim(void)
33374 return -EINVAL;
33375
33376 if (fake_hw_scan) {
33377 - mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
33378 - mac80211_hwsim_ops.sw_scan_start = NULL;
33379 - mac80211_hwsim_ops.sw_scan_complete = NULL;
33380 + pax_open_kernel();
33381 + *(void **)&mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
33382 + *(void **)&mac80211_hwsim_ops.sw_scan_start = NULL;
33383 + *(void **)&mac80211_hwsim_ops.sw_scan_complete = NULL;
33384 + pax_close_kernel();
33385 }
33386
33387 spin_lock_init(&hwsim_radio_lock);
33388 diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h
33389 index 30f138b..c904585 100644
33390 --- a/drivers/net/wireless/mwifiex/main.h
33391 +++ b/drivers/net/wireless/mwifiex/main.h
33392 @@ -543,7 +543,7 @@ struct mwifiex_if_ops {
33393 void (*cleanup_mpa_buf) (struct mwifiex_adapter *);
33394 int (*cmdrsp_complete) (struct mwifiex_adapter *, struct sk_buff *);
33395 int (*event_complete) (struct mwifiex_adapter *, struct sk_buff *);
33396 -};
33397 +} __no_const;
33398
33399 struct mwifiex_adapter {
33400 u8 iface_type;
33401 diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
33402 index 0c13840..a5c3ed6 100644
33403 --- a/drivers/net/wireless/rndis_wlan.c
33404 +++ b/drivers/net/wireless/rndis_wlan.c
33405 @@ -1275,7 +1275,7 @@ static int set_rts_threshold(struct usbnet *usbdev, u32 rts_threshold)
33406
33407 netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
33408
33409 - if (rts_threshold < 0 || rts_threshold > 2347)
33410 + if (rts_threshold > 2347)
33411 rts_threshold = 2347;
33412
33413 tmp = cpu_to_le32(rts_threshold);
33414 diff --git a/drivers/net/wireless/wl1251/wl1251.h b/drivers/net/wireless/wl1251/wl1251.h
33415 index a77f1bb..c608b2b 100644
33416 --- a/drivers/net/wireless/wl1251/wl1251.h
33417 +++ b/drivers/net/wireless/wl1251/wl1251.h
33418 @@ -266,7 +266,7 @@ struct wl1251_if_operations {
33419 void (*reset)(struct wl1251 *wl);
33420 void (*enable_irq)(struct wl1251 *wl);
33421 void (*disable_irq)(struct wl1251 *wl);
33422 -};
33423 +} __no_const;
33424
33425 struct wl1251 {
33426 struct ieee80211_hw *hw;
33427 diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
33428 index f34b5b2..b5abb9f 100644
33429 --- a/drivers/oprofile/buffer_sync.c
33430 +++ b/drivers/oprofile/buffer_sync.c
33431 @@ -343,7 +343,7 @@ static void add_data(struct op_entry *entry, struct mm_struct *mm)
33432 if (cookie == NO_COOKIE)
33433 offset = pc;
33434 if (cookie == INVALID_COOKIE) {
33435 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
33436 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
33437 offset = pc;
33438 }
33439 if (cookie != last_cookie) {
33440 @@ -387,14 +387,14 @@ add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
33441 /* add userspace sample */
33442
33443 if (!mm) {
33444 - atomic_inc(&oprofile_stats.sample_lost_no_mm);
33445 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
33446 return 0;
33447 }
33448
33449 cookie = lookup_dcookie(mm, s->eip, &offset);
33450
33451 if (cookie == INVALID_COOKIE) {
33452 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
33453 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
33454 return 0;
33455 }
33456
33457 @@ -563,7 +563,7 @@ void sync_buffer(int cpu)
33458 /* ignore backtraces if failed to add a sample */
33459 if (state == sb_bt_start) {
33460 state = sb_bt_ignore;
33461 - atomic_inc(&oprofile_stats.bt_lost_no_mapping);
33462 + atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
33463 }
33464 }
33465 release_mm(mm);
33466 diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
33467 index c0cc4e7..44d4e54 100644
33468 --- a/drivers/oprofile/event_buffer.c
33469 +++ b/drivers/oprofile/event_buffer.c
33470 @@ -53,7 +53,7 @@ void add_event_entry(unsigned long value)
33471 }
33472
33473 if (buffer_pos == buffer_size) {
33474 - atomic_inc(&oprofile_stats.event_lost_overflow);
33475 + atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
33476 return;
33477 }
33478
33479 diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
33480 index f8c752e..28bf4fc 100644
33481 --- a/drivers/oprofile/oprof.c
33482 +++ b/drivers/oprofile/oprof.c
33483 @@ -110,7 +110,7 @@ static void switch_worker(struct work_struct *work)
33484 if (oprofile_ops.switch_events())
33485 return;
33486
33487 - atomic_inc(&oprofile_stats.multiplex_counter);
33488 + atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
33489 start_switch_worker();
33490 }
33491
33492 diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
33493 index 917d28e..d62d981 100644
33494 --- a/drivers/oprofile/oprofile_stats.c
33495 +++ b/drivers/oprofile/oprofile_stats.c
33496 @@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
33497 cpu_buf->sample_invalid_eip = 0;
33498 }
33499
33500 - atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
33501 - atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
33502 - atomic_set(&oprofile_stats.event_lost_overflow, 0);
33503 - atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
33504 - atomic_set(&oprofile_stats.multiplex_counter, 0);
33505 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
33506 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
33507 + atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
33508 + atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
33509 + atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
33510 }
33511
33512
33513 diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
33514 index 38b6fc0..b5cbfce 100644
33515 --- a/drivers/oprofile/oprofile_stats.h
33516 +++ b/drivers/oprofile/oprofile_stats.h
33517 @@ -13,11 +13,11 @@
33518 #include <linux/atomic.h>
33519
33520 struct oprofile_stat_struct {
33521 - atomic_t sample_lost_no_mm;
33522 - atomic_t sample_lost_no_mapping;
33523 - atomic_t bt_lost_no_mapping;
33524 - atomic_t event_lost_overflow;
33525 - atomic_t multiplex_counter;
33526 + atomic_unchecked_t sample_lost_no_mm;
33527 + atomic_unchecked_t sample_lost_no_mapping;
33528 + atomic_unchecked_t bt_lost_no_mapping;
33529 + atomic_unchecked_t event_lost_overflow;
33530 + atomic_unchecked_t multiplex_counter;
33531 };
33532
33533 extern struct oprofile_stat_struct oprofile_stats;
33534 diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
33535 index 2f0aa0f..90fab02 100644
33536 --- a/drivers/oprofile/oprofilefs.c
33537 +++ b/drivers/oprofile/oprofilefs.c
33538 @@ -193,7 +193,7 @@ static const struct file_operations atomic_ro_fops = {
33539
33540
33541 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
33542 - char const *name, atomic_t *val)
33543 + char const *name, atomic_unchecked_t *val)
33544 {
33545 return __oprofilefs_create_file(sb, root, name,
33546 &atomic_ro_fops, 0444, val);
33547 diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
33548 index 3f56bc0..707d642 100644
33549 --- a/drivers/parport/procfs.c
33550 +++ b/drivers/parport/procfs.c
33551 @@ -64,7 +64,7 @@ static int do_active_device(ctl_table *table, int write,
33552
33553 *ppos += len;
33554
33555 - return copy_to_user(result, buffer, len) ? -EFAULT : 0;
33556 + return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
33557 }
33558
33559 #ifdef CONFIG_PARPORT_1284
33560 @@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table, int write,
33561
33562 *ppos += len;
33563
33564 - return copy_to_user (result, buffer, len) ? -EFAULT : 0;
33565 + return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
33566 }
33567 #endif /* IEEE1284.3 support. */
33568
33569 diff --git a/drivers/pci/hotplug/cpci_hotplug.h b/drivers/pci/hotplug/cpci_hotplug.h
33570 index 9fff878..ad0ad53 100644
33571 --- a/drivers/pci/hotplug/cpci_hotplug.h
33572 +++ b/drivers/pci/hotplug/cpci_hotplug.h
33573 @@ -59,7 +59,7 @@ struct cpci_hp_controller_ops {
33574 int (*hardware_test) (struct slot* slot, u32 value);
33575 u8 (*get_power) (struct slot* slot);
33576 int (*set_power) (struct slot* slot, int value);
33577 -};
33578 +} __no_const;
33579
33580 struct cpci_hp_controller {
33581 unsigned int irq;
33582 diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
33583 index 76ba8a1..20ca857 100644
33584 --- a/drivers/pci/hotplug/cpqphp_nvram.c
33585 +++ b/drivers/pci/hotplug/cpqphp_nvram.c
33586 @@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_start)
33587
33588 void compaq_nvram_init (void __iomem *rom_start)
33589 {
33590 +
33591 +#ifndef CONFIG_PAX_KERNEXEC
33592 if (rom_start) {
33593 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
33594 }
33595 +#endif
33596 +
33597 dbg("int15 entry = %p\n", compaq_int15_entry_point);
33598
33599 /* initialize our int15 lock */
33600 diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
33601 index 1cfbf22..be96487 100644
33602 --- a/drivers/pci/pcie/aspm.c
33603 +++ b/drivers/pci/pcie/aspm.c
33604 @@ -27,9 +27,9 @@
33605 #define MODULE_PARAM_PREFIX "pcie_aspm."
33606
33607 /* Note: those are not register definitions */
33608 -#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
33609 -#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
33610 -#define ASPM_STATE_L1 (4) /* L1 state */
33611 +#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
33612 +#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
33613 +#define ASPM_STATE_L1 (4U) /* L1 state */
33614 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
33615 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
33616
33617 diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
33618 index 04e74f4..a960176 100644
33619 --- a/drivers/pci/probe.c
33620 +++ b/drivers/pci/probe.c
33621 @@ -136,7 +136,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
33622 u32 l, sz, mask;
33623 u16 orig_cmd;
33624
33625 - mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
33626 + mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0;
33627
33628 if (!dev->mmio_always_on) {
33629 pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
33630 diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
33631 index 27911b5..5b6db88 100644
33632 --- a/drivers/pci/proc.c
33633 +++ b/drivers/pci/proc.c
33634 @@ -476,7 +476,16 @@ static const struct file_operations proc_bus_pci_dev_operations = {
33635 static int __init pci_proc_init(void)
33636 {
33637 struct pci_dev *dev = NULL;
33638 +
33639 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
33640 +#ifdef CONFIG_GRKERNSEC_PROC_USER
33641 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
33642 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
33643 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
33644 +#endif
33645 +#else
33646 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
33647 +#endif
33648 proc_create("devices", 0, proc_bus_pci_dir,
33649 &proc_bus_pci_dev_operations);
33650 proc_initialized = 1;
33651 diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
33652 index 7b82868..b9344c9 100644
33653 --- a/drivers/platform/x86/thinkpad_acpi.c
33654 +++ b/drivers/platform/x86/thinkpad_acpi.c
33655 @@ -2094,7 +2094,7 @@ static int hotkey_mask_get(void)
33656 return 0;
33657 }
33658
33659 -void static hotkey_mask_warn_incomplete_mask(void)
33660 +static void hotkey_mask_warn_incomplete_mask(void)
33661 {
33662 /* log only what the user can fix... */
33663 const u32 wantedmask = hotkey_driver_mask &
33664 @@ -2325,11 +2325,6 @@ static void hotkey_read_nvram(struct tp_nvram_state *n, const u32 m)
33665 }
33666 }
33667
33668 -static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
33669 - struct tp_nvram_state *newn,
33670 - const u32 event_mask)
33671 -{
33672 -
33673 #define TPACPI_COMPARE_KEY(__scancode, __member) \
33674 do { \
33675 if ((event_mask & (1 << __scancode)) && \
33676 @@ -2343,36 +2338,42 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
33677 tpacpi_hotkey_send_key(__scancode); \
33678 } while (0)
33679
33680 - void issue_volchange(const unsigned int oldvol,
33681 - const unsigned int newvol)
33682 - {
33683 - unsigned int i = oldvol;
33684 +static void issue_volchange(const unsigned int oldvol,
33685 + const unsigned int newvol,
33686 + const u32 event_mask)
33687 +{
33688 + unsigned int i = oldvol;
33689
33690 - while (i > newvol) {
33691 - TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
33692 - i--;
33693 - }
33694 - while (i < newvol) {
33695 - TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
33696 - i++;
33697 - }
33698 + while (i > newvol) {
33699 + TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
33700 + i--;
33701 }
33702 + while (i < newvol) {
33703 + TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
33704 + i++;
33705 + }
33706 +}
33707
33708 - void issue_brightnesschange(const unsigned int oldbrt,
33709 - const unsigned int newbrt)
33710 - {
33711 - unsigned int i = oldbrt;
33712 +static void issue_brightnesschange(const unsigned int oldbrt,
33713 + const unsigned int newbrt,
33714 + const u32 event_mask)
33715 +{
33716 + unsigned int i = oldbrt;
33717
33718 - while (i > newbrt) {
33719 - TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
33720 - i--;
33721 - }
33722 - while (i < newbrt) {
33723 - TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
33724 - i++;
33725 - }
33726 + while (i > newbrt) {
33727 + TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
33728 + i--;
33729 + }
33730 + while (i < newbrt) {
33731 + TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
33732 + i++;
33733 }
33734 +}
33735
33736 +static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
33737 + struct tp_nvram_state *newn,
33738 + const u32 event_mask)
33739 +{
33740 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_THINKPAD, thinkpad_toggle);
33741 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNSPACE, zoom_toggle);
33742 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNF7, display_toggle);
33743 @@ -2406,7 +2407,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
33744 oldn->volume_level != newn->volume_level) {
33745 /* recently muted, or repeated mute keypress, or
33746 * multiple presses ending in mute */
33747 - issue_volchange(oldn->volume_level, newn->volume_level);
33748 + issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
33749 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_MUTE);
33750 }
33751 } else {
33752 @@ -2416,7 +2417,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
33753 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
33754 }
33755 if (oldn->volume_level != newn->volume_level) {
33756 - issue_volchange(oldn->volume_level, newn->volume_level);
33757 + issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
33758 } else if (oldn->volume_toggle != newn->volume_toggle) {
33759 /* repeated vol up/down keypress at end of scale ? */
33760 if (newn->volume_level == 0)
33761 @@ -2429,7 +2430,8 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
33762 /* handle brightness */
33763 if (oldn->brightness_level != newn->brightness_level) {
33764 issue_brightnesschange(oldn->brightness_level,
33765 - newn->brightness_level);
33766 + newn->brightness_level,
33767 + event_mask);
33768 } else if (oldn->brightness_toggle != newn->brightness_toggle) {
33769 /* repeated key presses that didn't change state */
33770 if (newn->brightness_level == 0)
33771 @@ -2438,10 +2440,10 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
33772 && !tp_features.bright_unkfw)
33773 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
33774 }
33775 +}
33776
33777 #undef TPACPI_COMPARE_KEY
33778 #undef TPACPI_MAY_SEND_KEY
33779 -}
33780
33781 /*
33782 * Polling driver
33783 diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
33784 index b859d16..5cc6b1a 100644
33785 --- a/drivers/pnp/pnpbios/bioscalls.c
33786 +++ b/drivers/pnp/pnpbios/bioscalls.c
33787 @@ -59,7 +59,7 @@ do { \
33788 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
33789 } while(0)
33790
33791 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
33792 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
33793 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
33794
33795 /*
33796 @@ -96,7 +96,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
33797
33798 cpu = get_cpu();
33799 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
33800 +
33801 + pax_open_kernel();
33802 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
33803 + pax_close_kernel();
33804
33805 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
33806 spin_lock_irqsave(&pnp_bios_lock, flags);
33807 @@ -134,7 +137,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
33808 :"memory");
33809 spin_unlock_irqrestore(&pnp_bios_lock, flags);
33810
33811 + pax_open_kernel();
33812 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
33813 + pax_close_kernel();
33814 +
33815 put_cpu();
33816
33817 /* If we get here and this is set then the PnP BIOS faulted on us. */
33818 @@ -468,7 +474,7 @@ int pnp_bios_read_escd(char *data, u32 nvram_base)
33819 return status;
33820 }
33821
33822 -void pnpbios_calls_init(union pnp_bios_install_struct *header)
33823 +void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
33824 {
33825 int i;
33826
33827 @@ -476,6 +482,8 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
33828 pnp_bios_callpoint.offset = header->fields.pm16offset;
33829 pnp_bios_callpoint.segment = PNP_CS16;
33830
33831 + pax_open_kernel();
33832 +
33833 for_each_possible_cpu(i) {
33834 struct desc_struct *gdt = get_cpu_gdt_table(i);
33835 if (!gdt)
33836 @@ -487,4 +495,6 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
33837 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
33838 (unsigned long)__va(header->fields.pm16dseg));
33839 }
33840 +
33841 + pax_close_kernel();
33842 }
33843 diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c
33844 index b0ecacb..7c9da2e 100644
33845 --- a/drivers/pnp/resource.c
33846 +++ b/drivers/pnp/resource.c
33847 @@ -360,7 +360,7 @@ int pnp_check_irq(struct pnp_dev *dev, struct resource *res)
33848 return 1;
33849
33850 /* check if the resource is valid */
33851 - if (*irq < 0 || *irq > 15)
33852 + if (*irq > 15)
33853 return 0;
33854
33855 /* check if the resource is reserved */
33856 @@ -424,7 +424,7 @@ int pnp_check_dma(struct pnp_dev *dev, struct resource *res)
33857 return 1;
33858
33859 /* check if the resource is valid */
33860 - if (*dma < 0 || *dma == 4 || *dma > 7)
33861 + if (*dma == 4 || *dma > 7)
33862 return 0;
33863
33864 /* check if the resource is reserved */
33865 diff --git a/drivers/power/bq27x00_battery.c b/drivers/power/bq27x00_battery.c
33866 index bb16f5b..c751eef 100644
33867 --- a/drivers/power/bq27x00_battery.c
33868 +++ b/drivers/power/bq27x00_battery.c
33869 @@ -67,7 +67,7 @@
33870 struct bq27x00_device_info;
33871 struct bq27x00_access_methods {
33872 int (*read)(struct bq27x00_device_info *di, u8 reg, bool single);
33873 -};
33874 +} __no_const;
33875
33876 enum bq27x00_chip { BQ27000, BQ27500 };
33877
33878 diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c
33879 index 33f5d9a..d957d3f 100644
33880 --- a/drivers/regulator/max8660.c
33881 +++ b/drivers/regulator/max8660.c
33882 @@ -383,8 +383,10 @@ static int __devinit max8660_probe(struct i2c_client *client,
33883 max8660->shadow_regs[MAX8660_OVER1] = 5;
33884 } else {
33885 /* Otherwise devices can be toggled via software */
33886 - max8660_dcdc_ops.enable = max8660_dcdc_enable;
33887 - max8660_dcdc_ops.disable = max8660_dcdc_disable;
33888 + pax_open_kernel();
33889 + *(void **)&max8660_dcdc_ops.enable = max8660_dcdc_enable;
33890 + *(void **)&max8660_dcdc_ops.disable = max8660_dcdc_disable;
33891 + pax_close_kernel();
33892 }
33893
33894 /*
33895 diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c
33896 index 023d17d..74ef35b 100644
33897 --- a/drivers/regulator/mc13892-regulator.c
33898 +++ b/drivers/regulator/mc13892-regulator.c
33899 @@ -565,10 +565,12 @@ static int __devinit mc13892_regulator_probe(struct platform_device *pdev)
33900 }
33901 mc13xxx_unlock(mc13892);
33902
33903 - mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
33904 + pax_open_kernel();
33905 + *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
33906 = mc13892_vcam_set_mode;
33907 - mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
33908 + *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
33909 = mc13892_vcam_get_mode;
33910 + pax_close_kernel();
33911 for (i = 0; i < pdata->num_regulators; i++) {
33912 init_data = &pdata->regulators[i];
33913 priv->regulators[i] = regulator_register(
33914 diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
33915 index cace6d3..f623fda 100644
33916 --- a/drivers/rtc/rtc-dev.c
33917 +++ b/drivers/rtc/rtc-dev.c
33918 @@ -14,6 +14,7 @@
33919 #include <linux/module.h>
33920 #include <linux/rtc.h>
33921 #include <linux/sched.h>
33922 +#include <linux/grsecurity.h>
33923 #include "rtc-core.h"
33924
33925 static dev_t rtc_devt;
33926 @@ -345,6 +346,8 @@ static long rtc_dev_ioctl(struct file *file,
33927 if (copy_from_user(&tm, uarg, sizeof(tm)))
33928 return -EFAULT;
33929
33930 + gr_log_timechange();
33931 +
33932 return rtc_set_time(rtc, &tm);
33933
33934 case RTC_PIE_ON:
33935 diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
33936 index ffb5878..e6d785c 100644
33937 --- a/drivers/scsi/aacraid/aacraid.h
33938 +++ b/drivers/scsi/aacraid/aacraid.h
33939 @@ -492,7 +492,7 @@ struct adapter_ops
33940 int (*adapter_scsi)(struct fib * fib, struct scsi_cmnd * cmd);
33941 /* Administrative operations */
33942 int (*adapter_comm)(struct aac_dev * dev, int comm);
33943 -};
33944 +} __no_const;
33945
33946 /*
33947 * Define which interrupt handler needs to be installed
33948 diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
33949 index 705e13e..91c873c 100644
33950 --- a/drivers/scsi/aacraid/linit.c
33951 +++ b/drivers/scsi/aacraid/linit.c
33952 @@ -93,7 +93,7 @@ static DECLARE_PCI_DEVICE_TABLE(aac_pci_tbl) = {
33953 #elif defined(__devinitconst)
33954 static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
33955 #else
33956 -static const struct pci_device_id aac_pci_tbl[] __devinitdata = {
33957 +static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
33958 #endif
33959 { 0x1028, 0x0001, 0x1028, 0x0001, 0, 0, 0 }, /* PERC 2/Si (Iguana/PERC2Si) */
33960 { 0x1028, 0x0002, 0x1028, 0x0002, 0, 0, 1 }, /* PERC 3/Di (Opal/PERC3Di) */
33961 diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
33962 index d5ff142..49c0ebb 100644
33963 --- a/drivers/scsi/aic94xx/aic94xx_init.c
33964 +++ b/drivers/scsi/aic94xx/aic94xx_init.c
33965 @@ -1012,7 +1012,7 @@ static struct sas_domain_function_template aic94xx_transport_functions = {
33966 .lldd_control_phy = asd_control_phy,
33967 };
33968
33969 -static const struct pci_device_id aic94xx_pci_table[] __devinitdata = {
33970 +static const struct pci_device_id aic94xx_pci_table[] __devinitconst = {
33971 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x410),0, 0, 1},
33972 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x412),0, 0, 1},
33973 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x416),0, 0, 1},
33974 diff --git a/drivers/scsi/bfa/bfa.h b/drivers/scsi/bfa/bfa.h
33975 index a796de9..1ef20e1 100644
33976 --- a/drivers/scsi/bfa/bfa.h
33977 +++ b/drivers/scsi/bfa/bfa.h
33978 @@ -196,7 +196,7 @@ struct bfa_hwif_s {
33979 u32 *end);
33980 int cpe_vec_q0;
33981 int rme_vec_q0;
33982 -};
33983 +} __no_const;
33984 typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status);
33985
33986 struct bfa_faa_cbfn_s {
33987 diff --git a/drivers/scsi/bfa/bfa_fcpim.c b/drivers/scsi/bfa/bfa_fcpim.c
33988 index e07bd47..cd1bbbb 100644
33989 --- a/drivers/scsi/bfa/bfa_fcpim.c
33990 +++ b/drivers/scsi/bfa/bfa_fcpim.c
33991 @@ -4121,7 +4121,7 @@ bfa_fcp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
33992
33993 bfa_iotag_attach(fcp);
33994
33995 - fcp->itn_arr = (struct bfa_itn_s *) bfa_mem_kva_curp(fcp);
33996 + fcp->itn_arr = (bfa_itn_s_no_const *) bfa_mem_kva_curp(fcp);
33997 bfa_mem_kva_curp(fcp) = (u8 *)fcp->itn_arr +
33998 (fcp->num_itns * sizeof(struct bfa_itn_s));
33999 memset(fcp->itn_arr, 0,
34000 @@ -4179,7 +4179,7 @@ bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
34001 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m))
34002 {
34003 struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
34004 - struct bfa_itn_s *itn;
34005 + bfa_itn_s_no_const *itn;
34006
34007 itn = BFA_ITN_FROM_TAG(fcp, rport->rport_tag);
34008 itn->isr = isr;
34009 diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h
34010 index 1080bcb..a3b39e3 100644
34011 --- a/drivers/scsi/bfa/bfa_fcpim.h
34012 +++ b/drivers/scsi/bfa/bfa_fcpim.h
34013 @@ -37,6 +37,7 @@ struct bfa_iotag_s {
34014 struct bfa_itn_s {
34015 bfa_isr_func_t isr;
34016 };
34017 +typedef struct bfa_itn_s __no_const bfa_itn_s_no_const;
34018
34019 void bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
34020 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m));
34021 @@ -149,7 +150,7 @@ struct bfa_fcp_mod_s {
34022 struct list_head iotag_tio_free_q; /* free IO resources */
34023 struct list_head iotag_unused_q; /* unused IO resources*/
34024 struct bfa_iotag_s *iotag_arr;
34025 - struct bfa_itn_s *itn_arr;
34026 + bfa_itn_s_no_const *itn_arr;
34027 int num_ioim_reqs;
34028 int num_fwtio_reqs;
34029 int num_itns;
34030 diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
34031 index 546d46b..642fa5b 100644
34032 --- a/drivers/scsi/bfa/bfa_ioc.h
34033 +++ b/drivers/scsi/bfa/bfa_ioc.h
34034 @@ -258,7 +258,7 @@ struct bfa_ioc_cbfn_s {
34035 bfa_ioc_disable_cbfn_t disable_cbfn;
34036 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
34037 bfa_ioc_reset_cbfn_t reset_cbfn;
34038 -};
34039 +} __no_const;
34040
34041 /*
34042 * IOC event notification mechanism.
34043 @@ -346,7 +346,7 @@ struct bfa_ioc_hwif_s {
34044 void (*ioc_sync_ack) (struct bfa_ioc_s *ioc);
34045 bfa_boolean_t (*ioc_sync_complete) (struct bfa_ioc_s *ioc);
34046 bfa_boolean_t (*ioc_lpu_read_stat) (struct bfa_ioc_s *ioc);
34047 -};
34048 +} __no_const;
34049
34050 /*
34051 * Queue element to wait for room in request queue. FIFO order is
34052 diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
34053 index 351dc0b..951dc32 100644
34054 --- a/drivers/scsi/hosts.c
34055 +++ b/drivers/scsi/hosts.c
34056 @@ -42,7 +42,7 @@
34057 #include "scsi_logging.h"
34058
34059
34060 -static atomic_t scsi_host_next_hn; /* host_no for next new host */
34061 +static atomic_unchecked_t scsi_host_next_hn; /* host_no for next new host */
34062
34063
34064 static void scsi_host_cls_release(struct device *dev)
34065 @@ -357,7 +357,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
34066 * subtract one because we increment first then return, but we need to
34067 * know what the next host number was before increment
34068 */
34069 - shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
34070 + shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
34071 shost->dma_channel = 0xff;
34072
34073 /* These three are default values which can be overridden */
34074 diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
34075 index 865d452..e9b7fa7 100644
34076 --- a/drivers/scsi/hpsa.c
34077 +++ b/drivers/scsi/hpsa.c
34078 @@ -505,7 +505,7 @@ static inline u32 next_command(struct ctlr_info *h)
34079 u32 a;
34080
34081 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
34082 - return h->access.command_completed(h);
34083 + return h->access->command_completed(h);
34084
34085 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
34086 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
34087 @@ -2989,7 +2989,7 @@ static void start_io(struct ctlr_info *h)
34088 while (!list_empty(&h->reqQ)) {
34089 c = list_entry(h->reqQ.next, struct CommandList, list);
34090 /* can't do anything if fifo is full */
34091 - if ((h->access.fifo_full(h))) {
34092 + if ((h->access->fifo_full(h))) {
34093 dev_warn(&h->pdev->dev, "fifo full\n");
34094 break;
34095 }
34096 @@ -2999,7 +2999,7 @@ static void start_io(struct ctlr_info *h)
34097 h->Qdepth--;
34098
34099 /* Tell the controller execute command */
34100 - h->access.submit_command(h, c);
34101 + h->access->submit_command(h, c);
34102
34103 /* Put job onto the completed Q */
34104 addQ(&h->cmpQ, c);
34105 @@ -3008,17 +3008,17 @@ static void start_io(struct ctlr_info *h)
34106
34107 static inline unsigned long get_next_completion(struct ctlr_info *h)
34108 {
34109 - return h->access.command_completed(h);
34110 + return h->access->command_completed(h);
34111 }
34112
34113 static inline bool interrupt_pending(struct ctlr_info *h)
34114 {
34115 - return h->access.intr_pending(h);
34116 + return h->access->intr_pending(h);
34117 }
34118
34119 static inline long interrupt_not_for_us(struct ctlr_info *h)
34120 {
34121 - return (h->access.intr_pending(h) == 0) ||
34122 + return (h->access->intr_pending(h) == 0) ||
34123 (h->interrupts_enabled == 0);
34124 }
34125
34126 @@ -3917,7 +3917,7 @@ static int __devinit hpsa_pci_init(struct ctlr_info *h)
34127 if (prod_index < 0)
34128 return -ENODEV;
34129 h->product_name = products[prod_index].product_name;
34130 - h->access = *(products[prod_index].access);
34131 + h->access = products[prod_index].access;
34132
34133 if (hpsa_board_disabled(h->pdev)) {
34134 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
34135 @@ -4162,7 +4162,7 @@ static void controller_lockup_detected(struct ctlr_info *h)
34136
34137 assert_spin_locked(&lockup_detector_lock);
34138 remove_ctlr_from_lockup_detector_list(h);
34139 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
34140 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
34141 spin_lock_irqsave(&h->lock, flags);
34142 h->lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
34143 spin_unlock_irqrestore(&h->lock, flags);
34144 @@ -4340,7 +4340,7 @@ reinit_after_soft_reset:
34145 }
34146
34147 /* make sure the board interrupts are off */
34148 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
34149 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
34150
34151 if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
34152 goto clean2;
34153 @@ -4374,7 +4374,7 @@ reinit_after_soft_reset:
34154 * fake ones to scoop up any residual completions.
34155 */
34156 spin_lock_irqsave(&h->lock, flags);
34157 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
34158 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
34159 spin_unlock_irqrestore(&h->lock, flags);
34160 free_irq(h->intr[h->intr_mode], h);
34161 rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
34162 @@ -4393,9 +4393,9 @@ reinit_after_soft_reset:
34163 dev_info(&h->pdev->dev, "Board READY.\n");
34164 dev_info(&h->pdev->dev,
34165 "Waiting for stale completions to drain.\n");
34166 - h->access.set_intr_mask(h, HPSA_INTR_ON);
34167 + h->access->set_intr_mask(h, HPSA_INTR_ON);
34168 msleep(10000);
34169 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
34170 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
34171
34172 rc = controller_reset_failed(h->cfgtable);
34173 if (rc)
34174 @@ -4416,7 +4416,7 @@ reinit_after_soft_reset:
34175 }
34176
34177 /* Turn the interrupts on so we can service requests */
34178 - h->access.set_intr_mask(h, HPSA_INTR_ON);
34179 + h->access->set_intr_mask(h, HPSA_INTR_ON);
34180
34181 hpsa_hba_inquiry(h);
34182 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
34183 @@ -4468,7 +4468,7 @@ static void hpsa_shutdown(struct pci_dev *pdev)
34184 * To write all data in the battery backed cache to disks
34185 */
34186 hpsa_flush_cache(h);
34187 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
34188 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
34189 free_irq(h->intr[h->intr_mode], h);
34190 #ifdef CONFIG_PCI_MSI
34191 if (h->msix_vector)
34192 @@ -4632,7 +4632,7 @@ static __devinit void hpsa_enter_performant_mode(struct ctlr_info *h,
34193 return;
34194 }
34195 /* Change the access methods to the performant access methods */
34196 - h->access = SA5_performant_access;
34197 + h->access = &SA5_performant_access;
34198 h->transMethod = CFGTBL_Trans_Performant;
34199 }
34200
34201 diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
34202 index 91edafb..a9b88ec 100644
34203 --- a/drivers/scsi/hpsa.h
34204 +++ b/drivers/scsi/hpsa.h
34205 @@ -73,7 +73,7 @@ struct ctlr_info {
34206 unsigned int msix_vector;
34207 unsigned int msi_vector;
34208 int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
34209 - struct access_method access;
34210 + struct access_method *access;
34211
34212 /* queue and queue Info */
34213 struct list_head reqQ;
34214 diff --git a/drivers/scsi/ips.h b/drivers/scsi/ips.h
34215 index f2df059..a3a9930 100644
34216 --- a/drivers/scsi/ips.h
34217 +++ b/drivers/scsi/ips.h
34218 @@ -1027,7 +1027,7 @@ typedef struct {
34219 int (*intr)(struct ips_ha *);
34220 void (*enableint)(struct ips_ha *);
34221 uint32_t (*statupd)(struct ips_ha *);
34222 -} ips_hw_func_t;
34223 +} __no_const ips_hw_func_t;
34224
34225 typedef struct ips_ha {
34226 uint8_t ha_id[IPS_MAX_CHANNELS+1];
34227 diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
34228 index 9de9db2..1e09660 100644
34229 --- a/drivers/scsi/libfc/fc_exch.c
34230 +++ b/drivers/scsi/libfc/fc_exch.c
34231 @@ -105,12 +105,12 @@ struct fc_exch_mgr {
34232 * all together if not used XXX
34233 */
34234 struct {
34235 - atomic_t no_free_exch;
34236 - atomic_t no_free_exch_xid;
34237 - atomic_t xid_not_found;
34238 - atomic_t xid_busy;
34239 - atomic_t seq_not_found;
34240 - atomic_t non_bls_resp;
34241 + atomic_unchecked_t no_free_exch;
34242 + atomic_unchecked_t no_free_exch_xid;
34243 + atomic_unchecked_t xid_not_found;
34244 + atomic_unchecked_t xid_busy;
34245 + atomic_unchecked_t seq_not_found;
34246 + atomic_unchecked_t non_bls_resp;
34247 } stats;
34248 };
34249
34250 @@ -719,7 +719,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
34251 /* allocate memory for exchange */
34252 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
34253 if (!ep) {
34254 - atomic_inc(&mp->stats.no_free_exch);
34255 + atomic_inc_unchecked(&mp->stats.no_free_exch);
34256 goto out;
34257 }
34258 memset(ep, 0, sizeof(*ep));
34259 @@ -780,7 +780,7 @@ out:
34260 return ep;
34261 err:
34262 spin_unlock_bh(&pool->lock);
34263 - atomic_inc(&mp->stats.no_free_exch_xid);
34264 + atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
34265 mempool_free(ep, mp->ep_pool);
34266 return NULL;
34267 }
34268 @@ -923,7 +923,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
34269 xid = ntohs(fh->fh_ox_id); /* we originated exch */
34270 ep = fc_exch_find(mp, xid);
34271 if (!ep) {
34272 - atomic_inc(&mp->stats.xid_not_found);
34273 + atomic_inc_unchecked(&mp->stats.xid_not_found);
34274 reject = FC_RJT_OX_ID;
34275 goto out;
34276 }
34277 @@ -953,7 +953,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
34278 ep = fc_exch_find(mp, xid);
34279 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
34280 if (ep) {
34281 - atomic_inc(&mp->stats.xid_busy);
34282 + atomic_inc_unchecked(&mp->stats.xid_busy);
34283 reject = FC_RJT_RX_ID;
34284 goto rel;
34285 }
34286 @@ -964,7 +964,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
34287 }
34288 xid = ep->xid; /* get our XID */
34289 } else if (!ep) {
34290 - atomic_inc(&mp->stats.xid_not_found);
34291 + atomic_inc_unchecked(&mp->stats.xid_not_found);
34292 reject = FC_RJT_RX_ID; /* XID not found */
34293 goto out;
34294 }
34295 @@ -981,7 +981,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
34296 } else {
34297 sp = &ep->seq;
34298 if (sp->id != fh->fh_seq_id) {
34299 - atomic_inc(&mp->stats.seq_not_found);
34300 + atomic_inc_unchecked(&mp->stats.seq_not_found);
34301 if (f_ctl & FC_FC_END_SEQ) {
34302 /*
34303 * Update sequence_id based on incoming last
34304 @@ -1431,22 +1431,22 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
34305
34306 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
34307 if (!ep) {
34308 - atomic_inc(&mp->stats.xid_not_found);
34309 + atomic_inc_unchecked(&mp->stats.xid_not_found);
34310 goto out;
34311 }
34312 if (ep->esb_stat & ESB_ST_COMPLETE) {
34313 - atomic_inc(&mp->stats.xid_not_found);
34314 + atomic_inc_unchecked(&mp->stats.xid_not_found);
34315 goto rel;
34316 }
34317 if (ep->rxid == FC_XID_UNKNOWN)
34318 ep->rxid = ntohs(fh->fh_rx_id);
34319 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
34320 - atomic_inc(&mp->stats.xid_not_found);
34321 + atomic_inc_unchecked(&mp->stats.xid_not_found);
34322 goto rel;
34323 }
34324 if (ep->did != ntoh24(fh->fh_s_id) &&
34325 ep->did != FC_FID_FLOGI) {
34326 - atomic_inc(&mp->stats.xid_not_found);
34327 + atomic_inc_unchecked(&mp->stats.xid_not_found);
34328 goto rel;
34329 }
34330 sof = fr_sof(fp);
34331 @@ -1455,7 +1455,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
34332 sp->ssb_stat |= SSB_ST_RESP;
34333 sp->id = fh->fh_seq_id;
34334 } else if (sp->id != fh->fh_seq_id) {
34335 - atomic_inc(&mp->stats.seq_not_found);
34336 + atomic_inc_unchecked(&mp->stats.seq_not_found);
34337 goto rel;
34338 }
34339
34340 @@ -1519,9 +1519,9 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
34341 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
34342
34343 if (!sp)
34344 - atomic_inc(&mp->stats.xid_not_found);
34345 + atomic_inc_unchecked(&mp->stats.xid_not_found);
34346 else
34347 - atomic_inc(&mp->stats.non_bls_resp);
34348 + atomic_inc_unchecked(&mp->stats.non_bls_resp);
34349
34350 fc_frame_free(fp);
34351 }
34352 diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
34353 index db9238f..4378ed2 100644
34354 --- a/drivers/scsi/libsas/sas_ata.c
34355 +++ b/drivers/scsi/libsas/sas_ata.c
34356 @@ -368,7 +368,7 @@ static struct ata_port_operations sas_sata_ops = {
34357 .postreset = ata_std_postreset,
34358 .error_handler = ata_std_error_handler,
34359 .post_internal_cmd = sas_ata_post_internal,
34360 - .qc_defer = ata_std_qc_defer,
34361 + .qc_defer = ata_std_qc_defer,
34362 .qc_prep = ata_noop_qc_prep,
34363 .qc_issue = sas_ata_qc_issue,
34364 .qc_fill_rtf = sas_ata_qc_fill_rtf,
34365 diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
34366 index bb4c8e0..f33d849 100644
34367 --- a/drivers/scsi/lpfc/lpfc.h
34368 +++ b/drivers/scsi/lpfc/lpfc.h
34369 @@ -425,7 +425,7 @@ struct lpfc_vport {
34370 struct dentry *debug_nodelist;
34371 struct dentry *vport_debugfs_root;
34372 struct lpfc_debugfs_trc *disc_trc;
34373 - atomic_t disc_trc_cnt;
34374 + atomic_unchecked_t disc_trc_cnt;
34375 #endif
34376 uint8_t stat_data_enabled;
34377 uint8_t stat_data_blocked;
34378 @@ -835,8 +835,8 @@ struct lpfc_hba {
34379 struct timer_list fabric_block_timer;
34380 unsigned long bit_flags;
34381 #define FABRIC_COMANDS_BLOCKED 0
34382 - atomic_t num_rsrc_err;
34383 - atomic_t num_cmd_success;
34384 + atomic_unchecked_t num_rsrc_err;
34385 + atomic_unchecked_t num_cmd_success;
34386 unsigned long last_rsrc_error_time;
34387 unsigned long last_ramp_down_time;
34388 unsigned long last_ramp_up_time;
34389 @@ -866,7 +866,7 @@ struct lpfc_hba {
34390
34391 struct dentry *debug_slow_ring_trc;
34392 struct lpfc_debugfs_trc *slow_ring_trc;
34393 - atomic_t slow_ring_trc_cnt;
34394 + atomic_unchecked_t slow_ring_trc_cnt;
34395 /* iDiag debugfs sub-directory */
34396 struct dentry *idiag_root;
34397 struct dentry *idiag_pci_cfg;
34398 diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
34399 index 2838259..a07cfb5 100644
34400 --- a/drivers/scsi/lpfc/lpfc_debugfs.c
34401 +++ b/drivers/scsi/lpfc/lpfc_debugfs.c
34402 @@ -106,7 +106,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc,
34403
34404 #include <linux/debugfs.h>
34405
34406 -static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
34407 +static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
34408 static unsigned long lpfc_debugfs_start_time = 0L;
34409
34410 /* iDiag */
34411 @@ -147,7 +147,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
34412 lpfc_debugfs_enable = 0;
34413
34414 len = 0;
34415 - index = (atomic_read(&vport->disc_trc_cnt) + 1) &
34416 + index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
34417 (lpfc_debugfs_max_disc_trc - 1);
34418 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
34419 dtp = vport->disc_trc + i;
34420 @@ -213,7 +213,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
34421 lpfc_debugfs_enable = 0;
34422
34423 len = 0;
34424 - index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
34425 + index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
34426 (lpfc_debugfs_max_slow_ring_trc - 1);
34427 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
34428 dtp = phba->slow_ring_trc + i;
34429 @@ -636,14 +636,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
34430 !vport || !vport->disc_trc)
34431 return;
34432
34433 - index = atomic_inc_return(&vport->disc_trc_cnt) &
34434 + index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
34435 (lpfc_debugfs_max_disc_trc - 1);
34436 dtp = vport->disc_trc + index;
34437 dtp->fmt = fmt;
34438 dtp->data1 = data1;
34439 dtp->data2 = data2;
34440 dtp->data3 = data3;
34441 - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
34442 + dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
34443 dtp->jif = jiffies;
34444 #endif
34445 return;
34446 @@ -674,14 +674,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
34447 !phba || !phba->slow_ring_trc)
34448 return;
34449
34450 - index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
34451 + index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
34452 (lpfc_debugfs_max_slow_ring_trc - 1);
34453 dtp = phba->slow_ring_trc + index;
34454 dtp->fmt = fmt;
34455 dtp->data1 = data1;
34456 dtp->data2 = data2;
34457 dtp->data3 = data3;
34458 - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
34459 + dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
34460 dtp->jif = jiffies;
34461 #endif
34462 return;
34463 @@ -3986,7 +3986,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
34464 "slow_ring buffer\n");
34465 goto debug_failed;
34466 }
34467 - atomic_set(&phba->slow_ring_trc_cnt, 0);
34468 + atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
34469 memset(phba->slow_ring_trc, 0,
34470 (sizeof(struct lpfc_debugfs_trc) *
34471 lpfc_debugfs_max_slow_ring_trc));
34472 @@ -4032,7 +4032,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
34473 "buffer\n");
34474 goto debug_failed;
34475 }
34476 - atomic_set(&vport->disc_trc_cnt, 0);
34477 + atomic_set_unchecked(&vport->disc_trc_cnt, 0);
34478
34479 snprintf(name, sizeof(name), "discovery_trace");
34480 vport->debug_disc_trc =
34481 diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
34482 index 55bc4fc..a2a109c 100644
34483 --- a/drivers/scsi/lpfc/lpfc_init.c
34484 +++ b/drivers/scsi/lpfc/lpfc_init.c
34485 @@ -10027,8 +10027,10 @@ lpfc_init(void)
34486 printk(LPFC_COPYRIGHT "\n");
34487
34488 if (lpfc_enable_npiv) {
34489 - lpfc_transport_functions.vport_create = lpfc_vport_create;
34490 - lpfc_transport_functions.vport_delete = lpfc_vport_delete;
34491 + pax_open_kernel();
34492 + *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
34493 + *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
34494 + pax_close_kernel();
34495 }
34496 lpfc_transport_template =
34497 fc_attach_transport(&lpfc_transport_functions);
34498 diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
34499 index 2e1e54e..1af0a0d 100644
34500 --- a/drivers/scsi/lpfc/lpfc_scsi.c
34501 +++ b/drivers/scsi/lpfc/lpfc_scsi.c
34502 @@ -305,7 +305,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
34503 uint32_t evt_posted;
34504
34505 spin_lock_irqsave(&phba->hbalock, flags);
34506 - atomic_inc(&phba->num_rsrc_err);
34507 + atomic_inc_unchecked(&phba->num_rsrc_err);
34508 phba->last_rsrc_error_time = jiffies;
34509
34510 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
34511 @@ -346,7 +346,7 @@ lpfc_rampup_queue_depth(struct lpfc_vport *vport,
34512 unsigned long flags;
34513 struct lpfc_hba *phba = vport->phba;
34514 uint32_t evt_posted;
34515 - atomic_inc(&phba->num_cmd_success);
34516 + atomic_inc_unchecked(&phba->num_cmd_success);
34517
34518 if (vport->cfg_lun_queue_depth <= queue_depth)
34519 return;
34520 @@ -390,8 +390,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
34521 unsigned long num_rsrc_err, num_cmd_success;
34522 int i;
34523
34524 - num_rsrc_err = atomic_read(&phba->num_rsrc_err);
34525 - num_cmd_success = atomic_read(&phba->num_cmd_success);
34526 + num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
34527 + num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
34528
34529 vports = lpfc_create_vport_work_array(phba);
34530 if (vports != NULL)
34531 @@ -411,8 +411,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
34532 }
34533 }
34534 lpfc_destroy_vport_work_array(phba, vports);
34535 - atomic_set(&phba->num_rsrc_err, 0);
34536 - atomic_set(&phba->num_cmd_success, 0);
34537 + atomic_set_unchecked(&phba->num_rsrc_err, 0);
34538 + atomic_set_unchecked(&phba->num_cmd_success, 0);
34539 }
34540
34541 /**
34542 @@ -446,8 +446,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
34543 }
34544 }
34545 lpfc_destroy_vport_work_array(phba, vports);
34546 - atomic_set(&phba->num_rsrc_err, 0);
34547 - atomic_set(&phba->num_cmd_success, 0);
34548 + atomic_set_unchecked(&phba->num_rsrc_err, 0);
34549 + atomic_set_unchecked(&phba->num_cmd_success, 0);
34550 }
34551
34552 /**
34553 diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
34554 index 5163edb..7b142bc 100644
34555 --- a/drivers/scsi/pmcraid.c
34556 +++ b/drivers/scsi/pmcraid.c
34557 @@ -200,8 +200,8 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
34558 res->scsi_dev = scsi_dev;
34559 scsi_dev->hostdata = res;
34560 res->change_detected = 0;
34561 - atomic_set(&res->read_failures, 0);
34562 - atomic_set(&res->write_failures, 0);
34563 + atomic_set_unchecked(&res->read_failures, 0);
34564 + atomic_set_unchecked(&res->write_failures, 0);
34565 rc = 0;
34566 }
34567 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
34568 @@ -2676,9 +2676,9 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
34569
34570 /* If this was a SCSI read/write command keep count of errors */
34571 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
34572 - atomic_inc(&res->read_failures);
34573 + atomic_inc_unchecked(&res->read_failures);
34574 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
34575 - atomic_inc(&res->write_failures);
34576 + atomic_inc_unchecked(&res->write_failures);
34577
34578 if (!RES_IS_GSCSI(res->cfg_entry) &&
34579 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
34580 @@ -3534,7 +3534,7 @@ static int pmcraid_queuecommand_lck(
34581 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
34582 * hrrq_id assigned here in queuecommand
34583 */
34584 - ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
34585 + ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
34586 pinstance->num_hrrq;
34587 cmd->cmd_done = pmcraid_io_done;
34588
34589 @@ -3859,7 +3859,7 @@ static long pmcraid_ioctl_passthrough(
34590 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
34591 * hrrq_id assigned here in queuecommand
34592 */
34593 - ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
34594 + ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
34595 pinstance->num_hrrq;
34596
34597 if (request_size) {
34598 @@ -4497,7 +4497,7 @@ static void pmcraid_worker_function(struct work_struct *workp)
34599
34600 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
34601 /* add resources only after host is added into system */
34602 - if (!atomic_read(&pinstance->expose_resources))
34603 + if (!atomic_read_unchecked(&pinstance->expose_resources))
34604 return;
34605
34606 fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
34607 @@ -5331,8 +5331,8 @@ static int __devinit pmcraid_init_instance(
34608 init_waitqueue_head(&pinstance->reset_wait_q);
34609
34610 atomic_set(&pinstance->outstanding_cmds, 0);
34611 - atomic_set(&pinstance->last_message_id, 0);
34612 - atomic_set(&pinstance->expose_resources, 0);
34613 + atomic_set_unchecked(&pinstance->last_message_id, 0);
34614 + atomic_set_unchecked(&pinstance->expose_resources, 0);
34615
34616 INIT_LIST_HEAD(&pinstance->free_res_q);
34617 INIT_LIST_HEAD(&pinstance->used_res_q);
34618 @@ -6047,7 +6047,7 @@ static int __devinit pmcraid_probe(
34619 /* Schedule worker thread to handle CCN and take care of adding and
34620 * removing devices to OS
34621 */
34622 - atomic_set(&pinstance->expose_resources, 1);
34623 + atomic_set_unchecked(&pinstance->expose_resources, 1);
34624 schedule_work(&pinstance->worker_q);
34625 return rc;
34626
34627 diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
34628 index ca496c7..9c791d5 100644
34629 --- a/drivers/scsi/pmcraid.h
34630 +++ b/drivers/scsi/pmcraid.h
34631 @@ -748,7 +748,7 @@ struct pmcraid_instance {
34632 struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
34633
34634 /* Message id as filled in last fired IOARCB, used to identify HRRQ */
34635 - atomic_t last_message_id;
34636 + atomic_unchecked_t last_message_id;
34637
34638 /* configuration table */
34639 struct pmcraid_config_table *cfg_table;
34640 @@ -777,7 +777,7 @@ struct pmcraid_instance {
34641 atomic_t outstanding_cmds;
34642
34643 /* should add/delete resources to mid-layer now ?*/
34644 - atomic_t expose_resources;
34645 + atomic_unchecked_t expose_resources;
34646
34647
34648
34649 @@ -813,8 +813,8 @@ struct pmcraid_resource_entry {
34650 struct pmcraid_config_table_entry_ext cfg_entry_ext;
34651 };
34652 struct scsi_device *scsi_dev; /* Link scsi_device structure */
34653 - atomic_t read_failures; /* count of failed READ commands */
34654 - atomic_t write_failures; /* count of failed WRITE commands */
34655 + atomic_unchecked_t read_failures; /* count of failed READ commands */
34656 + atomic_unchecked_t write_failures; /* count of failed WRITE commands */
34657
34658 /* To indicate add/delete/modify during CCN */
34659 u8 change_detected;
34660 diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
34661 index fcf052c..a8025a4 100644
34662 --- a/drivers/scsi/qla2xxx/qla_def.h
34663 +++ b/drivers/scsi/qla2xxx/qla_def.h
34664 @@ -2244,7 +2244,7 @@ struct isp_operations {
34665 int (*get_flash_version) (struct scsi_qla_host *, void *);
34666 int (*start_scsi) (srb_t *);
34667 int (*abort_isp) (struct scsi_qla_host *);
34668 -};
34669 +} __no_const;
34670
34671 /* MSI-X Support *************************************************************/
34672
34673 diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
34674 index fd5edc6..4906148 100644
34675 --- a/drivers/scsi/qla4xxx/ql4_def.h
34676 +++ b/drivers/scsi/qla4xxx/ql4_def.h
34677 @@ -258,7 +258,7 @@ struct ddb_entry {
34678 * (4000 only) */
34679 atomic_t relogin_timer; /* Max Time to wait for
34680 * relogin to complete */
34681 - atomic_t relogin_retry_count; /* Num of times relogin has been
34682 + atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
34683 * retried */
34684 uint32_t default_time2wait; /* Default Min time between
34685 * relogins (+aens) */
34686 diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
34687 index 4169c8b..a8b896b 100644
34688 --- a/drivers/scsi/qla4xxx/ql4_os.c
34689 +++ b/drivers/scsi/qla4xxx/ql4_os.c
34690 @@ -2104,12 +2104,12 @@ void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
34691 */
34692 if (!iscsi_is_session_online(cls_sess)) {
34693 /* Reset retry relogin timer */
34694 - atomic_inc(&ddb_entry->relogin_retry_count);
34695 + atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
34696 DEBUG2(ql4_printk(KERN_INFO, ha,
34697 "%s: index[%d] relogin timed out-retrying"
34698 " relogin (%d), retry (%d)\n", __func__,
34699 ddb_entry->fw_ddb_index,
34700 - atomic_read(&ddb_entry->relogin_retry_count),
34701 + atomic_read_unchecked(&ddb_entry->relogin_retry_count),
34702 ddb_entry->default_time2wait + 4));
34703 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
34704 atomic_set(&ddb_entry->retry_relogin_timer,
34705 @@ -3835,7 +3835,7 @@ static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
34706
34707 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
34708 atomic_set(&ddb_entry->relogin_timer, 0);
34709 - atomic_set(&ddb_entry->relogin_retry_count, 0);
34710 + atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
34711
34712 ddb_entry->default_relogin_timeout =
34713 le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
34714 diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
34715 index 2aeb2e9..46e3925 100644
34716 --- a/drivers/scsi/scsi.c
34717 +++ b/drivers/scsi/scsi.c
34718 @@ -655,7 +655,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
34719 unsigned long timeout;
34720 int rtn = 0;
34721
34722 - atomic_inc(&cmd->device->iorequest_cnt);
34723 + atomic_inc_unchecked(&cmd->device->iorequest_cnt);
34724
34725 /* check if the device is still usable */
34726 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
34727 diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
34728 index f85cfa6..a57c9e8 100644
34729 --- a/drivers/scsi/scsi_lib.c
34730 +++ b/drivers/scsi/scsi_lib.c
34731 @@ -1416,7 +1416,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
34732 shost = sdev->host;
34733 scsi_init_cmd_errh(cmd);
34734 cmd->result = DID_NO_CONNECT << 16;
34735 - atomic_inc(&cmd->device->iorequest_cnt);
34736 + atomic_inc_unchecked(&cmd->device->iorequest_cnt);
34737
34738 /*
34739 * SCSI request completion path will do scsi_device_unbusy(),
34740 @@ -1442,9 +1442,9 @@ static void scsi_softirq_done(struct request *rq)
34741
34742 INIT_LIST_HEAD(&cmd->eh_entry);
34743
34744 - atomic_inc(&cmd->device->iodone_cnt);
34745 + atomic_inc_unchecked(&cmd->device->iodone_cnt);
34746 if (cmd->result)
34747 - atomic_inc(&cmd->device->ioerr_cnt);
34748 + atomic_inc_unchecked(&cmd->device->ioerr_cnt);
34749
34750 disposition = scsi_decide_disposition(cmd);
34751 if (disposition != SUCCESS &&
34752 diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
34753 index 04c2a27..9d8bd66 100644
34754 --- a/drivers/scsi/scsi_sysfs.c
34755 +++ b/drivers/scsi/scsi_sysfs.c
34756 @@ -660,7 +660,7 @@ show_iostat_##field(struct device *dev, struct device_attribute *attr, \
34757 char *buf) \
34758 { \
34759 struct scsi_device *sdev = to_scsi_device(dev); \
34760 - unsigned long long count = atomic_read(&sdev->field); \
34761 + unsigned long long count = atomic_read_unchecked(&sdev->field); \
34762 return snprintf(buf, 20, "0x%llx\n", count); \
34763 } \
34764 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
34765 diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
34766 index 84a1fdf..693b0d6 100644
34767 --- a/drivers/scsi/scsi_tgt_lib.c
34768 +++ b/drivers/scsi/scsi_tgt_lib.c
34769 @@ -362,7 +362,7 @@ static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
34770 int err;
34771
34772 dprintk("%lx %u\n", uaddr, len);
34773 - err = blk_rq_map_user(q, rq, NULL, (void *)uaddr, len, GFP_KERNEL);
34774 + err = blk_rq_map_user(q, rq, NULL, (void __user *)uaddr, len, GFP_KERNEL);
34775 if (err) {
34776 /*
34777 * TODO: need to fixup sg_tablesize, max_segment_size,
34778 diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
34779 index 1b21491..1b7f60e 100644
34780 --- a/drivers/scsi/scsi_transport_fc.c
34781 +++ b/drivers/scsi/scsi_transport_fc.c
34782 @@ -484,7 +484,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_class,
34783 * Netlink Infrastructure
34784 */
34785
34786 -static atomic_t fc_event_seq;
34787 +static atomic_unchecked_t fc_event_seq;
34788
34789 /**
34790 * fc_get_event_number - Obtain the next sequential FC event number
34791 @@ -497,7 +497,7 @@ static atomic_t fc_event_seq;
34792 u32
34793 fc_get_event_number(void)
34794 {
34795 - return atomic_add_return(1, &fc_event_seq);
34796 + return atomic_add_return_unchecked(1, &fc_event_seq);
34797 }
34798 EXPORT_SYMBOL(fc_get_event_number);
34799
34800 @@ -645,7 +645,7 @@ static __init int fc_transport_init(void)
34801 {
34802 int error;
34803
34804 - atomic_set(&fc_event_seq, 0);
34805 + atomic_set_unchecked(&fc_event_seq, 0);
34806
34807 error = transport_class_register(&fc_host_class);
34808 if (error)
34809 @@ -835,7 +835,7 @@ static int fc_str_to_dev_loss(const char *buf, unsigned long *val)
34810 char *cp;
34811
34812 *val = simple_strtoul(buf, &cp, 0);
34813 - if ((*cp && (*cp != '\n')) || (*val < 0))
34814 + if (*cp && (*cp != '\n'))
34815 return -EINVAL;
34816 /*
34817 * Check for overflow; dev_loss_tmo is u32
34818 diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
34819 index 96029e6..4d77fa0 100644
34820 --- a/drivers/scsi/scsi_transport_iscsi.c
34821 +++ b/drivers/scsi/scsi_transport_iscsi.c
34822 @@ -79,7 +79,7 @@ struct iscsi_internal {
34823 struct transport_container session_cont;
34824 };
34825
34826 -static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
34827 +static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
34828 static struct workqueue_struct *iscsi_eh_timer_workq;
34829
34830 static DEFINE_IDA(iscsi_sess_ida);
34831 @@ -1062,7 +1062,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
34832 int err;
34833
34834 ihost = shost->shost_data;
34835 - session->sid = atomic_add_return(1, &iscsi_session_nr);
34836 + session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
34837
34838 if (target_id == ISCSI_MAX_TARGET) {
34839 id = ida_simple_get(&iscsi_sess_ida, 0, 0, GFP_KERNEL);
34840 @@ -2663,7 +2663,7 @@ static __init int iscsi_transport_init(void)
34841 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
34842 ISCSI_TRANSPORT_VERSION);
34843
34844 - atomic_set(&iscsi_session_nr, 0);
34845 + atomic_set_unchecked(&iscsi_session_nr, 0);
34846
34847 err = class_register(&iscsi_transport_class);
34848 if (err)
34849 diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
34850 index 21a045e..ec89e03 100644
34851 --- a/drivers/scsi/scsi_transport_srp.c
34852 +++ b/drivers/scsi/scsi_transport_srp.c
34853 @@ -33,7 +33,7 @@
34854 #include "scsi_transport_srp_internal.h"
34855
34856 struct srp_host_attrs {
34857 - atomic_t next_port_id;
34858 + atomic_unchecked_t next_port_id;
34859 };
34860 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
34861
34862 @@ -62,7 +62,7 @@ static int srp_host_setup(struct transport_container *tc, struct device *dev,
34863 struct Scsi_Host *shost = dev_to_shost(dev);
34864 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
34865
34866 - atomic_set(&srp_host->next_port_id, 0);
34867 + atomic_set_unchecked(&srp_host->next_port_id, 0);
34868 return 0;
34869 }
34870
34871 @@ -211,7 +211,7 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
34872 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
34873 rport->roles = ids->roles;
34874
34875 - id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
34876 + id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
34877 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
34878
34879 transport_setup_device(&rport->dev);
34880 diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
34881 index 441a1c5..07cece7 100644
34882 --- a/drivers/scsi/sg.c
34883 +++ b/drivers/scsi/sg.c
34884 @@ -1077,7 +1077,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
34885 sdp->disk->disk_name,
34886 MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
34887 NULL,
34888 - (char *)arg);
34889 + (char __user *)arg);
34890 case BLKTRACESTART:
34891 return blk_trace_startstop(sdp->device->request_queue, 1);
34892 case BLKTRACESTOP:
34893 @@ -2312,7 +2312,7 @@ struct sg_proc_leaf {
34894 const struct file_operations * fops;
34895 };
34896
34897 -static struct sg_proc_leaf sg_proc_leaf_arr[] = {
34898 +static const struct sg_proc_leaf sg_proc_leaf_arr[] = {
34899 {"allow_dio", &adio_fops},
34900 {"debug", &debug_fops},
34901 {"def_reserved_size", &dressz_fops},
34902 @@ -2327,7 +2327,7 @@ sg_proc_init(void)
34903 {
34904 int k, mask;
34905 int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
34906 - struct sg_proc_leaf * leaf;
34907 + const struct sg_proc_leaf * leaf;
34908
34909 sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL);
34910 if (!sg_proc_sgp)
34911 diff --git a/drivers/spi/spi-dw-pci.c b/drivers/spi/spi-dw-pci.c
34912 index f64250e..1ee3049 100644
34913 --- a/drivers/spi/spi-dw-pci.c
34914 +++ b/drivers/spi/spi-dw-pci.c
34915 @@ -149,7 +149,7 @@ static int spi_resume(struct pci_dev *pdev)
34916 #define spi_resume NULL
34917 #endif
34918
34919 -static const struct pci_device_id pci_ids[] __devinitdata = {
34920 +static const struct pci_device_id pci_ids[] __devinitconst = {
34921 /* Intel MID platform SPI controller 0 */
34922 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0800) },
34923 {},
34924 diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
34925 index 77eae99..b7cdcc9 100644
34926 --- a/drivers/spi/spi.c
34927 +++ b/drivers/spi/spi.c
34928 @@ -1024,7 +1024,7 @@ int spi_bus_unlock(struct spi_master *master)
34929 EXPORT_SYMBOL_GPL(spi_bus_unlock);
34930
34931 /* portable code must never pass more than 32 bytes */
34932 -#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
34933 +#define SPI_BUFSIZ max(32UL,SMP_CACHE_BYTES)
34934
34935 static u8 *buf;
34936
34937 diff --git a/drivers/staging/gma500/power.c b/drivers/staging/gma500/power.c
34938 index 436fe97..4082570 100644
34939 --- a/drivers/staging/gma500/power.c
34940 +++ b/drivers/staging/gma500/power.c
34941 @@ -266,7 +266,7 @@ bool gma_power_begin(struct drm_device *dev, bool force_on)
34942 ret = gma_resume_pci(dev->pdev);
34943 if (ret == 0) {
34944 /* FIXME: we want to defer this for Medfield/Oaktrail */
34945 - gma_resume_display(dev);
34946 + gma_resume_display(dev->pdev);
34947 psb_irq_preinstall(dev);
34948 psb_irq_postinstall(dev);
34949 pm_runtime_get(&dev->pdev->dev);
34950 diff --git a/drivers/staging/hv/rndis_filter.c b/drivers/staging/hv/rndis_filter.c
34951 index bafccb3..e3ac78d 100644
34952 --- a/drivers/staging/hv/rndis_filter.c
34953 +++ b/drivers/staging/hv/rndis_filter.c
34954 @@ -42,7 +42,7 @@ struct rndis_device {
34955
34956 enum rndis_device_state state;
34957 bool link_state;
34958 - atomic_t new_req_id;
34959 + atomic_unchecked_t new_req_id;
34960
34961 spinlock_t request_lock;
34962 struct list_head req_list;
34963 @@ -116,7 +116,7 @@ static struct rndis_request *get_rndis_request(struct rndis_device *dev,
34964 * template
34965 */
34966 set = &rndis_msg->msg.set_req;
34967 - set->req_id = atomic_inc_return(&dev->new_req_id);
34968 + set->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
34969
34970 /* Add to the request list */
34971 spin_lock_irqsave(&dev->request_lock, flags);
34972 @@ -646,7 +646,7 @@ static void rndis_filter_halt_device(struct rndis_device *dev)
34973
34974 /* Setup the rndis set */
34975 halt = &request->request_msg.msg.halt_req;
34976 - halt->req_id = atomic_inc_return(&dev->new_req_id);
34977 + halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
34978
34979 /* Ignore return since this msg is optional. */
34980 rndis_filter_send_request(dev, request);
34981 diff --git a/drivers/staging/iio/buffer_generic.h b/drivers/staging/iio/buffer_generic.h
34982 index 9e8f010..af9efb5 100644
34983 --- a/drivers/staging/iio/buffer_generic.h
34984 +++ b/drivers/staging/iio/buffer_generic.h
34985 @@ -64,7 +64,7 @@ struct iio_buffer_access_funcs {
34986
34987 int (*is_enabled)(struct iio_buffer *buffer);
34988 int (*enable)(struct iio_buffer *buffer);
34989 -};
34990 +} __no_const;
34991
34992 /**
34993 * struct iio_buffer_setup_ops - buffer setup related callbacks
34994 diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
34995 index 8b307b4..a97ac91 100644
34996 --- a/drivers/staging/octeon/ethernet-rx.c
34997 +++ b/drivers/staging/octeon/ethernet-rx.c
34998 @@ -420,11 +420,11 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
34999 /* Increment RX stats for virtual ports */
35000 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
35001 #ifdef CONFIG_64BIT
35002 - atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
35003 - atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
35004 + atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
35005 + atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
35006 #else
35007 - atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
35008 - atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
35009 + atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
35010 + atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
35011 #endif
35012 }
35013 netif_receive_skb(skb);
35014 @@ -436,9 +436,9 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
35015 dev->name);
35016 */
35017 #ifdef CONFIG_64BIT
35018 - atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
35019 + atomic64_unchecked_add(1, (atomic64_unchecked_t *)&priv->stats.rx_dropped);
35020 #else
35021 - atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
35022 + atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_dropped);
35023 #endif
35024 dev_kfree_skb_irq(skb);
35025 }
35026 diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
35027 index 076f866..2308070 100644
35028 --- a/drivers/staging/octeon/ethernet.c
35029 +++ b/drivers/staging/octeon/ethernet.c
35030 @@ -258,11 +258,11 @@ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
35031 * since the RX tasklet also increments it.
35032 */
35033 #ifdef CONFIG_64BIT
35034 - atomic64_add(rx_status.dropped_packets,
35035 - (atomic64_t *)&priv->stats.rx_dropped);
35036 + atomic64_add_unchecked(rx_status.dropped_packets,
35037 + (atomic64_unchecked_t *)&priv->stats.rx_dropped);
35038 #else
35039 - atomic_add(rx_status.dropped_packets,
35040 - (atomic_t *)&priv->stats.rx_dropped);
35041 + atomic_add_unchecked(rx_status.dropped_packets,
35042 + (atomic_unchecked_t *)&priv->stats.rx_dropped);
35043 #endif
35044 }
35045
35046 diff --git a/drivers/staging/pohmelfs/inode.c b/drivers/staging/pohmelfs/inode.c
35047 index 7a19555..466456d 100644
35048 --- a/drivers/staging/pohmelfs/inode.c
35049 +++ b/drivers/staging/pohmelfs/inode.c
35050 @@ -1861,7 +1861,7 @@ static int pohmelfs_fill_super(struct super_block *sb, void *data, int silent)
35051 mutex_init(&psb->mcache_lock);
35052 psb->mcache_root = RB_ROOT;
35053 psb->mcache_timeout = msecs_to_jiffies(5000);
35054 - atomic_long_set(&psb->mcache_gen, 0);
35055 + atomic_long_set_unchecked(&psb->mcache_gen, 0);
35056
35057 psb->trans_max_pages = 100;
35058
35059 @@ -1876,7 +1876,7 @@ static int pohmelfs_fill_super(struct super_block *sb, void *data, int silent)
35060 INIT_LIST_HEAD(&psb->crypto_ready_list);
35061 INIT_LIST_HEAD(&psb->crypto_active_list);
35062
35063 - atomic_set(&psb->trans_gen, 1);
35064 + atomic_set_unchecked(&psb->trans_gen, 1);
35065 atomic_long_set(&psb->total_inodes, 0);
35066
35067 mutex_init(&psb->state_lock);
35068 diff --git a/drivers/staging/pohmelfs/mcache.c b/drivers/staging/pohmelfs/mcache.c
35069 index e22665c..a2a9390 100644
35070 --- a/drivers/staging/pohmelfs/mcache.c
35071 +++ b/drivers/staging/pohmelfs/mcache.c
35072 @@ -121,7 +121,7 @@ struct pohmelfs_mcache *pohmelfs_mcache_alloc(struct pohmelfs_sb *psb, u64 start
35073 m->data = data;
35074 m->start = start;
35075 m->size = size;
35076 - m->gen = atomic_long_inc_return(&psb->mcache_gen);
35077 + m->gen = atomic_long_inc_return_unchecked(&psb->mcache_gen);
35078
35079 mutex_lock(&psb->mcache_lock);
35080 err = pohmelfs_mcache_insert(psb, m);
35081 diff --git a/drivers/staging/pohmelfs/netfs.h b/drivers/staging/pohmelfs/netfs.h
35082 index 985b6b7..7699e05 100644
35083 --- a/drivers/staging/pohmelfs/netfs.h
35084 +++ b/drivers/staging/pohmelfs/netfs.h
35085 @@ -571,14 +571,14 @@ struct pohmelfs_config;
35086 struct pohmelfs_sb {
35087 struct rb_root mcache_root;
35088 struct mutex mcache_lock;
35089 - atomic_long_t mcache_gen;
35090 + atomic_long_unchecked_t mcache_gen;
35091 unsigned long mcache_timeout;
35092
35093 unsigned int idx;
35094
35095 unsigned int trans_retries;
35096
35097 - atomic_t trans_gen;
35098 + atomic_unchecked_t trans_gen;
35099
35100 unsigned int crypto_attached_size;
35101 unsigned int crypto_align_size;
35102 diff --git a/drivers/staging/pohmelfs/trans.c b/drivers/staging/pohmelfs/trans.c
35103 index 06c1a74..866eebc 100644
35104 --- a/drivers/staging/pohmelfs/trans.c
35105 +++ b/drivers/staging/pohmelfs/trans.c
35106 @@ -492,7 +492,7 @@ int netfs_trans_finish(struct netfs_trans *t, struct pohmelfs_sb *psb)
35107 int err;
35108 struct netfs_cmd *cmd = t->iovec.iov_base;
35109
35110 - t->gen = atomic_inc_return(&psb->trans_gen);
35111 + t->gen = atomic_inc_return_unchecked(&psb->trans_gen);
35112
35113 cmd->size = t->iovec.iov_len - sizeof(struct netfs_cmd) +
35114 t->attached_size + t->attached_pages * sizeof(struct netfs_cmd);
35115 diff --git a/drivers/staging/rtl8712/rtl871x_io.h b/drivers/staging/rtl8712/rtl871x_io.h
35116 index 86308a0..feaa925 100644
35117 --- a/drivers/staging/rtl8712/rtl871x_io.h
35118 +++ b/drivers/staging/rtl8712/rtl871x_io.h
35119 @@ -108,7 +108,7 @@ struct _io_ops {
35120 u8 *pmem);
35121 u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
35122 u8 *pmem);
35123 -};
35124 +} __no_const;
35125
35126 struct io_req {
35127 struct list_head list;
35128 diff --git a/drivers/staging/sbe-2t3e3/netdev.c b/drivers/staging/sbe-2t3e3/netdev.c
35129 index c7b5e8b..783d6cb 100644
35130 --- a/drivers/staging/sbe-2t3e3/netdev.c
35131 +++ b/drivers/staging/sbe-2t3e3/netdev.c
35132 @@ -51,7 +51,7 @@ int t3e3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
35133 t3e3_if_config(sc, cmd_2t3e3, (char *)&param, &resp, &rlen);
35134
35135 if (rlen)
35136 - if (copy_to_user(data, &resp, rlen))
35137 + if (rlen > sizeof resp || copy_to_user(data, &resp, rlen))
35138 return -EFAULT;
35139
35140 return 0;
35141 diff --git a/drivers/staging/usbip/usbip_common.h b/drivers/staging/usbip/usbip_common.h
35142 index be21617..0954e45 100644
35143 --- a/drivers/staging/usbip/usbip_common.h
35144 +++ b/drivers/staging/usbip/usbip_common.h
35145 @@ -289,7 +289,7 @@ struct usbip_device {
35146 void (*shutdown)(struct usbip_device *);
35147 void (*reset)(struct usbip_device *);
35148 void (*unusable)(struct usbip_device *);
35149 - } eh_ops;
35150 + } __no_const eh_ops;
35151 };
35152
35153 #if 0
35154 diff --git a/drivers/staging/usbip/vhci.h b/drivers/staging/usbip/vhci.h
35155 index 88b3298..3783eee 100644
35156 --- a/drivers/staging/usbip/vhci.h
35157 +++ b/drivers/staging/usbip/vhci.h
35158 @@ -88,7 +88,7 @@ struct vhci_hcd {
35159 unsigned resuming:1;
35160 unsigned long re_timeout;
35161
35162 - atomic_t seqnum;
35163 + atomic_unchecked_t seqnum;
35164
35165 /*
35166 * NOTE:
35167 diff --git a/drivers/staging/usbip/vhci_hcd.c b/drivers/staging/usbip/vhci_hcd.c
35168 index 2ee97e2..0420b86 100644
35169 --- a/drivers/staging/usbip/vhci_hcd.c
35170 +++ b/drivers/staging/usbip/vhci_hcd.c
35171 @@ -527,7 +527,7 @@ static void vhci_tx_urb(struct urb *urb)
35172 return;
35173 }
35174
35175 - priv->seqnum = atomic_inc_return(&the_controller->seqnum);
35176 + priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
35177 if (priv->seqnum == 0xffff)
35178 dev_info(&urb->dev->dev, "seqnum max\n");
35179
35180 @@ -779,7 +779,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
35181 return -ENOMEM;
35182 }
35183
35184 - unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
35185 + unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
35186 if (unlink->seqnum == 0xffff)
35187 pr_info("seqnum max\n");
35188
35189 @@ -969,7 +969,7 @@ static int vhci_start(struct usb_hcd *hcd)
35190 vdev->rhport = rhport;
35191 }
35192
35193 - atomic_set(&vhci->seqnum, 0);
35194 + atomic_set_unchecked(&vhci->seqnum, 0);
35195 spin_lock_init(&vhci->lock);
35196
35197 hcd->power_budget = 0; /* no limit */
35198 diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c
35199 index 3872b8c..fe6d2f4 100644
35200 --- a/drivers/staging/usbip/vhci_rx.c
35201 +++ b/drivers/staging/usbip/vhci_rx.c
35202 @@ -77,7 +77,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
35203 if (!urb) {
35204 pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
35205 pr_info("max seqnum %d\n",
35206 - atomic_read(&the_controller->seqnum));
35207 + atomic_read_unchecked(&the_controller->seqnum));
35208 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
35209 return;
35210 }
35211 diff --git a/drivers/staging/vt6655/hostap.c b/drivers/staging/vt6655/hostap.c
35212 index 7735027..30eed13 100644
35213 --- a/drivers/staging/vt6655/hostap.c
35214 +++ b/drivers/staging/vt6655/hostap.c
35215 @@ -79,14 +79,13 @@ static int msglevel =MSG_LEVEL_INFO;
35216 *
35217 */
35218
35219 +static net_device_ops_no_const apdev_netdev_ops;
35220 +
35221 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
35222 {
35223 PSDevice apdev_priv;
35224 struct net_device *dev = pDevice->dev;
35225 int ret;
35226 - const struct net_device_ops apdev_netdev_ops = {
35227 - .ndo_start_xmit = pDevice->tx_80211,
35228 - };
35229
35230 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
35231
35232 @@ -98,6 +97,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
35233 *apdev_priv = *pDevice;
35234 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
35235
35236 + /* only half broken now */
35237 + apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
35238 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
35239
35240 pDevice->apdev->type = ARPHRD_IEEE80211;
35241 diff --git a/drivers/staging/vt6656/hostap.c b/drivers/staging/vt6656/hostap.c
35242 index 51b5adf..098e320 100644
35243 --- a/drivers/staging/vt6656/hostap.c
35244 +++ b/drivers/staging/vt6656/hostap.c
35245 @@ -80,14 +80,13 @@ static int msglevel =MSG_LEVEL_INFO;
35246 *
35247 */
35248
35249 +static net_device_ops_no_const apdev_netdev_ops;
35250 +
35251 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
35252 {
35253 PSDevice apdev_priv;
35254 struct net_device *dev = pDevice->dev;
35255 int ret;
35256 - const struct net_device_ops apdev_netdev_ops = {
35257 - .ndo_start_xmit = pDevice->tx_80211,
35258 - };
35259
35260 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
35261
35262 @@ -99,6 +98,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
35263 *apdev_priv = *pDevice;
35264 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
35265
35266 + /* only half broken now */
35267 + apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
35268 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
35269
35270 pDevice->apdev->type = ARPHRD_IEEE80211;
35271 diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c
35272 index 7843dfd..3db105f 100644
35273 --- a/drivers/staging/wlan-ng/hfa384x_usb.c
35274 +++ b/drivers/staging/wlan-ng/hfa384x_usb.c
35275 @@ -204,7 +204,7 @@ static void unlocked_usbctlx_complete(hfa384x_t *hw, hfa384x_usbctlx_t *ctlx);
35276
35277 struct usbctlx_completor {
35278 int (*complete) (struct usbctlx_completor *);
35279 -};
35280 +} __no_const;
35281
35282 static int
35283 hfa384x_usbctlx_complete_sync(hfa384x_t *hw,
35284 diff --git a/drivers/staging/zcache/tmem.c b/drivers/staging/zcache/tmem.c
35285 index 1ca66ea..76f1343 100644
35286 --- a/drivers/staging/zcache/tmem.c
35287 +++ b/drivers/staging/zcache/tmem.c
35288 @@ -39,7 +39,7 @@
35289 * A tmem host implementation must use this function to register callbacks
35290 * for memory allocation.
35291 */
35292 -static struct tmem_hostops tmem_hostops;
35293 +static tmem_hostops_no_const tmem_hostops;
35294
35295 static void tmem_objnode_tree_init(void);
35296
35297 @@ -53,7 +53,7 @@ void tmem_register_hostops(struct tmem_hostops *m)
35298 * A tmem host implementation must use this function to register
35299 * callbacks for a page-accessible memory (PAM) implementation
35300 */
35301 -static struct tmem_pamops tmem_pamops;
35302 +static tmem_pamops_no_const tmem_pamops;
35303
35304 void tmem_register_pamops(struct tmem_pamops *m)
35305 {
35306 diff --git a/drivers/staging/zcache/tmem.h b/drivers/staging/zcache/tmem.h
35307 index ed147c4..94fc3c6 100644
35308 --- a/drivers/staging/zcache/tmem.h
35309 +++ b/drivers/staging/zcache/tmem.h
35310 @@ -180,6 +180,7 @@ struct tmem_pamops {
35311 void (*new_obj)(struct tmem_obj *);
35312 int (*replace_in_obj)(void *, struct tmem_obj *);
35313 };
35314 +typedef struct tmem_pamops __no_const tmem_pamops_no_const;
35315 extern void tmem_register_pamops(struct tmem_pamops *m);
35316
35317 /* memory allocation methods provided by the host implementation */
35318 @@ -189,6 +190,7 @@ struct tmem_hostops {
35319 struct tmem_objnode *(*objnode_alloc)(struct tmem_pool *);
35320 void (*objnode_free)(struct tmem_objnode *, struct tmem_pool *);
35321 };
35322 +typedef struct tmem_hostops __no_const tmem_hostops_no_const;
35323 extern void tmem_register_hostops(struct tmem_hostops *m);
35324
35325 /* core tmem accessor functions */
35326 diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
35327 index 0c1d5c73..88e90a8 100644
35328 --- a/drivers/target/iscsi/iscsi_target.c
35329 +++ b/drivers/target/iscsi/iscsi_target.c
35330 @@ -1364,7 +1364,7 @@ static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf)
35331 * outstanding_r2ts reaches zero, go ahead and send the delayed
35332 * TASK_ABORTED status.
35333 */
35334 - if (atomic_read(&se_cmd->t_transport_aborted) != 0) {
35335 + if (atomic_read_unchecked(&se_cmd->t_transport_aborted) != 0) {
35336 if (hdr->flags & ISCSI_FLAG_CMD_FINAL)
35337 if (--cmd->outstanding_r2ts < 1) {
35338 iscsit_stop_dataout_timer(cmd);
35339 diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
35340 index 6845228..df77141 100644
35341 --- a/drivers/target/target_core_tmr.c
35342 +++ b/drivers/target/target_core_tmr.c
35343 @@ -250,7 +250,7 @@ static void core_tmr_drain_task_list(
35344 cmd->se_tfo->get_task_tag(cmd), cmd->pr_res_key,
35345 cmd->t_task_list_num,
35346 atomic_read(&cmd->t_task_cdbs_left),
35347 - atomic_read(&cmd->t_task_cdbs_sent),
35348 + atomic_read_unchecked(&cmd->t_task_cdbs_sent),
35349 atomic_read(&cmd->t_transport_active),
35350 atomic_read(&cmd->t_transport_stop),
35351 atomic_read(&cmd->t_transport_sent));
35352 @@ -281,7 +281,7 @@ static void core_tmr_drain_task_list(
35353 pr_debug("LUN_RESET: got t_transport_active = 1 for"
35354 " task: %p, t_fe_count: %d dev: %p\n", task,
35355 fe_count, dev);
35356 - atomic_set(&cmd->t_transport_aborted, 1);
35357 + atomic_set_unchecked(&cmd->t_transport_aborted, 1);
35358 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
35359
35360 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
35361 @@ -289,7 +289,7 @@ static void core_tmr_drain_task_list(
35362 }
35363 pr_debug("LUN_RESET: Got t_transport_active = 0 for task: %p,"
35364 " t_fe_count: %d dev: %p\n", task, fe_count, dev);
35365 - atomic_set(&cmd->t_transport_aborted, 1);
35366 + atomic_set_unchecked(&cmd->t_transport_aborted, 1);
35367 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
35368
35369 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
35370 diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
35371 index 861628e..659ae80 100644
35372 --- a/drivers/target/target_core_transport.c
35373 +++ b/drivers/target/target_core_transport.c
35374 @@ -1343,7 +1343,7 @@ struct se_device *transport_add_device_to_core_hba(
35375
35376 dev->queue_depth = dev_limits->queue_depth;
35377 atomic_set(&dev->depth_left, dev->queue_depth);
35378 - atomic_set(&dev->dev_ordered_id, 0);
35379 + atomic_set_unchecked(&dev->dev_ordered_id, 0);
35380
35381 se_dev_set_default_attribs(dev, dev_limits);
35382
35383 @@ -1530,7 +1530,7 @@ static int transport_check_alloc_task_attr(struct se_cmd *cmd)
35384 * Used to determine when ORDERED commands should go from
35385 * Dormant to Active status.
35386 */
35387 - cmd->se_ordered_id = atomic_inc_return(&cmd->se_dev->dev_ordered_id);
35388 + cmd->se_ordered_id = atomic_inc_return_unchecked(&cmd->se_dev->dev_ordered_id);
35389 smp_mb__after_atomic_inc();
35390 pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
35391 cmd->se_ordered_id, cmd->sam_task_attr,
35392 @@ -1800,7 +1800,7 @@ static void transport_generic_request_failure(struct se_cmd *cmd)
35393 " t_transport_active: %d t_transport_stop: %d"
35394 " t_transport_sent: %d\n", cmd->t_task_list_num,
35395 atomic_read(&cmd->t_task_cdbs_left),
35396 - atomic_read(&cmd->t_task_cdbs_sent),
35397 + atomic_read_unchecked(&cmd->t_task_cdbs_sent),
35398 atomic_read(&cmd->t_task_cdbs_ex_left),
35399 atomic_read(&cmd->t_transport_active),
35400 atomic_read(&cmd->t_transport_stop),
35401 @@ -2089,9 +2089,9 @@ check_depth:
35402
35403 spin_lock_irqsave(&cmd->t_state_lock, flags);
35404 task->task_flags |= (TF_ACTIVE | TF_SENT);
35405 - atomic_inc(&cmd->t_task_cdbs_sent);
35406 + atomic_inc_unchecked(&cmd->t_task_cdbs_sent);
35407
35408 - if (atomic_read(&cmd->t_task_cdbs_sent) ==
35409 + if (atomic_read_unchecked(&cmd->t_task_cdbs_sent) ==
35410 cmd->t_task_list_num)
35411 atomic_set(&cmd->t_transport_sent, 1);
35412
35413 @@ -4273,7 +4273,7 @@ bool transport_wait_for_tasks(struct se_cmd *cmd)
35414 atomic_set(&cmd->transport_lun_stop, 0);
35415 }
35416 if (!atomic_read(&cmd->t_transport_active) ||
35417 - atomic_read(&cmd->t_transport_aborted)) {
35418 + atomic_read_unchecked(&cmd->t_transport_aborted)) {
35419 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
35420 return false;
35421 }
35422 @@ -4522,7 +4522,7 @@ int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
35423 {
35424 int ret = 0;
35425
35426 - if (atomic_read(&cmd->t_transport_aborted) != 0) {
35427 + if (atomic_read_unchecked(&cmd->t_transport_aborted) != 0) {
35428 if (!send_status ||
35429 (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
35430 return 1;
35431 @@ -4559,7 +4559,7 @@ void transport_send_task_abort(struct se_cmd *cmd)
35432 */
35433 if (cmd->data_direction == DMA_TO_DEVICE) {
35434 if (cmd->se_tfo->write_pending_status(cmd) != 0) {
35435 - atomic_inc(&cmd->t_transport_aborted);
35436 + atomic_inc_unchecked(&cmd->t_transport_aborted);
35437 smp_mb__after_atomic_inc();
35438 }
35439 }
35440 diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
35441 index b9040be..e3f5aab 100644
35442 --- a/drivers/tty/hvc/hvcs.c
35443 +++ b/drivers/tty/hvc/hvcs.c
35444 @@ -83,6 +83,7 @@
35445 #include <asm/hvcserver.h>
35446 #include <asm/uaccess.h>
35447 #include <asm/vio.h>
35448 +#include <asm/local.h>
35449
35450 /*
35451 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
35452 @@ -270,7 +271,7 @@ struct hvcs_struct {
35453 unsigned int index;
35454
35455 struct tty_struct *tty;
35456 - int open_count;
35457 + local_t open_count;
35458
35459 /*
35460 * Used to tell the driver kernel_thread what operations need to take
35461 @@ -422,7 +423,7 @@ static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribut
35462
35463 spin_lock_irqsave(&hvcsd->lock, flags);
35464
35465 - if (hvcsd->open_count > 0) {
35466 + if (local_read(&hvcsd->open_count) > 0) {
35467 spin_unlock_irqrestore(&hvcsd->lock, flags);
35468 printk(KERN_INFO "HVCS: vterm state unchanged. "
35469 "The hvcs device node is still in use.\n");
35470 @@ -1145,7 +1146,7 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp)
35471 if ((retval = hvcs_partner_connect(hvcsd)))
35472 goto error_release;
35473
35474 - hvcsd->open_count = 1;
35475 + local_set(&hvcsd->open_count, 1);
35476 hvcsd->tty = tty;
35477 tty->driver_data = hvcsd;
35478
35479 @@ -1179,7 +1180,7 @@ fast_open:
35480
35481 spin_lock_irqsave(&hvcsd->lock, flags);
35482 kref_get(&hvcsd->kref);
35483 - hvcsd->open_count++;
35484 + local_inc(&hvcsd->open_count);
35485 hvcsd->todo_mask |= HVCS_SCHED_READ;
35486 spin_unlock_irqrestore(&hvcsd->lock, flags);
35487
35488 @@ -1223,7 +1224,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
35489 hvcsd = tty->driver_data;
35490
35491 spin_lock_irqsave(&hvcsd->lock, flags);
35492 - if (--hvcsd->open_count == 0) {
35493 + if (local_dec_and_test(&hvcsd->open_count)) {
35494
35495 vio_disable_interrupts(hvcsd->vdev);
35496
35497 @@ -1249,10 +1250,10 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
35498 free_irq(irq, hvcsd);
35499 kref_put(&hvcsd->kref, destroy_hvcs_struct);
35500 return;
35501 - } else if (hvcsd->open_count < 0) {
35502 + } else if (local_read(&hvcsd->open_count) < 0) {
35503 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
35504 " is missmanaged.\n",
35505 - hvcsd->vdev->unit_address, hvcsd->open_count);
35506 + hvcsd->vdev->unit_address, local_read(&hvcsd->open_count));
35507 }
35508
35509 spin_unlock_irqrestore(&hvcsd->lock, flags);
35510 @@ -1268,7 +1269,7 @@ static void hvcs_hangup(struct tty_struct * tty)
35511
35512 spin_lock_irqsave(&hvcsd->lock, flags);
35513 /* Preserve this so that we know how many kref refs to put */
35514 - temp_open_count = hvcsd->open_count;
35515 + temp_open_count = local_read(&hvcsd->open_count);
35516
35517 /*
35518 * Don't kref put inside the spinlock because the destruction
35519 @@ -1283,7 +1284,7 @@ static void hvcs_hangup(struct tty_struct * tty)
35520 hvcsd->tty->driver_data = NULL;
35521 hvcsd->tty = NULL;
35522
35523 - hvcsd->open_count = 0;
35524 + local_set(&hvcsd->open_count, 0);
35525
35526 /* This will drop any buffered data on the floor which is OK in a hangup
35527 * scenario. */
35528 @@ -1354,7 +1355,7 @@ static int hvcs_write(struct tty_struct *tty,
35529 * the middle of a write operation? This is a crummy place to do this
35530 * but we want to keep it all in the spinlock.
35531 */
35532 - if (hvcsd->open_count <= 0) {
35533 + if (local_read(&hvcsd->open_count) <= 0) {
35534 spin_unlock_irqrestore(&hvcsd->lock, flags);
35535 return -ENODEV;
35536 }
35537 @@ -1428,7 +1429,7 @@ static int hvcs_write_room(struct tty_struct *tty)
35538 {
35539 struct hvcs_struct *hvcsd = tty->driver_data;
35540
35541 - if (!hvcsd || hvcsd->open_count <= 0)
35542 + if (!hvcsd || local_read(&hvcsd->open_count) <= 0)
35543 return 0;
35544
35545 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
35546 diff --git a/drivers/tty/ipwireless/tty.c b/drivers/tty/ipwireless/tty.c
35547 index ef92869..f4ebd88 100644
35548 --- a/drivers/tty/ipwireless/tty.c
35549 +++ b/drivers/tty/ipwireless/tty.c
35550 @@ -29,6 +29,7 @@
35551 #include <linux/tty_driver.h>
35552 #include <linux/tty_flip.h>
35553 #include <linux/uaccess.h>
35554 +#include <asm/local.h>
35555
35556 #include "tty.h"
35557 #include "network.h"
35558 @@ -51,7 +52,7 @@ struct ipw_tty {
35559 int tty_type;
35560 struct ipw_network *network;
35561 struct tty_struct *linux_tty;
35562 - int open_count;
35563 + local_t open_count;
35564 unsigned int control_lines;
35565 struct mutex ipw_tty_mutex;
35566 int tx_bytes_queued;
35567 @@ -127,10 +128,10 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
35568 mutex_unlock(&tty->ipw_tty_mutex);
35569 return -ENODEV;
35570 }
35571 - if (tty->open_count == 0)
35572 + if (local_read(&tty->open_count) == 0)
35573 tty->tx_bytes_queued = 0;
35574
35575 - tty->open_count++;
35576 + local_inc(&tty->open_count);
35577
35578 tty->linux_tty = linux_tty;
35579 linux_tty->driver_data = tty;
35580 @@ -146,9 +147,7 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
35581
35582 static void do_ipw_close(struct ipw_tty *tty)
35583 {
35584 - tty->open_count--;
35585 -
35586 - if (tty->open_count == 0) {
35587 + if (local_dec_return(&tty->open_count) == 0) {
35588 struct tty_struct *linux_tty = tty->linux_tty;
35589
35590 if (linux_tty != NULL) {
35591 @@ -169,7 +168,7 @@ static void ipw_hangup(struct tty_struct *linux_tty)
35592 return;
35593
35594 mutex_lock(&tty->ipw_tty_mutex);
35595 - if (tty->open_count == 0) {
35596 + if (local_read(&tty->open_count) == 0) {
35597 mutex_unlock(&tty->ipw_tty_mutex);
35598 return;
35599 }
35600 @@ -198,7 +197,7 @@ void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
35601 return;
35602 }
35603
35604 - if (!tty->open_count) {
35605 + if (!local_read(&tty->open_count)) {
35606 mutex_unlock(&tty->ipw_tty_mutex);
35607 return;
35608 }
35609 @@ -240,7 +239,7 @@ static int ipw_write(struct tty_struct *linux_tty,
35610 return -ENODEV;
35611
35612 mutex_lock(&tty->ipw_tty_mutex);
35613 - if (!tty->open_count) {
35614 + if (!local_read(&tty->open_count)) {
35615 mutex_unlock(&tty->ipw_tty_mutex);
35616 return -EINVAL;
35617 }
35618 @@ -280,7 +279,7 @@ static int ipw_write_room(struct tty_struct *linux_tty)
35619 if (!tty)
35620 return -ENODEV;
35621
35622 - if (!tty->open_count)
35623 + if (!local_read(&tty->open_count))
35624 return -EINVAL;
35625
35626 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
35627 @@ -322,7 +321,7 @@ static int ipw_chars_in_buffer(struct tty_struct *linux_tty)
35628 if (!tty)
35629 return 0;
35630
35631 - if (!tty->open_count)
35632 + if (!local_read(&tty->open_count))
35633 return 0;
35634
35635 return tty->tx_bytes_queued;
35636 @@ -403,7 +402,7 @@ static int ipw_tiocmget(struct tty_struct *linux_tty)
35637 if (!tty)
35638 return -ENODEV;
35639
35640 - if (!tty->open_count)
35641 + if (!local_read(&tty->open_count))
35642 return -EINVAL;
35643
35644 return get_control_lines(tty);
35645 @@ -419,7 +418,7 @@ ipw_tiocmset(struct tty_struct *linux_tty,
35646 if (!tty)
35647 return -ENODEV;
35648
35649 - if (!tty->open_count)
35650 + if (!local_read(&tty->open_count))
35651 return -EINVAL;
35652
35653 return set_control_lines(tty, set, clear);
35654 @@ -433,7 +432,7 @@ static int ipw_ioctl(struct tty_struct *linux_tty,
35655 if (!tty)
35656 return -ENODEV;
35657
35658 - if (!tty->open_count)
35659 + if (!local_read(&tty->open_count))
35660 return -EINVAL;
35661
35662 /* FIXME: Exactly how is the tty object locked here .. */
35663 @@ -582,7 +581,7 @@ void ipwireless_tty_free(struct ipw_tty *tty)
35664 against a parallel ioctl etc */
35665 mutex_lock(&ttyj->ipw_tty_mutex);
35666 }
35667 - while (ttyj->open_count)
35668 + while (local_read(&ttyj->open_count))
35669 do_ipw_close(ttyj);
35670 ipwireless_disassociate_network_ttys(network,
35671 ttyj->channel_idx);
35672 diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
35673 index fc7bbba..9527e93 100644
35674 --- a/drivers/tty/n_gsm.c
35675 +++ b/drivers/tty/n_gsm.c
35676 @@ -1629,7 +1629,7 @@ static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr)
35677 kref_init(&dlci->ref);
35678 mutex_init(&dlci->mutex);
35679 dlci->fifo = &dlci->_fifo;
35680 - if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
35681 + if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) {
35682 kfree(dlci);
35683 return NULL;
35684 }
35685 diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
35686 index 39d6ab6..eb97f41 100644
35687 --- a/drivers/tty/n_tty.c
35688 +++ b/drivers/tty/n_tty.c
35689 @@ -2123,6 +2123,7 @@ void n_tty_inherit_ops(struct tty_ldisc_ops *ops)
35690 {
35691 *ops = tty_ldisc_N_TTY;
35692 ops->owner = NULL;
35693 - ops->refcount = ops->flags = 0;
35694 + atomic_set(&ops->refcount, 0);
35695 + ops->flags = 0;
35696 }
35697 EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
35698 diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
35699 index e18604b..a7d5a11 100644
35700 --- a/drivers/tty/pty.c
35701 +++ b/drivers/tty/pty.c
35702 @@ -773,8 +773,10 @@ static void __init unix98_pty_init(void)
35703 register_sysctl_table(pty_root_table);
35704
35705 /* Now create the /dev/ptmx special device */
35706 + pax_open_kernel();
35707 tty_default_fops(&ptmx_fops);
35708 - ptmx_fops.open = ptmx_open;
35709 + *(void **)&ptmx_fops.open = ptmx_open;
35710 + pax_close_kernel();
35711
35712 cdev_init(&ptmx_cdev, &ptmx_fops);
35713 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
35714 diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
35715 index 2b42a01..32a2ed3 100644
35716 --- a/drivers/tty/serial/kgdboc.c
35717 +++ b/drivers/tty/serial/kgdboc.c
35718 @@ -24,8 +24,9 @@
35719 #define MAX_CONFIG_LEN 40
35720
35721 static struct kgdb_io kgdboc_io_ops;
35722 +static struct kgdb_io kgdboc_io_ops_console;
35723
35724 -/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
35725 +/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. */
35726 static int configured = -1;
35727
35728 static char config[MAX_CONFIG_LEN];
35729 @@ -148,6 +149,8 @@ static void cleanup_kgdboc(void)
35730 kgdboc_unregister_kbd();
35731 if (configured == 1)
35732 kgdb_unregister_io_module(&kgdboc_io_ops);
35733 + else if (configured == 2)
35734 + kgdb_unregister_io_module(&kgdboc_io_ops_console);
35735 }
35736
35737 static int configure_kgdboc(void)
35738 @@ -157,13 +160,13 @@ static int configure_kgdboc(void)
35739 int err;
35740 char *cptr = config;
35741 struct console *cons;
35742 + int is_console = 0;
35743
35744 err = kgdboc_option_setup(config);
35745 if (err || !strlen(config) || isspace(config[0]))
35746 goto noconfig;
35747
35748 err = -ENODEV;
35749 - kgdboc_io_ops.is_console = 0;
35750 kgdb_tty_driver = NULL;
35751
35752 kgdboc_use_kms = 0;
35753 @@ -184,7 +187,7 @@ static int configure_kgdboc(void)
35754 int idx;
35755 if (cons->device && cons->device(cons, &idx) == p &&
35756 idx == tty_line) {
35757 - kgdboc_io_ops.is_console = 1;
35758 + is_console = 1;
35759 break;
35760 }
35761 cons = cons->next;
35762 @@ -194,12 +197,16 @@ static int configure_kgdboc(void)
35763 kgdb_tty_line = tty_line;
35764
35765 do_register:
35766 - err = kgdb_register_io_module(&kgdboc_io_ops);
35767 + if (is_console) {
35768 + err = kgdb_register_io_module(&kgdboc_io_ops_console);
35769 + configured = 2;
35770 + } else {
35771 + err = kgdb_register_io_module(&kgdboc_io_ops);
35772 + configured = 1;
35773 + }
35774 if (err)
35775 goto noconfig;
35776
35777 - configured = 1;
35778 -
35779 return 0;
35780
35781 noconfig:
35782 @@ -213,7 +220,7 @@ noconfig:
35783 static int __init init_kgdboc(void)
35784 {
35785 /* Already configured? */
35786 - if (configured == 1)
35787 + if (configured >= 1)
35788 return 0;
35789
35790 return configure_kgdboc();
35791 @@ -262,7 +269,7 @@ static int param_set_kgdboc_var(const char *kmessage, struct kernel_param *kp)
35792 if (config[len - 1] == '\n')
35793 config[len - 1] = '\0';
35794
35795 - if (configured == 1)
35796 + if (configured >= 1)
35797 cleanup_kgdboc();
35798
35799 /* Go and configure with the new params. */
35800 @@ -302,6 +309,15 @@ static struct kgdb_io kgdboc_io_ops = {
35801 .post_exception = kgdboc_post_exp_handler,
35802 };
35803
35804 +static struct kgdb_io kgdboc_io_ops_console = {
35805 + .name = "kgdboc",
35806 + .read_char = kgdboc_get_char,
35807 + .write_char = kgdboc_put_char,
35808 + .pre_exception = kgdboc_pre_exp_handler,
35809 + .post_exception = kgdboc_post_exp_handler,
35810 + .is_console = 1
35811 +};
35812 +
35813 #ifdef CONFIG_KGDB_SERIAL_CONSOLE
35814 /* This is only available if kgdboc is a built in for early debugging */
35815 static int __init kgdboc_early_init(char *opt)
35816 diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
35817 index 05085be..67eadb0 100644
35818 --- a/drivers/tty/tty_io.c
35819 +++ b/drivers/tty/tty_io.c
35820 @@ -3240,7 +3240,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
35821
35822 void tty_default_fops(struct file_operations *fops)
35823 {
35824 - *fops = tty_fops;
35825 + memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
35826 }
35827
35828 /*
35829 diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
35830 index 8e0924f..4204eb4 100644
35831 --- a/drivers/tty/tty_ldisc.c
35832 +++ b/drivers/tty/tty_ldisc.c
35833 @@ -75,7 +75,7 @@ static void put_ldisc(struct tty_ldisc *ld)
35834 if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
35835 struct tty_ldisc_ops *ldo = ld->ops;
35836
35837 - ldo->refcount--;
35838 + atomic_dec(&ldo->refcount);
35839 module_put(ldo->owner);
35840 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
35841
35842 @@ -110,7 +110,7 @@ int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc)
35843 spin_lock_irqsave(&tty_ldisc_lock, flags);
35844 tty_ldiscs[disc] = new_ldisc;
35845 new_ldisc->num = disc;
35846 - new_ldisc->refcount = 0;
35847 + atomic_set(&new_ldisc->refcount, 0);
35848 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
35849
35850 return ret;
35851 @@ -138,7 +138,7 @@ int tty_unregister_ldisc(int disc)
35852 return -EINVAL;
35853
35854 spin_lock_irqsave(&tty_ldisc_lock, flags);
35855 - if (tty_ldiscs[disc]->refcount)
35856 + if (atomic_read(&tty_ldiscs[disc]->refcount))
35857 ret = -EBUSY;
35858 else
35859 tty_ldiscs[disc] = NULL;
35860 @@ -159,7 +159,7 @@ static struct tty_ldisc_ops *get_ldops(int disc)
35861 if (ldops) {
35862 ret = ERR_PTR(-EAGAIN);
35863 if (try_module_get(ldops->owner)) {
35864 - ldops->refcount++;
35865 + atomic_inc(&ldops->refcount);
35866 ret = ldops;
35867 }
35868 }
35869 @@ -172,7 +172,7 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
35870 unsigned long flags;
35871
35872 spin_lock_irqsave(&tty_ldisc_lock, flags);
35873 - ldops->refcount--;
35874 + atomic_dec(&ldops->refcount);
35875 module_put(ldops->owner);
35876 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
35877 }
35878 diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
35879 index a605549..6bd3c96 100644
35880 --- a/drivers/tty/vt/keyboard.c
35881 +++ b/drivers/tty/vt/keyboard.c
35882 @@ -657,6 +657,16 @@ static void k_spec(struct vc_data *vc, unsigned char value, char up_flag)
35883 kbd->kbdmode == VC_OFF) &&
35884 value != KVAL(K_SAK))
35885 return; /* SAK is allowed even in raw mode */
35886 +
35887 +#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
35888 + {
35889 + void *func = fn_handler[value];
35890 + if (func == fn_show_state || func == fn_show_ptregs ||
35891 + func == fn_show_mem)
35892 + return;
35893 + }
35894 +#endif
35895 +
35896 fn_handler[value](vc);
35897 }
35898
35899 diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c
35900 index 65447c5..0526f0a 100644
35901 --- a/drivers/tty/vt/vt_ioctl.c
35902 +++ b/drivers/tty/vt/vt_ioctl.c
35903 @@ -207,9 +207,6 @@ do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm, struct kbd_str
35904 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
35905 return -EFAULT;
35906
35907 - if (!capable(CAP_SYS_TTY_CONFIG))
35908 - perm = 0;
35909 -
35910 switch (cmd) {
35911 case KDGKBENT:
35912 key_map = key_maps[s];
35913 @@ -221,6 +218,9 @@ do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm, struct kbd_str
35914 val = (i ? K_HOLE : K_NOSUCHMAP);
35915 return put_user(val, &user_kbe->kb_value);
35916 case KDSKBENT:
35917 + if (!capable(CAP_SYS_TTY_CONFIG))
35918 + perm = 0;
35919 +
35920 if (!perm)
35921 return -EPERM;
35922 if (!i && v == K_NOSUCHMAP) {
35923 @@ -322,9 +322,6 @@ do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
35924 int i, j, k;
35925 int ret;
35926
35927 - if (!capable(CAP_SYS_TTY_CONFIG))
35928 - perm = 0;
35929 -
35930 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
35931 if (!kbs) {
35932 ret = -ENOMEM;
35933 @@ -358,6 +355,9 @@ do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
35934 kfree(kbs);
35935 return ((p && *p) ? -EOVERFLOW : 0);
35936 case KDSKBSENT:
35937 + if (!capable(CAP_SYS_TTY_CONFIG))
35938 + perm = 0;
35939 +
35940 if (!perm) {
35941 ret = -EPERM;
35942 goto reterr;
35943 diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
35944 index a783d53..cb30d94 100644
35945 --- a/drivers/uio/uio.c
35946 +++ b/drivers/uio/uio.c
35947 @@ -25,6 +25,7 @@
35948 #include <linux/kobject.h>
35949 #include <linux/cdev.h>
35950 #include <linux/uio_driver.h>
35951 +#include <asm/local.h>
35952
35953 #define UIO_MAX_DEVICES (1U << MINORBITS)
35954
35955 @@ -32,10 +33,10 @@ struct uio_device {
35956 struct module *owner;
35957 struct device *dev;
35958 int minor;
35959 - atomic_t event;
35960 + atomic_unchecked_t event;
35961 struct fasync_struct *async_queue;
35962 wait_queue_head_t wait;
35963 - int vma_count;
35964 + local_t vma_count;
35965 struct uio_info *info;
35966 struct kobject *map_dir;
35967 struct kobject *portio_dir;
35968 @@ -242,7 +243,7 @@ static ssize_t show_event(struct device *dev,
35969 struct device_attribute *attr, char *buf)
35970 {
35971 struct uio_device *idev = dev_get_drvdata(dev);
35972 - return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
35973 + return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event));
35974 }
35975
35976 static struct device_attribute uio_class_attributes[] = {
35977 @@ -408,7 +409,7 @@ void uio_event_notify(struct uio_info *info)
35978 {
35979 struct uio_device *idev = info->uio_dev;
35980
35981 - atomic_inc(&idev->event);
35982 + atomic_inc_unchecked(&idev->event);
35983 wake_up_interruptible(&idev->wait);
35984 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
35985 }
35986 @@ -461,7 +462,7 @@ static int uio_open(struct inode *inode, struct file *filep)
35987 }
35988
35989 listener->dev = idev;
35990 - listener->event_count = atomic_read(&idev->event);
35991 + listener->event_count = atomic_read_unchecked(&idev->event);
35992 filep->private_data = listener;
35993
35994 if (idev->info->open) {
35995 @@ -512,7 +513,7 @@ static unsigned int uio_poll(struct file *filep, poll_table *wait)
35996 return -EIO;
35997
35998 poll_wait(filep, &idev->wait, wait);
35999 - if (listener->event_count != atomic_read(&idev->event))
36000 + if (listener->event_count != atomic_read_unchecked(&idev->event))
36001 return POLLIN | POLLRDNORM;
36002 return 0;
36003 }
36004 @@ -537,7 +538,7 @@ static ssize_t uio_read(struct file *filep, char __user *buf,
36005 do {
36006 set_current_state(TASK_INTERRUPTIBLE);
36007
36008 - event_count = atomic_read(&idev->event);
36009 + event_count = atomic_read_unchecked(&idev->event);
36010 if (event_count != listener->event_count) {
36011 if (copy_to_user(buf, &event_count, count))
36012 retval = -EFAULT;
36013 @@ -606,13 +607,13 @@ static int uio_find_mem_index(struct vm_area_struct *vma)
36014 static void uio_vma_open(struct vm_area_struct *vma)
36015 {
36016 struct uio_device *idev = vma->vm_private_data;
36017 - idev->vma_count++;
36018 + local_inc(&idev->vma_count);
36019 }
36020
36021 static void uio_vma_close(struct vm_area_struct *vma)
36022 {
36023 struct uio_device *idev = vma->vm_private_data;
36024 - idev->vma_count--;
36025 + local_dec(&idev->vma_count);
36026 }
36027
36028 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
36029 @@ -821,7 +822,7 @@ int __uio_register_device(struct module *owner,
36030 idev->owner = owner;
36031 idev->info = info;
36032 init_waitqueue_head(&idev->wait);
36033 - atomic_set(&idev->event, 0);
36034 + atomic_set_unchecked(&idev->event, 0);
36035
36036 ret = uio_get_minor(idev);
36037 if (ret)
36038 diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
36039 index a845f8b..4f54072 100644
36040 --- a/drivers/usb/atm/cxacru.c
36041 +++ b/drivers/usb/atm/cxacru.c
36042 @@ -473,7 +473,7 @@ static ssize_t cxacru_sysfs_store_adsl_config(struct device *dev,
36043 ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
36044 if (ret < 2)
36045 return -EINVAL;
36046 - if (index < 0 || index > 0x7f)
36047 + if (index > 0x7f)
36048 return -EINVAL;
36049 pos += tmp;
36050
36051 diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
36052 index d3448ca..d2864ca 100644
36053 --- a/drivers/usb/atm/usbatm.c
36054 +++ b/drivers/usb/atm/usbatm.c
36055 @@ -333,7 +333,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
36056 if (printk_ratelimit())
36057 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
36058 __func__, vpi, vci);
36059 - atomic_inc(&vcc->stats->rx_err);
36060 + atomic_inc_unchecked(&vcc->stats->rx_err);
36061 return;
36062 }
36063
36064 @@ -361,7 +361,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
36065 if (length > ATM_MAX_AAL5_PDU) {
36066 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
36067 __func__, length, vcc);
36068 - atomic_inc(&vcc->stats->rx_err);
36069 + atomic_inc_unchecked(&vcc->stats->rx_err);
36070 goto out;
36071 }
36072
36073 @@ -370,14 +370,14 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
36074 if (sarb->len < pdu_length) {
36075 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
36076 __func__, pdu_length, sarb->len, vcc);
36077 - atomic_inc(&vcc->stats->rx_err);
36078 + atomic_inc_unchecked(&vcc->stats->rx_err);
36079 goto out;
36080 }
36081
36082 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
36083 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
36084 __func__, vcc);
36085 - atomic_inc(&vcc->stats->rx_err);
36086 + atomic_inc_unchecked(&vcc->stats->rx_err);
36087 goto out;
36088 }
36089
36090 @@ -387,7 +387,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
36091 if (printk_ratelimit())
36092 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
36093 __func__, length);
36094 - atomic_inc(&vcc->stats->rx_drop);
36095 + atomic_inc_unchecked(&vcc->stats->rx_drop);
36096 goto out;
36097 }
36098
36099 @@ -412,7 +412,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
36100
36101 vcc->push(vcc, skb);
36102
36103 - atomic_inc(&vcc->stats->rx);
36104 + atomic_inc_unchecked(&vcc->stats->rx);
36105 out:
36106 skb_trim(sarb, 0);
36107 }
36108 @@ -615,7 +615,7 @@ static void usbatm_tx_process(unsigned long data)
36109 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
36110
36111 usbatm_pop(vcc, skb);
36112 - atomic_inc(&vcc->stats->tx);
36113 + atomic_inc_unchecked(&vcc->stats->tx);
36114
36115 skb = skb_dequeue(&instance->sndqueue);
36116 }
36117 @@ -773,11 +773,11 @@ static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t * pos, char *pag
36118 if (!left--)
36119 return sprintf(page,
36120 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
36121 - atomic_read(&atm_dev->stats.aal5.tx),
36122 - atomic_read(&atm_dev->stats.aal5.tx_err),
36123 - atomic_read(&atm_dev->stats.aal5.rx),
36124 - atomic_read(&atm_dev->stats.aal5.rx_err),
36125 - atomic_read(&atm_dev->stats.aal5.rx_drop));
36126 + atomic_read_unchecked(&atm_dev->stats.aal5.tx),
36127 + atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
36128 + atomic_read_unchecked(&atm_dev->stats.aal5.rx),
36129 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
36130 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
36131
36132 if (!left--) {
36133 if (instance->disconnected)
36134 diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
36135 index d956965..4179a77 100644
36136 --- a/drivers/usb/core/devices.c
36137 +++ b/drivers/usb/core/devices.c
36138 @@ -126,7 +126,7 @@ static const char format_endpt[] =
36139 * time it gets called.
36140 */
36141 static struct device_connect_event {
36142 - atomic_t count;
36143 + atomic_unchecked_t count;
36144 wait_queue_head_t wait;
36145 } device_event = {
36146 .count = ATOMIC_INIT(1),
36147 @@ -164,7 +164,7 @@ static const struct class_info clas_info[] = {
36148
36149 void usbfs_conn_disc_event(void)
36150 {
36151 - atomic_add(2, &device_event.count);
36152 + atomic_add_unchecked(2, &device_event.count);
36153 wake_up(&device_event.wait);
36154 }
36155
36156 @@ -648,7 +648,7 @@ static unsigned int usb_device_poll(struct file *file,
36157
36158 poll_wait(file, &device_event.wait, wait);
36159
36160 - event_count = atomic_read(&device_event.count);
36161 + event_count = atomic_read_unchecked(&device_event.count);
36162 if (file->f_version != event_count) {
36163 file->f_version = event_count;
36164 return POLLIN | POLLRDNORM;
36165 diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
36166 index b3bdfed..a9460e0 100644
36167 --- a/drivers/usb/core/message.c
36168 +++ b/drivers/usb/core/message.c
36169 @@ -869,8 +869,8 @@ char *usb_cache_string(struct usb_device *udev, int index)
36170 buf = kmalloc(MAX_USB_STRING_SIZE, GFP_NOIO);
36171 if (buf) {
36172 len = usb_string(udev, index, buf, MAX_USB_STRING_SIZE);
36173 - if (len > 0) {
36174 - smallbuf = kmalloc(++len, GFP_NOIO);
36175 + if (len++ > 0) {
36176 + smallbuf = kmalloc(len, GFP_NOIO);
36177 if (!smallbuf)
36178 return buf;
36179 memcpy(smallbuf, buf, len);
36180 diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
36181 index 1fc8f12..20647c1 100644
36182 --- a/drivers/usb/early/ehci-dbgp.c
36183 +++ b/drivers/usb/early/ehci-dbgp.c
36184 @@ -97,7 +97,8 @@ static inline u32 dbgp_len_update(u32 x, u32 len)
36185
36186 #ifdef CONFIG_KGDB
36187 static struct kgdb_io kgdbdbgp_io_ops;
36188 -#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
36189 +static struct kgdb_io kgdbdbgp_io_ops_console;
36190 +#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console)
36191 #else
36192 #define dbgp_kgdb_mode (0)
36193 #endif
36194 @@ -1035,6 +1036,13 @@ static struct kgdb_io kgdbdbgp_io_ops = {
36195 .write_char = kgdbdbgp_write_char,
36196 };
36197
36198 +static struct kgdb_io kgdbdbgp_io_ops_console = {
36199 + .name = "kgdbdbgp",
36200 + .read_char = kgdbdbgp_read_char,
36201 + .write_char = kgdbdbgp_write_char,
36202 + .is_console = 1
36203 +};
36204 +
36205 static int kgdbdbgp_wait_time;
36206
36207 static int __init kgdbdbgp_parse_config(char *str)
36208 @@ -1050,8 +1058,10 @@ static int __init kgdbdbgp_parse_config(char *str)
36209 ptr++;
36210 kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
36211 }
36212 - kgdb_register_io_module(&kgdbdbgp_io_ops);
36213 - kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
36214 + if (early_dbgp_console.index != -1)
36215 + kgdb_register_io_module(&kgdbdbgp_io_ops_console);
36216 + else
36217 + kgdb_register_io_module(&kgdbdbgp_io_ops);
36218
36219 return 0;
36220 }
36221 diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
36222 index d6bea3e..60b250e 100644
36223 --- a/drivers/usb/wusbcore/wa-hc.h
36224 +++ b/drivers/usb/wusbcore/wa-hc.h
36225 @@ -192,7 +192,7 @@ struct wahc {
36226 struct list_head xfer_delayed_list;
36227 spinlock_t xfer_list_lock;
36228 struct work_struct xfer_work;
36229 - atomic_t xfer_id_count;
36230 + atomic_unchecked_t xfer_id_count;
36231 };
36232
36233
36234 @@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *wa)
36235 INIT_LIST_HEAD(&wa->xfer_delayed_list);
36236 spin_lock_init(&wa->xfer_list_lock);
36237 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
36238 - atomic_set(&wa->xfer_id_count, 1);
36239 + atomic_set_unchecked(&wa->xfer_id_count, 1);
36240 }
36241
36242 /**
36243 diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
36244 index 57c01ab..8a05959 100644
36245 --- a/drivers/usb/wusbcore/wa-xfer.c
36246 +++ b/drivers/usb/wusbcore/wa-xfer.c
36247 @@ -296,7 +296,7 @@ out:
36248 */
36249 static void wa_xfer_id_init(struct wa_xfer *xfer)
36250 {
36251 - xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
36252 + xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
36253 }
36254
36255 /*
36256 diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
36257 index c14c42b..f955cc2 100644
36258 --- a/drivers/vhost/vhost.c
36259 +++ b/drivers/vhost/vhost.c
36260 @@ -629,7 +629,7 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
36261 return 0;
36262 }
36263
36264 -static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp)
36265 +static long vhost_set_vring(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
36266 {
36267 struct file *eventfp, *filep = NULL,
36268 *pollstart = NULL, *pollstop = NULL;
36269 diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
36270 index b0b2ac3..89a4399 100644
36271 --- a/drivers/video/aty/aty128fb.c
36272 +++ b/drivers/video/aty/aty128fb.c
36273 @@ -148,7 +148,7 @@ enum {
36274 };
36275
36276 /* Must match above enum */
36277 -static const char *r128_family[] __devinitdata = {
36278 +static const char *r128_family[] __devinitconst = {
36279 "AGP",
36280 "PCI",
36281 "PRO AGP",
36282 diff --git a/drivers/video/fbcmap.c b/drivers/video/fbcmap.c
36283 index 5c3960d..15cf8fc 100644
36284 --- a/drivers/video/fbcmap.c
36285 +++ b/drivers/video/fbcmap.c
36286 @@ -285,8 +285,7 @@ int fb_set_user_cmap(struct fb_cmap_user *cmap, struct fb_info *info)
36287 rc = -ENODEV;
36288 goto out;
36289 }
36290 - if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
36291 - !info->fbops->fb_setcmap)) {
36292 + if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
36293 rc = -EINVAL;
36294 goto out1;
36295 }
36296 diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
36297 index ad93629..e020fc3 100644
36298 --- a/drivers/video/fbmem.c
36299 +++ b/drivers/video/fbmem.c
36300 @@ -428,7 +428,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
36301 image->dx += image->width + 8;
36302 }
36303 } else if (rotate == FB_ROTATE_UD) {
36304 - for (x = 0; x < num && image->dx >= 0; x++) {
36305 + for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
36306 info->fbops->fb_imageblit(info, image);
36307 image->dx -= image->width + 8;
36308 }
36309 @@ -440,7 +440,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
36310 image->dy += image->height + 8;
36311 }
36312 } else if (rotate == FB_ROTATE_CCW) {
36313 - for (x = 0; x < num && image->dy >= 0; x++) {
36314 + for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
36315 info->fbops->fb_imageblit(info, image);
36316 image->dy -= image->height + 8;
36317 }
36318 @@ -1143,7 +1143,7 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
36319 return -EFAULT;
36320 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
36321 return -EINVAL;
36322 - if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
36323 + if (con2fb.framebuffer >= FB_MAX)
36324 return -EINVAL;
36325 if (!registered_fb[con2fb.framebuffer])
36326 request_module("fb%d", con2fb.framebuffer);
36327 diff --git a/drivers/video/geode/gx1fb_core.c b/drivers/video/geode/gx1fb_core.c
36328 index 5a5d092..265c5ed 100644
36329 --- a/drivers/video/geode/gx1fb_core.c
36330 +++ b/drivers/video/geode/gx1fb_core.c
36331 @@ -29,7 +29,7 @@ static int crt_option = 1;
36332 static char panel_option[32] = "";
36333
36334 /* Modes relevant to the GX1 (taken from modedb.c) */
36335 -static const struct fb_videomode __devinitdata gx1_modedb[] = {
36336 +static const struct fb_videomode __devinitconst gx1_modedb[] = {
36337 /* 640x480-60 VESA */
36338 { NULL, 60, 640, 480, 39682, 48, 16, 33, 10, 96, 2,
36339 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
36340 diff --git a/drivers/video/gxt4500.c b/drivers/video/gxt4500.c
36341 index 0fad23f..0e9afa4 100644
36342 --- a/drivers/video/gxt4500.c
36343 +++ b/drivers/video/gxt4500.c
36344 @@ -156,7 +156,7 @@ struct gxt4500_par {
36345 static char *mode_option;
36346
36347 /* default mode: 1280x1024 @ 60 Hz, 8 bpp */
36348 -static const struct fb_videomode defaultmode __devinitdata = {
36349 +static const struct fb_videomode defaultmode __devinitconst = {
36350 .refresh = 60,
36351 .xres = 1280,
36352 .yres = 1024,
36353 @@ -581,7 +581,7 @@ static int gxt4500_blank(int blank, struct fb_info *info)
36354 return 0;
36355 }
36356
36357 -static const struct fb_fix_screeninfo gxt4500_fix __devinitdata = {
36358 +static const struct fb_fix_screeninfo gxt4500_fix __devinitconst = {
36359 .id = "IBM GXT4500P",
36360 .type = FB_TYPE_PACKED_PIXELS,
36361 .visual = FB_VISUAL_PSEUDOCOLOR,
36362 diff --git a/drivers/video/i810/i810_accel.c b/drivers/video/i810/i810_accel.c
36363 index 7672d2e..b56437f 100644
36364 --- a/drivers/video/i810/i810_accel.c
36365 +++ b/drivers/video/i810/i810_accel.c
36366 @@ -73,6 +73,7 @@ static inline int wait_for_space(struct fb_info *info, u32 space)
36367 }
36368 }
36369 printk("ringbuffer lockup!!!\n");
36370 + printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
36371 i810_report_error(mmio);
36372 par->dev_flags |= LOCKUP;
36373 info->pixmap.scan_align = 1;
36374 diff --git a/drivers/video/i810/i810_main.c b/drivers/video/i810/i810_main.c
36375 index 318f6fb..9a389c1 100644
36376 --- a/drivers/video/i810/i810_main.c
36377 +++ b/drivers/video/i810/i810_main.c
36378 @@ -97,7 +97,7 @@ static int i810fb_blank (int blank_mode, struct fb_info *info);
36379 static void i810fb_release_resource (struct fb_info *info, struct i810fb_par *par);
36380
36381 /* PCI */
36382 -static const char *i810_pci_list[] __devinitdata = {
36383 +static const char *i810_pci_list[] __devinitconst = {
36384 "Intel(R) 810 Framebuffer Device" ,
36385 "Intel(R) 810-DC100 Framebuffer Device" ,
36386 "Intel(R) 810E Framebuffer Device" ,
36387 diff --git a/drivers/video/jz4740_fb.c b/drivers/video/jz4740_fb.c
36388 index de36693..3c63fc2 100644
36389 --- a/drivers/video/jz4740_fb.c
36390 +++ b/drivers/video/jz4740_fb.c
36391 @@ -136,7 +136,7 @@ struct jzfb {
36392 uint32_t pseudo_palette[16];
36393 };
36394
36395 -static const struct fb_fix_screeninfo jzfb_fix __devinitdata = {
36396 +static const struct fb_fix_screeninfo jzfb_fix __devinitconst = {
36397 .id = "JZ4740 FB",
36398 .type = FB_TYPE_PACKED_PIXELS,
36399 .visual = FB_VISUAL_TRUECOLOR,
36400 diff --git a/drivers/video/logo/logo_linux_clut224.ppm b/drivers/video/logo/logo_linux_clut224.ppm
36401 index 3c14e43..eafa544 100644
36402 --- a/drivers/video/logo/logo_linux_clut224.ppm
36403 +++ b/drivers/video/logo/logo_linux_clut224.ppm
36404 @@ -1,1604 +1,1123 @@
36405 P3
36406 -# Standard 224-color Linux logo
36407 80 80
36408 255
36409 - 0 0 0 0 0 0 0 0 0 0 0 0
36410 - 0 0 0 0 0 0 0 0 0 0 0 0
36411 - 0 0 0 0 0 0 0 0 0 0 0 0
36412 - 0 0 0 0 0 0 0 0 0 0 0 0
36413 - 0 0 0 0 0 0 0 0 0 0 0 0
36414 - 0 0 0 0 0 0 0 0 0 0 0 0
36415 - 0 0 0 0 0 0 0 0 0 0 0 0
36416 - 0 0 0 0 0 0 0 0 0 0 0 0
36417 - 0 0 0 0 0 0 0 0 0 0 0 0
36418 - 6 6 6 6 6 6 10 10 10 10 10 10
36419 - 10 10 10 6 6 6 6 6 6 6 6 6
36420 - 0 0 0 0 0 0 0 0 0 0 0 0
36421 - 0 0 0 0 0 0 0 0 0 0 0 0
36422 - 0 0 0 0 0 0 0 0 0 0 0 0
36423 - 0 0 0 0 0 0 0 0 0 0 0 0
36424 - 0 0 0 0 0 0 0 0 0 0 0 0
36425 - 0 0 0 0 0 0 0 0 0 0 0 0
36426 - 0 0 0 0 0 0 0 0 0 0 0 0
36427 - 0 0 0 0 0 0 0 0 0 0 0 0
36428 - 0 0 0 0 0 0 0 0 0 0 0 0
36429 - 0 0 0 0 0 0 0 0 0 0 0 0
36430 - 0 0 0 0 0 0 0 0 0 0 0 0
36431 - 0 0 0 0 0 0 0 0 0 0 0 0
36432 - 0 0 0 0 0 0 0 0 0 0 0 0
36433 - 0 0 0 0 0 0 0 0 0 0 0 0
36434 - 0 0 0 0 0 0 0 0 0 0 0 0
36435 - 0 0 0 0 0 0 0 0 0 0 0 0
36436 - 0 0 0 0 0 0 0 0 0 0 0 0
36437 - 0 0 0 6 6 6 10 10 10 14 14 14
36438 - 22 22 22 26 26 26 30 30 30 34 34 34
36439 - 30 30 30 30 30 30 26 26 26 18 18 18
36440 - 14 14 14 10 10 10 6 6 6 0 0 0
36441 - 0 0 0 0 0 0 0 0 0 0 0 0
36442 - 0 0 0 0 0 0 0 0 0 0 0 0
36443 - 0 0 0 0 0 0 0 0 0 0 0 0
36444 - 0 0 0 0 0 0 0 0 0 0 0 0
36445 - 0 0 0 0 0 0 0 0 0 0 0 0
36446 - 0 0 0 0 0 0 0 0 0 0 0 0
36447 - 0 0 0 0 0 0 0 0 0 0 0 0
36448 - 0 0 0 0 0 0 0 0 0 0 0 0
36449 - 0 0 0 0 0 0 0 0 0 0 0 0
36450 - 0 0 0 0 0 1 0 0 1 0 0 0
36451 - 0 0 0 0 0 0 0 0 0 0 0 0
36452 - 0 0 0 0 0 0 0 0 0 0 0 0
36453 - 0 0 0 0 0 0 0 0 0 0 0 0
36454 - 0 0 0 0 0 0 0 0 0 0 0 0
36455 - 0 0 0 0 0 0 0 0 0 0 0 0
36456 - 0 0 0 0 0 0 0 0 0 0 0 0
36457 - 6 6 6 14 14 14 26 26 26 42 42 42
36458 - 54 54 54 66 66 66 78 78 78 78 78 78
36459 - 78 78 78 74 74 74 66 66 66 54 54 54
36460 - 42 42 42 26 26 26 18 18 18 10 10 10
36461 - 6 6 6 0 0 0 0 0 0 0 0 0
36462 - 0 0 0 0 0 0 0 0 0 0 0 0
36463 - 0 0 0 0 0 0 0 0 0 0 0 0
36464 - 0 0 0 0 0 0 0 0 0 0 0 0
36465 - 0 0 0 0 0 0 0 0 0 0 0 0
36466 - 0 0 0 0 0 0 0 0 0 0 0 0
36467 - 0 0 0 0 0 0 0 0 0 0 0 0
36468 - 0 0 0 0 0 0 0 0 0 0 0 0
36469 - 0 0 0 0 0 0 0 0 0 0 0 0
36470 - 0 0 1 0 0 0 0 0 0 0 0 0
36471 - 0 0 0 0 0 0 0 0 0 0 0 0
36472 - 0 0 0 0 0 0 0 0 0 0 0 0
36473 - 0 0 0 0 0 0 0 0 0 0 0 0
36474 - 0 0 0 0 0 0 0 0 0 0 0 0
36475 - 0 0 0 0 0 0 0 0 0 0 0 0
36476 - 0 0 0 0 0 0 0 0 0 10 10 10
36477 - 22 22 22 42 42 42 66 66 66 86 86 86
36478 - 66 66 66 38 38 38 38 38 38 22 22 22
36479 - 26 26 26 34 34 34 54 54 54 66 66 66
36480 - 86 86 86 70 70 70 46 46 46 26 26 26
36481 - 14 14 14 6 6 6 0 0 0 0 0 0
36482 - 0 0 0 0 0 0 0 0 0 0 0 0
36483 - 0 0 0 0 0 0 0 0 0 0 0 0
36484 - 0 0 0 0 0 0 0 0 0 0 0 0
36485 - 0 0 0 0 0 0 0 0 0 0 0 0
36486 - 0 0 0 0 0 0 0 0 0 0 0 0
36487 - 0 0 0 0 0 0 0 0 0 0 0 0
36488 - 0 0 0 0 0 0 0 0 0 0 0 0
36489 - 0 0 0 0 0 0 0 0 0 0 0 0
36490 - 0 0 1 0 0 1 0 0 1 0 0 0
36491 - 0 0 0 0 0 0 0 0 0 0 0 0
36492 - 0 0 0 0 0 0 0 0 0 0 0 0
36493 - 0 0 0 0 0 0 0 0 0 0 0 0
36494 - 0 0 0 0 0 0 0 0 0 0 0 0
36495 - 0 0 0 0 0 0 0 0 0 0 0 0
36496 - 0 0 0 0 0 0 10 10 10 26 26 26
36497 - 50 50 50 82 82 82 58 58 58 6 6 6
36498 - 2 2 6 2 2 6 2 2 6 2 2 6
36499 - 2 2 6 2 2 6 2 2 6 2 2 6
36500 - 6 6 6 54 54 54 86 86 86 66 66 66
36501 - 38 38 38 18 18 18 6 6 6 0 0 0
36502 - 0 0 0 0 0 0 0 0 0 0 0 0
36503 - 0 0 0 0 0 0 0 0 0 0 0 0
36504 - 0 0 0 0 0 0 0 0 0 0 0 0
36505 - 0 0 0 0 0 0 0 0 0 0 0 0
36506 - 0 0 0 0 0 0 0 0 0 0 0 0
36507 - 0 0 0 0 0 0 0 0 0 0 0 0
36508 - 0 0 0 0 0 0 0 0 0 0 0 0
36509 - 0 0 0 0 0 0 0 0 0 0 0 0
36510 - 0 0 0 0 0 0 0 0 0 0 0 0
36511 - 0 0 0 0 0 0 0 0 0 0 0 0
36512 - 0 0 0 0 0 0 0 0 0 0 0 0
36513 - 0 0 0 0 0 0 0 0 0 0 0 0
36514 - 0 0 0 0 0 0 0 0 0 0 0 0
36515 - 0 0 0 0 0 0 0 0 0 0 0 0
36516 - 0 0 0 6 6 6 22 22 22 50 50 50
36517 - 78 78 78 34 34 34 2 2 6 2 2 6
36518 - 2 2 6 2 2 6 2 2 6 2 2 6
36519 - 2 2 6 2 2 6 2 2 6 2 2 6
36520 - 2 2 6 2 2 6 6 6 6 70 70 70
36521 - 78 78 78 46 46 46 22 22 22 6 6 6
36522 - 0 0 0 0 0 0 0 0 0 0 0 0
36523 - 0 0 0 0 0 0 0 0 0 0 0 0
36524 - 0 0 0 0 0 0 0 0 0 0 0 0
36525 - 0 0 0 0 0 0 0 0 0 0 0 0
36526 - 0 0 0 0 0 0 0 0 0 0 0 0
36527 - 0 0 0 0 0 0 0 0 0 0 0 0
36528 - 0 0 0 0 0 0 0 0 0 0 0 0
36529 - 0 0 0 0 0 0 0 0 0 0 0 0
36530 - 0 0 1 0 0 1 0 0 1 0 0 0
36531 - 0 0 0 0 0 0 0 0 0 0 0 0
36532 - 0 0 0 0 0 0 0 0 0 0 0 0
36533 - 0 0 0 0 0 0 0 0 0 0 0 0
36534 - 0 0 0 0 0 0 0 0 0 0 0 0
36535 - 0 0 0 0 0 0 0 0 0 0 0 0
36536 - 6 6 6 18 18 18 42 42 42 82 82 82
36537 - 26 26 26 2 2 6 2 2 6 2 2 6
36538 - 2 2 6 2 2 6 2 2 6 2 2 6
36539 - 2 2 6 2 2 6 2 2 6 14 14 14
36540 - 46 46 46 34 34 34 6 6 6 2 2 6
36541 - 42 42 42 78 78 78 42 42 42 18 18 18
36542 - 6 6 6 0 0 0 0 0 0 0 0 0
36543 - 0 0 0 0 0 0 0 0 0 0 0 0
36544 - 0 0 0 0 0 0 0 0 0 0 0 0
36545 - 0 0 0 0 0 0 0 0 0 0 0 0
36546 - 0 0 0 0 0 0 0 0 0 0 0 0
36547 - 0 0 0 0 0 0 0 0 0 0 0 0
36548 - 0 0 0 0 0 0 0 0 0 0 0 0
36549 - 0 0 0 0 0 0 0 0 0 0 0 0
36550 - 0 0 1 0 0 0 0 0 1 0 0 0
36551 - 0 0 0 0 0 0 0 0 0 0 0 0
36552 - 0 0 0 0 0 0 0 0 0 0 0 0
36553 - 0 0 0 0 0 0 0 0 0 0 0 0
36554 - 0 0 0 0 0 0 0 0 0 0 0 0
36555 - 0 0 0 0 0 0 0 0 0 0 0 0
36556 - 10 10 10 30 30 30 66 66 66 58 58 58
36557 - 2 2 6 2 2 6 2 2 6 2 2 6
36558 - 2 2 6 2 2 6 2 2 6 2 2 6
36559 - 2 2 6 2 2 6 2 2 6 26 26 26
36560 - 86 86 86 101 101 101 46 46 46 10 10 10
36561 - 2 2 6 58 58 58 70 70 70 34 34 34
36562 - 10 10 10 0 0 0 0 0 0 0 0 0
36563 - 0 0 0 0 0 0 0 0 0 0 0 0
36564 - 0 0 0 0 0 0 0 0 0 0 0 0
36565 - 0 0 0 0 0 0 0 0 0 0 0 0
36566 - 0 0 0 0 0 0 0 0 0 0 0 0
36567 - 0 0 0 0 0 0 0 0 0 0 0 0
36568 - 0 0 0 0 0 0 0 0 0 0 0 0
36569 - 0 0 0 0 0 0 0 0 0 0 0 0
36570 - 0 0 1 0 0 1 0 0 1 0 0 0
36571 - 0 0 0 0 0 0 0 0 0 0 0 0
36572 - 0 0 0 0 0 0 0 0 0 0 0 0
36573 - 0 0 0 0 0 0 0 0 0 0 0 0
36574 - 0 0 0 0 0 0 0 0 0 0 0 0
36575 - 0 0 0 0 0 0 0 0 0 0 0 0
36576 - 14 14 14 42 42 42 86 86 86 10 10 10
36577 - 2 2 6 2 2 6 2 2 6 2 2 6
36578 - 2 2 6 2 2 6 2 2 6 2 2 6
36579 - 2 2 6 2 2 6 2 2 6 30 30 30
36580 - 94 94 94 94 94 94 58 58 58 26 26 26
36581 - 2 2 6 6 6 6 78 78 78 54 54 54
36582 - 22 22 22 6 6 6 0 0 0 0 0 0
36583 - 0 0 0 0 0 0 0 0 0 0 0 0
36584 - 0 0 0 0 0 0 0 0 0 0 0 0
36585 - 0 0 0 0 0 0 0 0 0 0 0 0
36586 - 0 0 0 0 0 0 0 0 0 0 0 0
36587 - 0 0 0 0 0 0 0 0 0 0 0 0
36588 - 0 0 0 0 0 0 0 0 0 0 0 0
36589 - 0 0 0 0 0 0 0 0 0 0 0 0
36590 - 0 0 0 0 0 0 0 0 0 0 0 0
36591 - 0 0 0 0 0 0 0 0 0 0 0 0
36592 - 0 0 0 0 0 0 0 0 0 0 0 0
36593 - 0 0 0 0 0 0 0 0 0 0 0 0
36594 - 0 0 0 0 0 0 0 0 0 0 0 0
36595 - 0 0 0 0 0 0 0 0 0 6 6 6
36596 - 22 22 22 62 62 62 62 62 62 2 2 6
36597 - 2 2 6 2 2 6 2 2 6 2 2 6
36598 - 2 2 6 2 2 6 2 2 6 2 2 6
36599 - 2 2 6 2 2 6 2 2 6 26 26 26
36600 - 54 54 54 38 38 38 18 18 18 10 10 10
36601 - 2 2 6 2 2 6 34 34 34 82 82 82
36602 - 38 38 38 14 14 14 0 0 0 0 0 0
36603 - 0 0 0 0 0 0 0 0 0 0 0 0
36604 - 0 0 0 0 0 0 0 0 0 0 0 0
36605 - 0 0 0 0 0 0 0 0 0 0 0 0
36606 - 0 0 0 0 0 0 0 0 0 0 0 0
36607 - 0 0 0 0 0 0 0 0 0 0 0 0
36608 - 0 0 0 0 0 0 0 0 0 0 0 0
36609 - 0 0 0 0 0 0 0 0 0 0 0 0
36610 - 0 0 0 0 0 1 0 0 1 0 0 0
36611 - 0 0 0 0 0 0 0 0 0 0 0 0
36612 - 0 0 0 0 0 0 0 0 0 0 0 0
36613 - 0 0 0 0 0 0 0 0 0 0 0 0
36614 - 0 0 0 0 0 0 0 0 0 0 0 0
36615 - 0 0 0 0 0 0 0 0 0 6 6 6
36616 - 30 30 30 78 78 78 30 30 30 2 2 6
36617 - 2 2 6 2 2 6 2 2 6 2 2 6
36618 - 2 2 6 2 2 6 2 2 6 2 2 6
36619 - 2 2 6 2 2 6 2 2 6 10 10 10
36620 - 10 10 10 2 2 6 2 2 6 2 2 6
36621 - 2 2 6 2 2 6 2 2 6 78 78 78
36622 - 50 50 50 18 18 18 6 6 6 0 0 0
36623 - 0 0 0 0 0 0 0 0 0 0 0 0
36624 - 0 0 0 0 0 0 0 0 0 0 0 0
36625 - 0 0 0 0 0 0 0 0 0 0 0 0
36626 - 0 0 0 0 0 0 0 0 0 0 0 0
36627 - 0 0 0 0 0 0 0 0 0 0 0 0
36628 - 0 0 0 0 0 0 0 0 0 0 0 0
36629 - 0 0 0 0 0 0 0 0 0 0 0 0
36630 - 0 0 1 0 0 0 0 0 0 0 0 0
36631 - 0 0 0 0 0 0 0 0 0 0 0 0
36632 - 0 0 0 0 0 0 0 0 0 0 0 0
36633 - 0 0 0 0 0 0 0 0 0 0 0 0
36634 - 0 0 0 0 0 0 0 0 0 0 0 0
36635 - 0 0 0 0 0 0 0 0 0 10 10 10
36636 - 38 38 38 86 86 86 14 14 14 2 2 6
36637 - 2 2 6 2 2 6 2 2 6 2 2 6
36638 - 2 2 6 2 2 6 2 2 6 2 2 6
36639 - 2 2 6 2 2 6 2 2 6 2 2 6
36640 - 2 2 6 2 2 6 2 2 6 2 2 6
36641 - 2 2 6 2 2 6 2 2 6 54 54 54
36642 - 66 66 66 26 26 26 6 6 6 0 0 0
36643 - 0 0 0 0 0 0 0 0 0 0 0 0
36644 - 0 0 0 0 0 0 0 0 0 0 0 0
36645 - 0 0 0 0 0 0 0 0 0 0 0 0
36646 - 0 0 0 0 0 0 0 0 0 0 0 0
36647 - 0 0 0 0 0 0 0 0 0 0 0 0
36648 - 0 0 0 0 0 0 0 0 0 0 0 0
36649 - 0 0 0 0 0 0 0 0 0 0 0 0
36650 - 0 0 0 0 0 1 0 0 1 0 0 0
36651 - 0 0 0 0 0 0 0 0 0 0 0 0
36652 - 0 0 0 0 0 0 0 0 0 0 0 0
36653 - 0 0 0 0 0 0 0 0 0 0 0 0
36654 - 0 0 0 0 0 0 0 0 0 0 0 0
36655 - 0 0 0 0 0 0 0 0 0 14 14 14
36656 - 42 42 42 82 82 82 2 2 6 2 2 6
36657 - 2 2 6 6 6 6 10 10 10 2 2 6
36658 - 2 2 6 2 2 6 2 2 6 2 2 6
36659 - 2 2 6 2 2 6 2 2 6 6 6 6
36660 - 14 14 14 10 10 10 2 2 6 2 2 6
36661 - 2 2 6 2 2 6 2 2 6 18 18 18
36662 - 82 82 82 34 34 34 10 10 10 0 0 0
36663 - 0 0 0 0 0 0 0 0 0 0 0 0
36664 - 0 0 0 0 0 0 0 0 0 0 0 0
36665 - 0 0 0 0 0 0 0 0 0 0 0 0
36666 - 0 0 0 0 0 0 0 0 0 0 0 0
36667 - 0 0 0 0 0 0 0 0 0 0 0 0
36668 - 0 0 0 0 0 0 0 0 0 0 0 0
36669 - 0 0 0 0 0 0 0 0 0 0 0 0
36670 - 0 0 1 0 0 0 0 0 0 0 0 0
36671 - 0 0 0 0 0 0 0 0 0 0 0 0
36672 - 0 0 0 0 0 0 0 0 0 0 0 0
36673 - 0 0 0 0 0 0 0 0 0 0 0 0
36674 - 0 0 0 0 0 0 0 0 0 0 0 0
36675 - 0 0 0 0 0 0 0 0 0 14 14 14
36676 - 46 46 46 86 86 86 2 2 6 2 2 6
36677 - 6 6 6 6 6 6 22 22 22 34 34 34
36678 - 6 6 6 2 2 6 2 2 6 2 2 6
36679 - 2 2 6 2 2 6 18 18 18 34 34 34
36680 - 10 10 10 50 50 50 22 22 22 2 2 6
36681 - 2 2 6 2 2 6 2 2 6 10 10 10
36682 - 86 86 86 42 42 42 14 14 14 0 0 0
36683 - 0 0 0 0 0 0 0 0 0 0 0 0
36684 - 0 0 0 0 0 0 0 0 0 0 0 0
36685 - 0 0 0 0 0 0 0 0 0 0 0 0
36686 - 0 0 0 0 0 0 0 0 0 0 0 0
36687 - 0 0 0 0 0 0 0 0 0 0 0 0
36688 - 0 0 0 0 0 0 0 0 0 0 0 0
36689 - 0 0 0 0 0 0 0 0 0 0 0 0
36690 - 0 0 1 0 0 1 0 0 1 0 0 0
36691 - 0 0 0 0 0 0 0 0 0 0 0 0
36692 - 0 0 0 0 0 0 0 0 0 0 0 0
36693 - 0 0 0 0 0 0 0 0 0 0 0 0
36694 - 0 0 0 0 0 0 0 0 0 0 0 0
36695 - 0 0 0 0 0 0 0 0 0 14 14 14
36696 - 46 46 46 86 86 86 2 2 6 2 2 6
36697 - 38 38 38 116 116 116 94 94 94 22 22 22
36698 - 22 22 22 2 2 6 2 2 6 2 2 6
36699 - 14 14 14 86 86 86 138 138 138 162 162 162
36700 -154 154 154 38 38 38 26 26 26 6 6 6
36701 - 2 2 6 2 2 6 2 2 6 2 2 6
36702 - 86 86 86 46 46 46 14 14 14 0 0 0
36703 - 0 0 0 0 0 0 0 0 0 0 0 0
36704 - 0 0 0 0 0 0 0 0 0 0 0 0
36705 - 0 0 0 0 0 0 0 0 0 0 0 0
36706 - 0 0 0 0 0 0 0 0 0 0 0 0
36707 - 0 0 0 0 0 0 0 0 0 0 0 0
36708 - 0 0 0 0 0 0 0 0 0 0 0 0
36709 - 0 0 0 0 0 0 0 0 0 0 0 0
36710 - 0 0 0 0 0 0 0 0 0 0 0 0
36711 - 0 0 0 0 0 0 0 0 0 0 0 0
36712 - 0 0 0 0 0 0 0 0 0 0 0 0
36713 - 0 0 0 0 0 0 0 0 0 0 0 0
36714 - 0 0 0 0 0 0 0 0 0 0 0 0
36715 - 0 0 0 0 0 0 0 0 0 14 14 14
36716 - 46 46 46 86 86 86 2 2 6 14 14 14
36717 -134 134 134 198 198 198 195 195 195 116 116 116
36718 - 10 10 10 2 2 6 2 2 6 6 6 6
36719 -101 98 89 187 187 187 210 210 210 218 218 218
36720 -214 214 214 134 134 134 14 14 14 6 6 6
36721 - 2 2 6 2 2 6 2 2 6 2 2 6
36722 - 86 86 86 50 50 50 18 18 18 6 6 6
36723 - 0 0 0 0 0 0 0 0 0 0 0 0
36724 - 0 0 0 0 0 0 0 0 0 0 0 0
36725 - 0 0 0 0 0 0 0 0 0 0 0 0
36726 - 0 0 0 0 0 0 0 0 0 0 0 0
36727 - 0 0 0 0 0 0 0 0 0 0 0 0
36728 - 0 0 0 0 0 0 0 0 0 0 0 0
36729 - 0 0 0 0 0 0 0 0 1 0 0 0
36730 - 0 0 1 0 0 1 0 0 1 0 0 0
36731 - 0 0 0 0 0 0 0 0 0 0 0 0
36732 - 0 0 0 0 0 0 0 0 0 0 0 0
36733 - 0 0 0 0 0 0 0 0 0 0 0 0
36734 - 0 0 0 0 0 0 0 0 0 0 0 0
36735 - 0 0 0 0 0 0 0 0 0 14 14 14
36736 - 46 46 46 86 86 86 2 2 6 54 54 54
36737 -218 218 218 195 195 195 226 226 226 246 246 246
36738 - 58 58 58 2 2 6 2 2 6 30 30 30
36739 -210 210 210 253 253 253 174 174 174 123 123 123
36740 -221 221 221 234 234 234 74 74 74 2 2 6
36741 - 2 2 6 2 2 6 2 2 6 2 2 6
36742 - 70 70 70 58 58 58 22 22 22 6 6 6
36743 - 0 0 0 0 0 0 0 0 0 0 0 0
36744 - 0 0 0 0 0 0 0 0 0 0 0 0
36745 - 0 0 0 0 0 0 0 0 0 0 0 0
36746 - 0 0 0 0 0 0 0 0 0 0 0 0
36747 - 0 0 0 0 0 0 0 0 0 0 0 0
36748 - 0 0 0 0 0 0 0 0 0 0 0 0
36749 - 0 0 0 0 0 0 0 0 0 0 0 0
36750 - 0 0 0 0 0 0 0 0 0 0 0 0
36751 - 0 0 0 0 0 0 0 0 0 0 0 0
36752 - 0 0 0 0 0 0 0 0 0 0 0 0
36753 - 0 0 0 0 0 0 0 0 0 0 0 0
36754 - 0 0 0 0 0 0 0 0 0 0 0 0
36755 - 0 0 0 0 0 0 0 0 0 14 14 14
36756 - 46 46 46 82 82 82 2 2 6 106 106 106
36757 -170 170 170 26 26 26 86 86 86 226 226 226
36758 -123 123 123 10 10 10 14 14 14 46 46 46
36759 -231 231 231 190 190 190 6 6 6 70 70 70
36760 - 90 90 90 238 238 238 158 158 158 2 2 6
36761 - 2 2 6 2 2 6 2 2 6 2 2 6
36762 - 70 70 70 58 58 58 22 22 22 6 6 6
36763 - 0 0 0 0 0 0 0 0 0 0 0 0
36764 - 0 0 0 0 0 0 0 0 0 0 0 0
36765 - 0 0 0 0 0 0 0 0 0 0 0 0
36766 - 0 0 0 0 0 0 0 0 0 0 0 0
36767 - 0 0 0 0 0 0 0 0 0 0 0 0
36768 - 0 0 0 0 0 0 0 0 0 0 0 0
36769 - 0 0 0 0 0 0 0 0 1 0 0 0
36770 - 0 0 1 0 0 1 0 0 1 0 0 0
36771 - 0 0 0 0 0 0 0 0 0 0 0 0
36772 - 0 0 0 0 0 0 0 0 0 0 0 0
36773 - 0 0 0 0 0 0 0 0 0 0 0 0
36774 - 0 0 0 0 0 0 0 0 0 0 0 0
36775 - 0 0 0 0 0 0 0 0 0 14 14 14
36776 - 42 42 42 86 86 86 6 6 6 116 116 116
36777 -106 106 106 6 6 6 70 70 70 149 149 149
36778 -128 128 128 18 18 18 38 38 38 54 54 54
36779 -221 221 221 106 106 106 2 2 6 14 14 14
36780 - 46 46 46 190 190 190 198 198 198 2 2 6
36781 - 2 2 6 2 2 6 2 2 6 2 2 6
36782 - 74 74 74 62 62 62 22 22 22 6 6 6
36783 - 0 0 0 0 0 0 0 0 0 0 0 0
36784 - 0 0 0 0 0 0 0 0 0 0 0 0
36785 - 0 0 0 0 0 0 0 0 0 0 0 0
36786 - 0 0 0 0 0 0 0 0 0 0 0 0
36787 - 0 0 0 0 0 0 0 0 0 0 0 0
36788 - 0 0 0 0 0 0 0 0 0 0 0 0
36789 - 0 0 0 0 0 0 0 0 1 0 0 0
36790 - 0 0 1 0 0 0 0 0 1 0 0 0
36791 - 0 0 0 0 0 0 0 0 0 0 0 0
36792 - 0 0 0 0 0 0 0 0 0 0 0 0
36793 - 0 0 0 0 0 0 0 0 0 0 0 0
36794 - 0 0 0 0 0 0 0 0 0 0 0 0
36795 - 0 0 0 0 0 0 0 0 0 14 14 14
36796 - 42 42 42 94 94 94 14 14 14 101 101 101
36797 -128 128 128 2 2 6 18 18 18 116 116 116
36798 -118 98 46 121 92 8 121 92 8 98 78 10
36799 -162 162 162 106 106 106 2 2 6 2 2 6
36800 - 2 2 6 195 195 195 195 195 195 6 6 6
36801 - 2 2 6 2 2 6 2 2 6 2 2 6
36802 - 74 74 74 62 62 62 22 22 22 6 6 6
36803 - 0 0 0 0 0 0 0 0 0 0 0 0
36804 - 0 0 0 0 0 0 0 0 0 0 0 0
36805 - 0 0 0 0 0 0 0 0 0 0 0 0
36806 - 0 0 0 0 0 0 0 0 0 0 0 0
36807 - 0 0 0 0 0 0 0 0 0 0 0 0
36808 - 0 0 0 0 0 0 0 0 0 0 0 0
36809 - 0 0 0 0 0 0 0 0 1 0 0 1
36810 - 0 0 1 0 0 0 0 0 1 0 0 0
36811 - 0 0 0 0 0 0 0 0 0 0 0 0
36812 - 0 0 0 0 0 0 0 0 0 0 0 0
36813 - 0 0 0 0 0 0 0 0 0 0 0 0
36814 - 0 0 0 0 0 0 0 0 0 0 0 0
36815 - 0 0 0 0 0 0 0 0 0 10 10 10
36816 - 38 38 38 90 90 90 14 14 14 58 58 58
36817 -210 210 210 26 26 26 54 38 6 154 114 10
36818 -226 170 11 236 186 11 225 175 15 184 144 12
36819 -215 174 15 175 146 61 37 26 9 2 2 6
36820 - 70 70 70 246 246 246 138 138 138 2 2 6
36821 - 2 2 6 2 2 6 2 2 6 2 2 6
36822 - 70 70 70 66 66 66 26 26 26 6 6 6
36823 - 0 0 0 0 0 0 0 0 0 0 0 0
36824 - 0 0 0 0 0 0 0 0 0 0 0 0
36825 - 0 0 0 0 0 0 0 0 0 0 0 0
36826 - 0 0 0 0 0 0 0 0 0 0 0 0
36827 - 0 0 0 0 0 0 0 0 0 0 0 0
36828 - 0 0 0 0 0 0 0 0 0 0 0 0
36829 - 0 0 0 0 0 0 0 0 0 0 0 0
36830 - 0 0 0 0 0 0 0 0 0 0 0 0
36831 - 0 0 0 0 0 0 0 0 0 0 0 0
36832 - 0 0 0 0 0 0 0 0 0 0 0 0
36833 - 0 0 0 0 0 0 0 0 0 0 0 0
36834 - 0 0 0 0 0 0 0 0 0 0 0 0
36835 - 0 0 0 0 0 0 0 0 0 10 10 10
36836 - 38 38 38 86 86 86 14 14 14 10 10 10
36837 -195 195 195 188 164 115 192 133 9 225 175 15
36838 -239 182 13 234 190 10 232 195 16 232 200 30
36839 -245 207 45 241 208 19 232 195 16 184 144 12
36840 -218 194 134 211 206 186 42 42 42 2 2 6
36841 - 2 2 6 2 2 6 2 2 6 2 2 6
36842 - 50 50 50 74 74 74 30 30 30 6 6 6
36843 - 0 0 0 0 0 0 0 0 0 0 0 0
36844 - 0 0 0 0 0 0 0 0 0 0 0 0
36845 - 0 0 0 0 0 0 0 0 0 0 0 0
36846 - 0 0 0 0 0 0 0 0 0 0 0 0
36847 - 0 0 0 0 0 0 0 0 0 0 0 0
36848 - 0 0 0 0 0 0 0 0 0 0 0 0
36849 - 0 0 0 0 0 0 0 0 0 0 0 0
36850 - 0 0 0 0 0 0 0 0 0 0 0 0
36851 - 0 0 0 0 0 0 0 0 0 0 0 0
36852 - 0 0 0 0 0 0 0 0 0 0 0 0
36853 - 0 0 0 0 0 0 0 0 0 0 0 0
36854 - 0 0 0 0 0 0 0 0 0 0 0 0
36855 - 0 0 0 0 0 0 0 0 0 10 10 10
36856 - 34 34 34 86 86 86 14 14 14 2 2 6
36857 -121 87 25 192 133 9 219 162 10 239 182 13
36858 -236 186 11 232 195 16 241 208 19 244 214 54
36859 -246 218 60 246 218 38 246 215 20 241 208 19
36860 -241 208 19 226 184 13 121 87 25 2 2 6
36861 - 2 2 6 2 2 6 2 2 6 2 2 6
36862 - 50 50 50 82 82 82 34 34 34 10 10 10
36863 - 0 0 0 0 0 0 0 0 0 0 0 0
36864 - 0 0 0 0 0 0 0 0 0 0 0 0
36865 - 0 0 0 0 0 0 0 0 0 0 0 0
36866 - 0 0 0 0 0 0 0 0 0 0 0 0
36867 - 0 0 0 0 0 0 0 0 0 0 0 0
36868 - 0 0 0 0 0 0 0 0 0 0 0 0
36869 - 0 0 0 0 0 0 0 0 0 0 0 0
36870 - 0 0 0 0 0 0 0 0 0 0 0 0
36871 - 0 0 0 0 0 0 0 0 0 0 0 0
36872 - 0 0 0 0 0 0 0 0 0 0 0 0
36873 - 0 0 0 0 0 0 0 0 0 0 0 0
36874 - 0 0 0 0 0 0 0 0 0 0 0 0
36875 - 0 0 0 0 0 0 0 0 0 10 10 10
36876 - 34 34 34 82 82 82 30 30 30 61 42 6
36877 -180 123 7 206 145 10 230 174 11 239 182 13
36878 -234 190 10 238 202 15 241 208 19 246 218 74
36879 -246 218 38 246 215 20 246 215 20 246 215 20
36880 -226 184 13 215 174 15 184 144 12 6 6 6
36881 - 2 2 6 2 2 6 2 2 6 2 2 6
36882 - 26 26 26 94 94 94 42 42 42 14 14 14
36883 - 0 0 0 0 0 0 0 0 0 0 0 0
36884 - 0 0 0 0 0 0 0 0 0 0 0 0
36885 - 0 0 0 0 0 0 0 0 0 0 0 0
36886 - 0 0 0 0 0 0 0 0 0 0 0 0
36887 - 0 0 0 0 0 0 0 0 0 0 0 0
36888 - 0 0 0 0 0 0 0 0 0 0 0 0
36889 - 0 0 0 0 0 0 0 0 0 0 0 0
36890 - 0 0 0 0 0 0 0 0 0 0 0 0
36891 - 0 0 0 0 0 0 0 0 0 0 0 0
36892 - 0 0 0 0 0 0 0 0 0 0 0 0
36893 - 0 0 0 0 0 0 0 0 0 0 0 0
36894 - 0 0 0 0 0 0 0 0 0 0 0 0
36895 - 0 0 0 0 0 0 0 0 0 10 10 10
36896 - 30 30 30 78 78 78 50 50 50 104 69 6
36897 -192 133 9 216 158 10 236 178 12 236 186 11
36898 -232 195 16 241 208 19 244 214 54 245 215 43
36899 -246 215 20 246 215 20 241 208 19 198 155 10
36900 -200 144 11 216 158 10 156 118 10 2 2 6
36901 - 2 2 6 2 2 6 2 2 6 2 2 6
36902 - 6 6 6 90 90 90 54 54 54 18 18 18
36903 - 6 6 6 0 0 0 0 0 0 0 0 0
36904 - 0 0 0 0 0 0 0 0 0 0 0 0
36905 - 0 0 0 0 0 0 0 0 0 0 0 0
36906 - 0 0 0 0 0 0 0 0 0 0 0 0
36907 - 0 0 0 0 0 0 0 0 0 0 0 0
36908 - 0 0 0 0 0 0 0 0 0 0 0 0
36909 - 0 0 0 0 0 0 0 0 0 0 0 0
36910 - 0 0 0 0 0 0 0 0 0 0 0 0
36911 - 0 0 0 0 0 0 0 0 0 0 0 0
36912 - 0 0 0 0 0 0 0 0 0 0 0 0
36913 - 0 0 0 0 0 0 0 0 0 0 0 0
36914 - 0 0 0 0 0 0 0 0 0 0 0 0
36915 - 0 0 0 0 0 0 0 0 0 10 10 10
36916 - 30 30 30 78 78 78 46 46 46 22 22 22
36917 -137 92 6 210 162 10 239 182 13 238 190 10
36918 -238 202 15 241 208 19 246 215 20 246 215 20
36919 -241 208 19 203 166 17 185 133 11 210 150 10
36920 -216 158 10 210 150 10 102 78 10 2 2 6
36921 - 6 6 6 54 54 54 14 14 14 2 2 6
36922 - 2 2 6 62 62 62 74 74 74 30 30 30
36923 - 10 10 10 0 0 0 0 0 0 0 0 0
36924 - 0 0 0 0 0 0 0 0 0 0 0 0
36925 - 0 0 0 0 0 0 0 0 0 0 0 0
36926 - 0 0 0 0 0 0 0 0 0 0 0 0
36927 - 0 0 0 0 0 0 0 0 0 0 0 0
36928 - 0 0 0 0 0 0 0 0 0 0 0 0
36929 - 0 0 0 0 0 0 0 0 0 0 0 0
36930 - 0 0 0 0 0 0 0 0 0 0 0 0
36931 - 0 0 0 0 0 0 0 0 0 0 0 0
36932 - 0 0 0 0 0 0 0 0 0 0 0 0
36933 - 0 0 0 0 0 0 0 0 0 0 0 0
36934 - 0 0 0 0 0 0 0 0 0 0 0 0
36935 - 0 0 0 0 0 0 0 0 0 10 10 10
36936 - 34 34 34 78 78 78 50 50 50 6 6 6
36937 - 94 70 30 139 102 15 190 146 13 226 184 13
36938 -232 200 30 232 195 16 215 174 15 190 146 13
36939 -168 122 10 192 133 9 210 150 10 213 154 11
36940 -202 150 34 182 157 106 101 98 89 2 2 6
36941 - 2 2 6 78 78 78 116 116 116 58 58 58
36942 - 2 2 6 22 22 22 90 90 90 46 46 46
36943 - 18 18 18 6 6 6 0 0 0 0 0 0
36944 - 0 0 0 0 0 0 0 0 0 0 0 0
36945 - 0 0 0 0 0 0 0 0 0 0 0 0
36946 - 0 0 0 0 0 0 0 0 0 0 0 0
36947 - 0 0 0 0 0 0 0 0 0 0 0 0
36948 - 0 0 0 0 0 0 0 0 0 0 0 0
36949 - 0 0 0 0 0 0 0 0 0 0 0 0
36950 - 0 0 0 0 0 0 0 0 0 0 0 0
36951 - 0 0 0 0 0 0 0 0 0 0 0 0
36952 - 0 0 0 0 0 0 0 0 0 0 0 0
36953 - 0 0 0 0 0 0 0 0 0 0 0 0
36954 - 0 0 0 0 0 0 0 0 0 0 0 0
36955 - 0 0 0 0 0 0 0 0 0 10 10 10
36956 - 38 38 38 86 86 86 50 50 50 6 6 6
36957 -128 128 128 174 154 114 156 107 11 168 122 10
36958 -198 155 10 184 144 12 197 138 11 200 144 11
36959 -206 145 10 206 145 10 197 138 11 188 164 115
36960 -195 195 195 198 198 198 174 174 174 14 14 14
36961 - 2 2 6 22 22 22 116 116 116 116 116 116
36962 - 22 22 22 2 2 6 74 74 74 70 70 70
36963 - 30 30 30 10 10 10 0 0 0 0 0 0
36964 - 0 0 0 0 0 0 0 0 0 0 0 0
36965 - 0 0 0 0 0 0 0 0 0 0 0 0
36966 - 0 0 0 0 0 0 0 0 0 0 0 0
36967 - 0 0 0 0 0 0 0 0 0 0 0 0
36968 - 0 0 0 0 0 0 0 0 0 0 0 0
36969 - 0 0 0 0 0 0 0 0 0 0 0 0
36970 - 0 0 0 0 0 0 0 0 0 0 0 0
36971 - 0 0 0 0 0 0 0 0 0 0 0 0
36972 - 0 0 0 0 0 0 0 0 0 0 0 0
36973 - 0 0 0 0 0 0 0 0 0 0 0 0
36974 - 0 0 0 0 0 0 0 0 0 0 0 0
36975 - 0 0 0 0 0 0 6 6 6 18 18 18
36976 - 50 50 50 101 101 101 26 26 26 10 10 10
36977 -138 138 138 190 190 190 174 154 114 156 107 11
36978 -197 138 11 200 144 11 197 138 11 192 133 9
36979 -180 123 7 190 142 34 190 178 144 187 187 187
36980 -202 202 202 221 221 221 214 214 214 66 66 66
36981 - 2 2 6 2 2 6 50 50 50 62 62 62
36982 - 6 6 6 2 2 6 10 10 10 90 90 90
36983 - 50 50 50 18 18 18 6 6 6 0 0 0
36984 - 0 0 0 0 0 0 0 0 0 0 0 0
36985 - 0 0 0 0 0 0 0 0 0 0 0 0
36986 - 0 0 0 0 0 0 0 0 0 0 0 0
36987 - 0 0 0 0 0 0 0 0 0 0 0 0
36988 - 0 0 0 0 0 0 0 0 0 0 0 0
36989 - 0 0 0 0 0 0 0 0 0 0 0 0
36990 - 0 0 0 0 0 0 0 0 0 0 0 0
36991 - 0 0 0 0 0 0 0 0 0 0 0 0
36992 - 0 0 0 0 0 0 0 0 0 0 0 0
36993 - 0 0 0 0 0 0 0 0 0 0 0 0
36994 - 0 0 0 0 0 0 0 0 0 0 0 0
36995 - 0 0 0 0 0 0 10 10 10 34 34 34
36996 - 74 74 74 74 74 74 2 2 6 6 6 6
36997 -144 144 144 198 198 198 190 190 190 178 166 146
36998 -154 121 60 156 107 11 156 107 11 168 124 44
36999 -174 154 114 187 187 187 190 190 190 210 210 210
37000 -246 246 246 253 253 253 253 253 253 182 182 182
37001 - 6 6 6 2 2 6 2 2 6 2 2 6
37002 - 2 2 6 2 2 6 2 2 6 62 62 62
37003 - 74 74 74 34 34 34 14 14 14 0 0 0
37004 - 0 0 0 0 0 0 0 0 0 0 0 0
37005 - 0 0 0 0 0 0 0 0 0 0 0 0
37006 - 0 0 0 0 0 0 0 0 0 0 0 0
37007 - 0 0 0 0 0 0 0 0 0 0 0 0
37008 - 0 0 0 0 0 0 0 0 0 0 0 0
37009 - 0 0 0 0 0 0 0 0 0 0 0 0
37010 - 0 0 0 0 0 0 0 0 0 0 0 0
37011 - 0 0 0 0 0 0 0 0 0 0 0 0
37012 - 0 0 0 0 0 0 0 0 0 0 0 0
37013 - 0 0 0 0 0 0 0 0 0 0 0 0
37014 - 0 0 0 0 0 0 0 0 0 0 0 0
37015 - 0 0 0 10 10 10 22 22 22 54 54 54
37016 - 94 94 94 18 18 18 2 2 6 46 46 46
37017 -234 234 234 221 221 221 190 190 190 190 190 190
37018 -190 190 190 187 187 187 187 187 187 190 190 190
37019 -190 190 190 195 195 195 214 214 214 242 242 242
37020 -253 253 253 253 253 253 253 253 253 253 253 253
37021 - 82 82 82 2 2 6 2 2 6 2 2 6
37022 - 2 2 6 2 2 6 2 2 6 14 14 14
37023 - 86 86 86 54 54 54 22 22 22 6 6 6
37024 - 0 0 0 0 0 0 0 0 0 0 0 0
37025 - 0 0 0 0 0 0 0 0 0 0 0 0
37026 - 0 0 0 0 0 0 0 0 0 0 0 0
37027 - 0 0 0 0 0 0 0 0 0 0 0 0
37028 - 0 0 0 0 0 0 0 0 0 0 0 0
37029 - 0 0 0 0 0 0 0 0 0 0 0 0
37030 - 0 0 0 0 0 0 0 0 0 0 0 0
37031 - 0 0 0 0 0 0 0 0 0 0 0 0
37032 - 0 0 0 0 0 0 0 0 0 0 0 0
37033 - 0 0 0 0 0 0 0 0 0 0 0 0
37034 - 0 0 0 0 0 0 0 0 0 0 0 0
37035 - 6 6 6 18 18 18 46 46 46 90 90 90
37036 - 46 46 46 18 18 18 6 6 6 182 182 182
37037 -253 253 253 246 246 246 206 206 206 190 190 190
37038 -190 190 190 190 190 190 190 190 190 190 190 190
37039 -206 206 206 231 231 231 250 250 250 253 253 253
37040 -253 253 253 253 253 253 253 253 253 253 253 253
37041 -202 202 202 14 14 14 2 2 6 2 2 6
37042 - 2 2 6 2 2 6 2 2 6 2 2 6
37043 - 42 42 42 86 86 86 42 42 42 18 18 18
37044 - 6 6 6 0 0 0 0 0 0 0 0 0
37045 - 0 0 0 0 0 0 0 0 0 0 0 0
37046 - 0 0 0 0 0 0 0 0 0 0 0 0
37047 - 0 0 0 0 0 0 0 0 0 0 0 0
37048 - 0 0 0 0 0 0 0 0 0 0 0 0
37049 - 0 0 0 0 0 0 0 0 0 0 0 0
37050 - 0 0 0 0 0 0 0 0 0 0 0 0
37051 - 0 0 0 0 0 0 0 0 0 0 0 0
37052 - 0 0 0 0 0 0 0 0 0 0 0 0
37053 - 0 0 0 0 0 0 0 0 0 0 0 0
37054 - 0 0 0 0 0 0 0 0 0 6 6 6
37055 - 14 14 14 38 38 38 74 74 74 66 66 66
37056 - 2 2 6 6 6 6 90 90 90 250 250 250
37057 -253 253 253 253 253 253 238 238 238 198 198 198
37058 -190 190 190 190 190 190 195 195 195 221 221 221
37059 -246 246 246 253 253 253 253 253 253 253 253 253
37060 -253 253 253 253 253 253 253 253 253 253 253 253
37061 -253 253 253 82 82 82 2 2 6 2 2 6
37062 - 2 2 6 2 2 6 2 2 6 2 2 6
37063 - 2 2 6 78 78 78 70 70 70 34 34 34
37064 - 14 14 14 6 6 6 0 0 0 0 0 0
37065 - 0 0 0 0 0 0 0 0 0 0 0 0
37066 - 0 0 0 0 0 0 0 0 0 0 0 0
37067 - 0 0 0 0 0 0 0 0 0 0 0 0
37068 - 0 0 0 0 0 0 0 0 0 0 0 0
37069 - 0 0 0 0 0 0 0 0 0 0 0 0
37070 - 0 0 0 0 0 0 0 0 0 0 0 0
37071 - 0 0 0 0 0 0 0 0 0 0 0 0
37072 - 0 0 0 0 0 0 0 0 0 0 0 0
37073 - 0 0 0 0 0 0 0 0 0 0 0 0
37074 - 0 0 0 0 0 0 0 0 0 14 14 14
37075 - 34 34 34 66 66 66 78 78 78 6 6 6
37076 - 2 2 6 18 18 18 218 218 218 253 253 253
37077 -253 253 253 253 253 253 253 253 253 246 246 246
37078 -226 226 226 231 231 231 246 246 246 253 253 253
37079 -253 253 253 253 253 253 253 253 253 253 253 253
37080 -253 253 253 253 253 253 253 253 253 253 253 253
37081 -253 253 253 178 178 178 2 2 6 2 2 6
37082 - 2 2 6 2 2 6 2 2 6 2 2 6
37083 - 2 2 6 18 18 18 90 90 90 62 62 62
37084 - 30 30 30 10 10 10 0 0 0 0 0 0
37085 - 0 0 0 0 0 0 0 0 0 0 0 0
37086 - 0 0 0 0 0 0 0 0 0 0 0 0
37087 - 0 0 0 0 0 0 0 0 0 0 0 0
37088 - 0 0 0 0 0 0 0 0 0 0 0 0
37089 - 0 0 0 0 0 0 0 0 0 0 0 0
37090 - 0 0 0 0 0 0 0 0 0 0 0 0
37091 - 0 0 0 0 0 0 0 0 0 0 0 0
37092 - 0 0 0 0 0 0 0 0 0 0 0 0
37093 - 0 0 0 0 0 0 0 0 0 0 0 0
37094 - 0 0 0 0 0 0 10 10 10 26 26 26
37095 - 58 58 58 90 90 90 18 18 18 2 2 6
37096 - 2 2 6 110 110 110 253 253 253 253 253 253
37097 -253 253 253 253 253 253 253 253 253 253 253 253
37098 -250 250 250 253 253 253 253 253 253 253 253 253
37099 -253 253 253 253 253 253 253 253 253 253 253 253
37100 -253 253 253 253 253 253 253 253 253 253 253 253
37101 -253 253 253 231 231 231 18 18 18 2 2 6
37102 - 2 2 6 2 2 6 2 2 6 2 2 6
37103 - 2 2 6 2 2 6 18 18 18 94 94 94
37104 - 54 54 54 26 26 26 10 10 10 0 0 0
37105 - 0 0 0 0 0 0 0 0 0 0 0 0
37106 - 0 0 0 0 0 0 0 0 0 0 0 0
37107 - 0 0 0 0 0 0 0 0 0 0 0 0
37108 - 0 0 0 0 0 0 0 0 0 0 0 0
37109 - 0 0 0 0 0 0 0 0 0 0 0 0
37110 - 0 0 0 0 0 0 0 0 0 0 0 0
37111 - 0 0 0 0 0 0 0 0 0 0 0 0
37112 - 0 0 0 0 0 0 0 0 0 0 0 0
37113 - 0 0 0 0 0 0 0 0 0 0 0 0
37114 - 0 0 0 6 6 6 22 22 22 50 50 50
37115 - 90 90 90 26 26 26 2 2 6 2 2 6
37116 - 14 14 14 195 195 195 250 250 250 253 253 253
37117 -253 253 253 253 253 253 253 253 253 253 253 253
37118 -253 253 253 253 253 253 253 253 253 253 253 253
37119 -253 253 253 253 253 253 253 253 253 253 253 253
37120 -253 253 253 253 253 253 253 253 253 253 253 253
37121 -250 250 250 242 242 242 54 54 54 2 2 6
37122 - 2 2 6 2 2 6 2 2 6 2 2 6
37123 - 2 2 6 2 2 6 2 2 6 38 38 38
37124 - 86 86 86 50 50 50 22 22 22 6 6 6
37125 - 0 0 0 0 0 0 0 0 0 0 0 0
37126 - 0 0 0 0 0 0 0 0 0 0 0 0
37127 - 0 0 0 0 0 0 0 0 0 0 0 0
37128 - 0 0 0 0 0 0 0 0 0 0 0 0
37129 - 0 0 0 0 0 0 0 0 0 0 0 0
37130 - 0 0 0 0 0 0 0 0 0 0 0 0
37131 - 0 0 0 0 0 0 0 0 0 0 0 0
37132 - 0 0 0 0 0 0 0 0 0 0 0 0
37133 - 0 0 0 0 0 0 0 0 0 0 0 0
37134 - 6 6 6 14 14 14 38 38 38 82 82 82
37135 - 34 34 34 2 2 6 2 2 6 2 2 6
37136 - 42 42 42 195 195 195 246 246 246 253 253 253
37137 -253 253 253 253 253 253 253 253 253 250 250 250
37138 -242 242 242 242 242 242 250 250 250 253 253 253
37139 -253 253 253 253 253 253 253 253 253 253 253 253
37140 -253 253 253 250 250 250 246 246 246 238 238 238
37141 -226 226 226 231 231 231 101 101 101 6 6 6
37142 - 2 2 6 2 2 6 2 2 6 2 2 6
37143 - 2 2 6 2 2 6 2 2 6 2 2 6
37144 - 38 38 38 82 82 82 42 42 42 14 14 14
37145 - 6 6 6 0 0 0 0 0 0 0 0 0
37146 - 0 0 0 0 0 0 0 0 0 0 0 0
37147 - 0 0 0 0 0 0 0 0 0 0 0 0
37148 - 0 0 0 0 0 0 0 0 0 0 0 0
37149 - 0 0 0 0 0 0 0 0 0 0 0 0
37150 - 0 0 0 0 0 0 0 0 0 0 0 0
37151 - 0 0 0 0 0 0 0 0 0 0 0 0
37152 - 0 0 0 0 0 0 0 0 0 0 0 0
37153 - 0 0 0 0 0 0 0 0 0 0 0 0
37154 - 10 10 10 26 26 26 62 62 62 66 66 66
37155 - 2 2 6 2 2 6 2 2 6 6 6 6
37156 - 70 70 70 170 170 170 206 206 206 234 234 234
37157 -246 246 246 250 250 250 250 250 250 238 238 238
37158 -226 226 226 231 231 231 238 238 238 250 250 250
37159 -250 250 250 250 250 250 246 246 246 231 231 231
37160 -214 214 214 206 206 206 202 202 202 202 202 202
37161 -198 198 198 202 202 202 182 182 182 18 18 18
37162 - 2 2 6 2 2 6 2 2 6 2 2 6
37163 - 2 2 6 2 2 6 2 2 6 2 2 6
37164 - 2 2 6 62 62 62 66 66 66 30 30 30
37165 - 10 10 10 0 0 0 0 0 0 0 0 0
37166 - 0 0 0 0 0 0 0 0 0 0 0 0
37167 - 0 0 0 0 0 0 0 0 0 0 0 0
37168 - 0 0 0 0 0 0 0 0 0 0 0 0
37169 - 0 0 0 0 0 0 0 0 0 0 0 0
37170 - 0 0 0 0 0 0 0 0 0 0 0 0
37171 - 0 0 0 0 0 0 0 0 0 0 0 0
37172 - 0 0 0 0 0 0 0 0 0 0 0 0
37173 - 0 0 0 0 0 0 0 0 0 0 0 0
37174 - 14 14 14 42 42 42 82 82 82 18 18 18
37175 - 2 2 6 2 2 6 2 2 6 10 10 10
37176 - 94 94 94 182 182 182 218 218 218 242 242 242
37177 -250 250 250 253 253 253 253 253 253 250 250 250
37178 -234 234 234 253 253 253 253 253 253 253 253 253
37179 -253 253 253 253 253 253 253 253 253 246 246 246
37180 -238 238 238 226 226 226 210 210 210 202 202 202
37181 -195 195 195 195 195 195 210 210 210 158 158 158
37182 - 6 6 6 14 14 14 50 50 50 14 14 14
37183 - 2 2 6 2 2 6 2 2 6 2 2 6
37184 - 2 2 6 6 6 6 86 86 86 46 46 46
37185 - 18 18 18 6 6 6 0 0 0 0 0 0
37186 - 0 0 0 0 0 0 0 0 0 0 0 0
37187 - 0 0 0 0 0 0 0 0 0 0 0 0
37188 - 0 0 0 0 0 0 0 0 0 0 0 0
37189 - 0 0 0 0 0 0 0 0 0 0 0 0
37190 - 0 0 0 0 0 0 0 0 0 0 0 0
37191 - 0 0 0 0 0 0 0 0 0 0 0 0
37192 - 0 0 0 0 0 0 0 0 0 0 0 0
37193 - 0 0 0 0 0 0 0 0 0 6 6 6
37194 - 22 22 22 54 54 54 70 70 70 2 2 6
37195 - 2 2 6 10 10 10 2 2 6 22 22 22
37196 -166 166 166 231 231 231 250 250 250 253 253 253
37197 -253 253 253 253 253 253 253 253 253 250 250 250
37198 -242 242 242 253 253 253 253 253 253 253 253 253
37199 -253 253 253 253 253 253 253 253 253 253 253 253
37200 -253 253 253 253 253 253 253 253 253 246 246 246
37201 -231 231 231 206 206 206 198 198 198 226 226 226
37202 - 94 94 94 2 2 6 6 6 6 38 38 38
37203 - 30 30 30 2 2 6 2 2 6 2 2 6
37204 - 2 2 6 2 2 6 62 62 62 66 66 66
37205 - 26 26 26 10 10 10 0 0 0 0 0 0
37206 - 0 0 0 0 0 0 0 0 0 0 0 0
37207 - 0 0 0 0 0 0 0 0 0 0 0 0
37208 - 0 0 0 0 0 0 0 0 0 0 0 0
37209 - 0 0 0 0 0 0 0 0 0 0 0 0
37210 - 0 0 0 0 0 0 0 0 0 0 0 0
37211 - 0 0 0 0 0 0 0 0 0 0 0 0
37212 - 0 0 0 0 0 0 0 0 0 0 0 0
37213 - 0 0 0 0 0 0 0 0 0 10 10 10
37214 - 30 30 30 74 74 74 50 50 50 2 2 6
37215 - 26 26 26 26 26 26 2 2 6 106 106 106
37216 -238 238 238 253 253 253 253 253 253 253 253 253
37217 -253 253 253 253 253 253 253 253 253 253 253 253
37218 -253 253 253 253 253 253 253 253 253 253 253 253
37219 -253 253 253 253 253 253 253 253 253 253 253 253
37220 -253 253 253 253 253 253 253 253 253 253 253 253
37221 -253 253 253 246 246 246 218 218 218 202 202 202
37222 -210 210 210 14 14 14 2 2 6 2 2 6
37223 - 30 30 30 22 22 22 2 2 6 2 2 6
37224 - 2 2 6 2 2 6 18 18 18 86 86 86
37225 - 42 42 42 14 14 14 0 0 0 0 0 0
37226 - 0 0 0 0 0 0 0 0 0 0 0 0
37227 - 0 0 0 0 0 0 0 0 0 0 0 0
37228 - 0 0 0 0 0 0 0 0 0 0 0 0
37229 - 0 0 0 0 0 0 0 0 0 0 0 0
37230 - 0 0 0 0 0 0 0 0 0 0 0 0
37231 - 0 0 0 0 0 0 0 0 0 0 0 0
37232 - 0 0 0 0 0 0 0 0 0 0 0 0
37233 - 0 0 0 0 0 0 0 0 0 14 14 14
37234 - 42 42 42 90 90 90 22 22 22 2 2 6
37235 - 42 42 42 2 2 6 18 18 18 218 218 218
37236 -253 253 253 253 253 253 253 253 253 253 253 253
37237 -253 253 253 253 253 253 253 253 253 253 253 253
37238 -253 253 253 253 253 253 253 253 253 253 253 253
37239 -253 253 253 253 253 253 253 253 253 253 253 253
37240 -253 253 253 253 253 253 253 253 253 253 253 253
37241 -253 253 253 253 253 253 250 250 250 221 221 221
37242 -218 218 218 101 101 101 2 2 6 14 14 14
37243 - 18 18 18 38 38 38 10 10 10 2 2 6
37244 - 2 2 6 2 2 6 2 2 6 78 78 78
37245 - 58 58 58 22 22 22 6 6 6 0 0 0
37246 - 0 0 0 0 0 0 0 0 0 0 0 0
37247 - 0 0 0 0 0 0 0 0 0 0 0 0
37248 - 0 0 0 0 0 0 0 0 0 0 0 0
37249 - 0 0 0 0 0 0 0 0 0 0 0 0
37250 - 0 0 0 0 0 0 0 0 0 0 0 0
37251 - 0 0 0 0 0 0 0 0 0 0 0 0
37252 - 0 0 0 0 0 0 0 0 0 0 0 0
37253 - 0 0 0 0 0 0 6 6 6 18 18 18
37254 - 54 54 54 82 82 82 2 2 6 26 26 26
37255 - 22 22 22 2 2 6 123 123 123 253 253 253
37256 -253 253 253 253 253 253 253 253 253 253 253 253
37257 -253 253 253 253 253 253 253 253 253 253 253 253
37258 -253 253 253 253 253 253 253 253 253 253 253 253
37259 -253 253 253 253 253 253 253 253 253 253 253 253
37260 -253 253 253 253 253 253 253 253 253 253 253 253
37261 -253 253 253 253 253 253 253 253 253 250 250 250
37262 -238 238 238 198 198 198 6 6 6 38 38 38
37263 - 58 58 58 26 26 26 38 38 38 2 2 6
37264 - 2 2 6 2 2 6 2 2 6 46 46 46
37265 - 78 78 78 30 30 30 10 10 10 0 0 0
37266 - 0 0 0 0 0 0 0 0 0 0 0 0
37267 - 0 0 0 0 0 0 0 0 0 0 0 0
37268 - 0 0 0 0 0 0 0 0 0 0 0 0
37269 - 0 0 0 0 0 0 0 0 0 0 0 0
37270 - 0 0 0 0 0 0 0 0 0 0 0 0
37271 - 0 0 0 0 0 0 0 0 0 0 0 0
37272 - 0 0 0 0 0 0 0 0 0 0 0 0
37273 - 0 0 0 0 0 0 10 10 10 30 30 30
37274 - 74 74 74 58 58 58 2 2 6 42 42 42
37275 - 2 2 6 22 22 22 231 231 231 253 253 253
37276 -253 253 253 253 253 253 253 253 253 253 253 253
37277 -253 253 253 253 253 253 253 253 253 250 250 250
37278 -253 253 253 253 253 253 253 253 253 253 253 253
37279 -253 253 253 253 253 253 253 253 253 253 253 253
37280 -253 253 253 253 253 253 253 253 253 253 253 253
37281 -253 253 253 253 253 253 253 253 253 253 253 253
37282 -253 253 253 246 246 246 46 46 46 38 38 38
37283 - 42 42 42 14 14 14 38 38 38 14 14 14
37284 - 2 2 6 2 2 6 2 2 6 6 6 6
37285 - 86 86 86 46 46 46 14 14 14 0 0 0
37286 - 0 0 0 0 0 0 0 0 0 0 0 0
37287 - 0 0 0 0 0 0 0 0 0 0 0 0
37288 - 0 0 0 0 0 0 0 0 0 0 0 0
37289 - 0 0 0 0 0 0 0 0 0 0 0 0
37290 - 0 0 0 0 0 0 0 0 0 0 0 0
37291 - 0 0 0 0 0 0 0 0 0 0 0 0
37292 - 0 0 0 0 0 0 0 0 0 0 0 0
37293 - 0 0 0 6 6 6 14 14 14 42 42 42
37294 - 90 90 90 18 18 18 18 18 18 26 26 26
37295 - 2 2 6 116 116 116 253 253 253 253 253 253
37296 -253 253 253 253 253 253 253 253 253 253 253 253
37297 -253 253 253 253 253 253 250 250 250 238 238 238
37298 -253 253 253 253 253 253 253 253 253 253 253 253
37299 -253 253 253 253 253 253 253 253 253 253 253 253
37300 -253 253 253 253 253 253 253 253 253 253 253 253
37301 -253 253 253 253 253 253 253 253 253 253 253 253
37302 -253 253 253 253 253 253 94 94 94 6 6 6
37303 - 2 2 6 2 2 6 10 10 10 34 34 34
37304 - 2 2 6 2 2 6 2 2 6 2 2 6
37305 - 74 74 74 58 58 58 22 22 22 6 6 6
37306 - 0 0 0 0 0 0 0 0 0 0 0 0
37307 - 0 0 0 0 0 0 0 0 0 0 0 0
37308 - 0 0 0 0 0 0 0 0 0 0 0 0
37309 - 0 0 0 0 0 0 0 0 0 0 0 0
37310 - 0 0 0 0 0 0 0 0 0 0 0 0
37311 - 0 0 0 0 0 0 0 0 0 0 0 0
37312 - 0 0 0 0 0 0 0 0 0 0 0 0
37313 - 0 0 0 10 10 10 26 26 26 66 66 66
37314 - 82 82 82 2 2 6 38 38 38 6 6 6
37315 - 14 14 14 210 210 210 253 253 253 253 253 253
37316 -253 253 253 253 253 253 253 253 253 253 253 253
37317 -253 253 253 253 253 253 246 246 246 242 242 242
37318 -253 253 253 253 253 253 253 253 253 253 253 253
37319 -253 253 253 253 253 253 253 253 253 253 253 253
37320 -253 253 253 253 253 253 253 253 253 253 253 253
37321 -253 253 253 253 253 253 253 253 253 253 253 253
37322 -253 253 253 253 253 253 144 144 144 2 2 6
37323 - 2 2 6 2 2 6 2 2 6 46 46 46
37324 - 2 2 6 2 2 6 2 2 6 2 2 6
37325 - 42 42 42 74 74 74 30 30 30 10 10 10
37326 - 0 0 0 0 0 0 0 0 0 0 0 0
37327 - 0 0 0 0 0 0 0 0 0 0 0 0
37328 - 0 0 0 0 0 0 0 0 0 0 0 0
37329 - 0 0 0 0 0 0 0 0 0 0 0 0
37330 - 0 0 0 0 0 0 0 0 0 0 0 0
37331 - 0 0 0 0 0 0 0 0 0 0 0 0
37332 - 0 0 0 0 0 0 0 0 0 0 0 0
37333 - 6 6 6 14 14 14 42 42 42 90 90 90
37334 - 26 26 26 6 6 6 42 42 42 2 2 6
37335 - 74 74 74 250 250 250 253 253 253 253 253 253
37336 -253 253 253 253 253 253 253 253 253 253 253 253
37337 -253 253 253 253 253 253 242 242 242 242 242 242
37338 -253 253 253 253 253 253 253 253 253 253 253 253
37339 -253 253 253 253 253 253 253 253 253 253 253 253
37340 -253 253 253 253 253 253 253 253 253 253 253 253
37341 -253 253 253 253 253 253 253 253 253 253 253 253
37342 -253 253 253 253 253 253 182 182 182 2 2 6
37343 - 2 2 6 2 2 6 2 2 6 46 46 46
37344 - 2 2 6 2 2 6 2 2 6 2 2 6
37345 - 10 10 10 86 86 86 38 38 38 10 10 10
37346 - 0 0 0 0 0 0 0 0 0 0 0 0
37347 - 0 0 0 0 0 0 0 0 0 0 0 0
37348 - 0 0 0 0 0 0 0 0 0 0 0 0
37349 - 0 0 0 0 0 0 0 0 0 0 0 0
37350 - 0 0 0 0 0 0 0 0 0 0 0 0
37351 - 0 0 0 0 0 0 0 0 0 0 0 0
37352 - 0 0 0 0 0 0 0 0 0 0 0 0
37353 - 10 10 10 26 26 26 66 66 66 82 82 82
37354 - 2 2 6 22 22 22 18 18 18 2 2 6
37355 -149 149 149 253 253 253 253 253 253 253 253 253
37356 -253 253 253 253 253 253 253 253 253 253 253 253
37357 -253 253 253 253 253 253 234 234 234 242 242 242
37358 -253 253 253 253 253 253 253 253 253 253 253 253
37359 -253 253 253 253 253 253 253 253 253 253 253 253
37360 -253 253 253 253 253 253 253 253 253 253 253 253
37361 -253 253 253 253 253 253 253 253 253 253 253 253
37362 -253 253 253 253 253 253 206 206 206 2 2 6
37363 - 2 2 6 2 2 6 2 2 6 38 38 38
37364 - 2 2 6 2 2 6 2 2 6 2 2 6
37365 - 6 6 6 86 86 86 46 46 46 14 14 14
37366 - 0 0 0 0 0 0 0 0 0 0 0 0
37367 - 0 0 0 0 0 0 0 0 0 0 0 0
37368 - 0 0 0 0 0 0 0 0 0 0 0 0
37369 - 0 0 0 0 0 0 0 0 0 0 0 0
37370 - 0 0 0 0 0 0 0 0 0 0 0 0
37371 - 0 0 0 0 0 0 0 0 0 0 0 0
37372 - 0 0 0 0 0 0 0 0 0 6 6 6
37373 - 18 18 18 46 46 46 86 86 86 18 18 18
37374 - 2 2 6 34 34 34 10 10 10 6 6 6
37375 -210 210 210 253 253 253 253 253 253 253 253 253
37376 -253 253 253 253 253 253 253 253 253 253 253 253
37377 -253 253 253 253 253 253 234 234 234 242 242 242
37378 -253 253 253 253 253 253 253 253 253 253 253 253
37379 -253 253 253 253 253 253 253 253 253 253 253 253
37380 -253 253 253 253 253 253 253 253 253 253 253 253
37381 -253 253 253 253 253 253 253 253 253 253 253 253
37382 -253 253 253 253 253 253 221 221 221 6 6 6
37383 - 2 2 6 2 2 6 6 6 6 30 30 30
37384 - 2 2 6 2 2 6 2 2 6 2 2 6
37385 - 2 2 6 82 82 82 54 54 54 18 18 18
37386 - 6 6 6 0 0 0 0 0 0 0 0 0
37387 - 0 0 0 0 0 0 0 0 0 0 0 0
37388 - 0 0 0 0 0 0 0 0 0 0 0 0
37389 - 0 0 0 0 0 0 0 0 0 0 0 0
37390 - 0 0 0 0 0 0 0 0 0 0 0 0
37391 - 0 0 0 0 0 0 0 0 0 0 0 0
37392 - 0 0 0 0 0 0 0 0 0 10 10 10
37393 - 26 26 26 66 66 66 62 62 62 2 2 6
37394 - 2 2 6 38 38 38 10 10 10 26 26 26
37395 -238 238 238 253 253 253 253 253 253 253 253 253
37396 -253 253 253 253 253 253 253 253 253 253 253 253
37397 -253 253 253 253 253 253 231 231 231 238 238 238
37398 -253 253 253 253 253 253 253 253 253 253 253 253
37399 -253 253 253 253 253 253 253 253 253 253 253 253
37400 -253 253 253 253 253 253 253 253 253 253 253 253
37401 -253 253 253 253 253 253 253 253 253 253 253 253
37402 -253 253 253 253 253 253 231 231 231 6 6 6
37403 - 2 2 6 2 2 6 10 10 10 30 30 30
37404 - 2 2 6 2 2 6 2 2 6 2 2 6
37405 - 2 2 6 66 66 66 58 58 58 22 22 22
37406 - 6 6 6 0 0 0 0 0 0 0 0 0
37407 - 0 0 0 0 0 0 0 0 0 0 0 0
37408 - 0 0 0 0 0 0 0 0 0 0 0 0
37409 - 0 0 0 0 0 0 0 0 0 0 0 0
37410 - 0 0 0 0 0 0 0 0 0 0 0 0
37411 - 0 0 0 0 0 0 0 0 0 0 0 0
37412 - 0 0 0 0 0 0 0 0 0 10 10 10
37413 - 38 38 38 78 78 78 6 6 6 2 2 6
37414 - 2 2 6 46 46 46 14 14 14 42 42 42
37415 -246 246 246 253 253 253 253 253 253 253 253 253
37416 -253 253 253 253 253 253 253 253 253 253 253 253
37417 -253 253 253 253 253 253 231 231 231 242 242 242
37418 -253 253 253 253 253 253 253 253 253 253 253 253
37419 -253 253 253 253 253 253 253 253 253 253 253 253
37420 -253 253 253 253 253 253 253 253 253 253 253 253
37421 -253 253 253 253 253 253 253 253 253 253 253 253
37422 -253 253 253 253 253 253 234 234 234 10 10 10
37423 - 2 2 6 2 2 6 22 22 22 14 14 14
37424 - 2 2 6 2 2 6 2 2 6 2 2 6
37425 - 2 2 6 66 66 66 62 62 62 22 22 22
37426 - 6 6 6 0 0 0 0 0 0 0 0 0
37427 - 0 0 0 0 0 0 0 0 0 0 0 0
37428 - 0 0 0 0 0 0 0 0 0 0 0 0
37429 - 0 0 0 0 0 0 0 0 0 0 0 0
37430 - 0 0 0 0 0 0 0 0 0 0 0 0
37431 - 0 0 0 0 0 0 0 0 0 0 0 0
37432 - 0 0 0 0 0 0 6 6 6 18 18 18
37433 - 50 50 50 74 74 74 2 2 6 2 2 6
37434 - 14 14 14 70 70 70 34 34 34 62 62 62
37435 -250 250 250 253 253 253 253 253 253 253 253 253
37436 -253 253 253 253 253 253 253 253 253 253 253 253
37437 -253 253 253 253 253 253 231 231 231 246 246 246
37438 -253 253 253 253 253 253 253 253 253 253 253 253
37439 -253 253 253 253 253 253 253 253 253 253 253 253
37440 -253 253 253 253 253 253 253 253 253 253 253 253
37441 -253 253 253 253 253 253 253 253 253 253 253 253
37442 -253 253 253 253 253 253 234 234 234 14 14 14
37443 - 2 2 6 2 2 6 30 30 30 2 2 6
37444 - 2 2 6 2 2 6 2 2 6 2 2 6
37445 - 2 2 6 66 66 66 62 62 62 22 22 22
37446 - 6 6 6 0 0 0 0 0 0 0 0 0
37447 - 0 0 0 0 0 0 0 0 0 0 0 0
37448 - 0 0 0 0 0 0 0 0 0 0 0 0
37449 - 0 0 0 0 0 0 0 0 0 0 0 0
37450 - 0 0 0 0 0 0 0 0 0 0 0 0
37451 - 0 0 0 0 0 0 0 0 0 0 0 0
37452 - 0 0 0 0 0 0 6 6 6 18 18 18
37453 - 54 54 54 62 62 62 2 2 6 2 2 6
37454 - 2 2 6 30 30 30 46 46 46 70 70 70
37455 -250 250 250 253 253 253 253 253 253 253 253 253
37456 -253 253 253 253 253 253 253 253 253 253 253 253
37457 -253 253 253 253 253 253 231 231 231 246 246 246
37458 -253 253 253 253 253 253 253 253 253 253 253 253
37459 -253 253 253 253 253 253 253 253 253 253 253 253
37460 -253 253 253 253 253 253 253 253 253 253 253 253
37461 -253 253 253 253 253 253 253 253 253 253 253 253
37462 -253 253 253 253 253 253 226 226 226 10 10 10
37463 - 2 2 6 6 6 6 30 30 30 2 2 6
37464 - 2 2 6 2 2 6 2 2 6 2 2 6
37465 - 2 2 6 66 66 66 58 58 58 22 22 22
37466 - 6 6 6 0 0 0 0 0 0 0 0 0
37467 - 0 0 0 0 0 0 0 0 0 0 0 0
37468 - 0 0 0 0 0 0 0 0 0 0 0 0
37469 - 0 0 0 0 0 0 0 0 0 0 0 0
37470 - 0 0 0 0 0 0 0 0 0 0 0 0
37471 - 0 0 0 0 0 0 0 0 0 0 0 0
37472 - 0 0 0 0 0 0 6 6 6 22 22 22
37473 - 58 58 58 62 62 62 2 2 6 2 2 6
37474 - 2 2 6 2 2 6 30 30 30 78 78 78
37475 -250 250 250 253 253 253 253 253 253 253 253 253
37476 -253 253 253 253 253 253 253 253 253 253 253 253
37477 -253 253 253 253 253 253 231 231 231 246 246 246
37478 -253 253 253 253 253 253 253 253 253 253 253 253
37479 -253 253 253 253 253 253 253 253 253 253 253 253
37480 -253 253 253 253 253 253 253 253 253 253 253 253
37481 -253 253 253 253 253 253 253 253 253 253 253 253
37482 -253 253 253 253 253 253 206 206 206 2 2 6
37483 - 22 22 22 34 34 34 18 14 6 22 22 22
37484 - 26 26 26 18 18 18 6 6 6 2 2 6
37485 - 2 2 6 82 82 82 54 54 54 18 18 18
37486 - 6 6 6 0 0 0 0 0 0 0 0 0
37487 - 0 0 0 0 0 0 0 0 0 0 0 0
37488 - 0 0 0 0 0 0 0 0 0 0 0 0
37489 - 0 0 0 0 0 0 0 0 0 0 0 0
37490 - 0 0 0 0 0 0 0 0 0 0 0 0
37491 - 0 0 0 0 0 0 0 0 0 0 0 0
37492 - 0 0 0 0 0 0 6 6 6 26 26 26
37493 - 62 62 62 106 106 106 74 54 14 185 133 11
37494 -210 162 10 121 92 8 6 6 6 62 62 62
37495 -238 238 238 253 253 253 253 253 253 253 253 253
37496 -253 253 253 253 253 253 253 253 253 253 253 253
37497 -253 253 253 253 253 253 231 231 231 246 246 246
37498 -253 253 253 253 253 253 253 253 253 253 253 253
37499 -253 253 253 253 253 253 253 253 253 253 253 253
37500 -253 253 253 253 253 253 253 253 253 253 253 253
37501 -253 253 253 253 253 253 253 253 253 253 253 253
37502 -253 253 253 253 253 253 158 158 158 18 18 18
37503 - 14 14 14 2 2 6 2 2 6 2 2 6
37504 - 6 6 6 18 18 18 66 66 66 38 38 38
37505 - 6 6 6 94 94 94 50 50 50 18 18 18
37506 - 6 6 6 0 0 0 0 0 0 0 0 0
37507 - 0 0 0 0 0 0 0 0 0 0 0 0
37508 - 0 0 0 0 0 0 0 0 0 0 0 0
37509 - 0 0 0 0 0 0 0 0 0 0 0 0
37510 - 0 0 0 0 0 0 0 0 0 0 0 0
37511 - 0 0 0 0 0 0 0 0 0 6 6 6
37512 - 10 10 10 10 10 10 18 18 18 38 38 38
37513 - 78 78 78 142 134 106 216 158 10 242 186 14
37514 -246 190 14 246 190 14 156 118 10 10 10 10
37515 - 90 90 90 238 238 238 253 253 253 253 253 253
37516 -253 253 253 253 253 253 253 253 253 253 253 253
37517 -253 253 253 253 253 253 231 231 231 250 250 250
37518 -253 253 253 253 253 253 253 253 253 253 253 253
37519 -253 253 253 253 253 253 253 253 253 253 253 253
37520 -253 253 253 253 253 253 253 253 253 253 253 253
37521 -253 253 253 253 253 253 253 253 253 246 230 190
37522 -238 204 91 238 204 91 181 142 44 37 26 9
37523 - 2 2 6 2 2 6 2 2 6 2 2 6
37524 - 2 2 6 2 2 6 38 38 38 46 46 46
37525 - 26 26 26 106 106 106 54 54 54 18 18 18
37526 - 6 6 6 0 0 0 0 0 0 0 0 0
37527 - 0 0 0 0 0 0 0 0 0 0 0 0
37528 - 0 0 0 0 0 0 0 0 0 0 0 0
37529 - 0 0 0 0 0 0 0 0 0 0 0 0
37530 - 0 0 0 0 0 0 0 0 0 0 0 0
37531 - 0 0 0 6 6 6 14 14 14 22 22 22
37532 - 30 30 30 38 38 38 50 50 50 70 70 70
37533 -106 106 106 190 142 34 226 170 11 242 186 14
37534 -246 190 14 246 190 14 246 190 14 154 114 10
37535 - 6 6 6 74 74 74 226 226 226 253 253 253
37536 -253 253 253 253 253 253 253 253 253 253 253 253
37537 -253 253 253 253 253 253 231 231 231 250 250 250
37538 -253 253 253 253 253 253 253 253 253 253 253 253
37539 -253 253 253 253 253 253 253 253 253 253 253 253
37540 -253 253 253 253 253 253 253 253 253 253 253 253
37541 -253 253 253 253 253 253 253 253 253 228 184 62
37542 -241 196 14 241 208 19 232 195 16 38 30 10
37543 - 2 2 6 2 2 6 2 2 6 2 2 6
37544 - 2 2 6 6 6 6 30 30 30 26 26 26
37545 -203 166 17 154 142 90 66 66 66 26 26 26
37546 - 6 6 6 0 0 0 0 0 0 0 0 0
37547 - 0 0 0 0 0 0 0 0 0 0 0 0
37548 - 0 0 0 0 0 0 0 0 0 0 0 0
37549 - 0 0 0 0 0 0 0 0 0 0 0 0
37550 - 0 0 0 0 0 0 0 0 0 0 0 0
37551 - 6 6 6 18 18 18 38 38 38 58 58 58
37552 - 78 78 78 86 86 86 101 101 101 123 123 123
37553 -175 146 61 210 150 10 234 174 13 246 186 14
37554 -246 190 14 246 190 14 246 190 14 238 190 10
37555 -102 78 10 2 2 6 46 46 46 198 198 198
37556 -253 253 253 253 253 253 253 253 253 253 253 253
37557 -253 253 253 253 253 253 234 234 234 242 242 242
37558 -253 253 253 253 253 253 253 253 253 253 253 253
37559 -253 253 253 253 253 253 253 253 253 253 253 253
37560 -253 253 253 253 253 253 253 253 253 253 253 253
37561 -253 253 253 253 253 253 253 253 253 224 178 62
37562 -242 186 14 241 196 14 210 166 10 22 18 6
37563 - 2 2 6 2 2 6 2 2 6 2 2 6
37564 - 2 2 6 2 2 6 6 6 6 121 92 8
37565 -238 202 15 232 195 16 82 82 82 34 34 34
37566 - 10 10 10 0 0 0 0 0 0 0 0 0
37567 - 0 0 0 0 0 0 0 0 0 0 0 0
37568 - 0 0 0 0 0 0 0 0 0 0 0 0
37569 - 0 0 0 0 0 0 0 0 0 0 0 0
37570 - 0 0 0 0 0 0 0 0 0 0 0 0
37571 - 14 14 14 38 38 38 70 70 70 154 122 46
37572 -190 142 34 200 144 11 197 138 11 197 138 11
37573 -213 154 11 226 170 11 242 186 14 246 190 14
37574 -246 190 14 246 190 14 246 190 14 246 190 14
37575 -225 175 15 46 32 6 2 2 6 22 22 22
37576 -158 158 158 250 250 250 253 253 253 253 253 253
37577 -253 253 253 253 253 253 253 253 253 253 253 253
37578 -253 253 253 253 253 253 253 253 253 253 253 253
37579 -253 253 253 253 253 253 253 253 253 253 253 253
37580 -253 253 253 253 253 253 253 253 253 253 253 253
37581 -253 253 253 250 250 250 242 242 242 224 178 62
37582 -239 182 13 236 186 11 213 154 11 46 32 6
37583 - 2 2 6 2 2 6 2 2 6 2 2 6
37584 - 2 2 6 2 2 6 61 42 6 225 175 15
37585 -238 190 10 236 186 11 112 100 78 42 42 42
37586 - 14 14 14 0 0 0 0 0 0 0 0 0
37587 - 0 0 0 0 0 0 0 0 0 0 0 0
37588 - 0 0 0 0 0 0 0 0 0 0 0 0
37589 - 0 0 0 0 0 0 0 0 0 0 0 0
37590 - 0 0 0 0 0 0 0 0 0 6 6 6
37591 - 22 22 22 54 54 54 154 122 46 213 154 11
37592 -226 170 11 230 174 11 226 170 11 226 170 11
37593 -236 178 12 242 186 14 246 190 14 246 190 14
37594 -246 190 14 246 190 14 246 190 14 246 190 14
37595 -241 196 14 184 144 12 10 10 10 2 2 6
37596 - 6 6 6 116 116 116 242 242 242 253 253 253
37597 -253 253 253 253 253 253 253 253 253 253 253 253
37598 -253 253 253 253 253 253 253 253 253 253 253 253
37599 -253 253 253 253 253 253 253 253 253 253 253 253
37600 -253 253 253 253 253 253 253 253 253 253 253 253
37601 -253 253 253 231 231 231 198 198 198 214 170 54
37602 -236 178 12 236 178 12 210 150 10 137 92 6
37603 - 18 14 6 2 2 6 2 2 6 2 2 6
37604 - 6 6 6 70 47 6 200 144 11 236 178 12
37605 -239 182 13 239 182 13 124 112 88 58 58 58
37606 - 22 22 22 6 6 6 0 0 0 0 0 0
37607 - 0 0 0 0 0 0 0 0 0 0 0 0
37608 - 0 0 0 0 0 0 0 0 0 0 0 0
37609 - 0 0 0 0 0 0 0 0 0 0 0 0
37610 - 0 0 0 0 0 0 0 0 0 10 10 10
37611 - 30 30 30 70 70 70 180 133 36 226 170 11
37612 -239 182 13 242 186 14 242 186 14 246 186 14
37613 -246 190 14 246 190 14 246 190 14 246 190 14
37614 -246 190 14 246 190 14 246 190 14 246 190 14
37615 -246 190 14 232 195 16 98 70 6 2 2 6
37616 - 2 2 6 2 2 6 66 66 66 221 221 221
37617 -253 253 253 253 253 253 253 253 253 253 253 253
37618 -253 253 253 253 253 253 253 253 253 253 253 253
37619 -253 253 253 253 253 253 253 253 253 253 253 253
37620 -253 253 253 253 253 253 253 253 253 253 253 253
37621 -253 253 253 206 206 206 198 198 198 214 166 58
37622 -230 174 11 230 174 11 216 158 10 192 133 9
37623 -163 110 8 116 81 8 102 78 10 116 81 8
37624 -167 114 7 197 138 11 226 170 11 239 182 13
37625 -242 186 14 242 186 14 162 146 94 78 78 78
37626 - 34 34 34 14 14 14 6 6 6 0 0 0
37627 - 0 0 0 0 0 0 0 0 0 0 0 0
37628 - 0 0 0 0 0 0 0 0 0 0 0 0
37629 - 0 0 0 0 0 0 0 0 0 0 0 0
37630 - 0 0 0 0 0 0 0 0 0 6 6 6
37631 - 30 30 30 78 78 78 190 142 34 226 170 11
37632 -239 182 13 246 190 14 246 190 14 246 190 14
37633 -246 190 14 246 190 14 246 190 14 246 190 14
37634 -246 190 14 246 190 14 246 190 14 246 190 14
37635 -246 190 14 241 196 14 203 166 17 22 18 6
37636 - 2 2 6 2 2 6 2 2 6 38 38 38
37637 -218 218 218 253 253 253 253 253 253 253 253 253
37638 -253 253 253 253 253 253 253 253 253 253 253 253
37639 -253 253 253 253 253 253 253 253 253 253 253 253
37640 -253 253 253 253 253 253 253 253 253 253 253 253
37641 -250 250 250 206 206 206 198 198 198 202 162 69
37642 -226 170 11 236 178 12 224 166 10 210 150 10
37643 -200 144 11 197 138 11 192 133 9 197 138 11
37644 -210 150 10 226 170 11 242 186 14 246 190 14
37645 -246 190 14 246 186 14 225 175 15 124 112 88
37646 - 62 62 62 30 30 30 14 14 14 6 6 6
37647 - 0 0 0 0 0 0 0 0 0 0 0 0
37648 - 0 0 0 0 0 0 0 0 0 0 0 0
37649 - 0 0 0 0 0 0 0 0 0 0 0 0
37650 - 0 0 0 0 0 0 0 0 0 10 10 10
37651 - 30 30 30 78 78 78 174 135 50 224 166 10
37652 -239 182 13 246 190 14 246 190 14 246 190 14
37653 -246 190 14 246 190 14 246 190 14 246 190 14
37654 -246 190 14 246 190 14 246 190 14 246 190 14
37655 -246 190 14 246 190 14 241 196 14 139 102 15
37656 - 2 2 6 2 2 6 2 2 6 2 2 6
37657 - 78 78 78 250 250 250 253 253 253 253 253 253
37658 -253 253 253 253 253 253 253 253 253 253 253 253
37659 -253 253 253 253 253 253 253 253 253 253 253 253
37660 -253 253 253 253 253 253 253 253 253 253 253 253
37661 -250 250 250 214 214 214 198 198 198 190 150 46
37662 -219 162 10 236 178 12 234 174 13 224 166 10
37663 -216 158 10 213 154 11 213 154 11 216 158 10
37664 -226 170 11 239 182 13 246 190 14 246 190 14
37665 -246 190 14 246 190 14 242 186 14 206 162 42
37666 -101 101 101 58 58 58 30 30 30 14 14 14
37667 - 6 6 6 0 0 0 0 0 0 0 0 0
37668 - 0 0 0 0 0 0 0 0 0 0 0 0
37669 - 0 0 0 0 0 0 0 0 0 0 0 0
37670 - 0 0 0 0 0 0 0 0 0 10 10 10
37671 - 30 30 30 74 74 74 174 135 50 216 158 10
37672 -236 178 12 246 190 14 246 190 14 246 190 14
37673 -246 190 14 246 190 14 246 190 14 246 190 14
37674 -246 190 14 246 190 14 246 190 14 246 190 14
37675 -246 190 14 246 190 14 241 196 14 226 184 13
37676 - 61 42 6 2 2 6 2 2 6 2 2 6
37677 - 22 22 22 238 238 238 253 253 253 253 253 253
37678 -253 253 253 253 253 253 253 253 253 253 253 253
37679 -253 253 253 253 253 253 253 253 253 253 253 253
37680 -253 253 253 253 253 253 253 253 253 253 253 253
37681 -253 253 253 226 226 226 187 187 187 180 133 36
37682 -216 158 10 236 178 12 239 182 13 236 178 12
37683 -230 174 11 226 170 11 226 170 11 230 174 11
37684 -236 178 12 242 186 14 246 190 14 246 190 14
37685 -246 190 14 246 190 14 246 186 14 239 182 13
37686 -206 162 42 106 106 106 66 66 66 34 34 34
37687 - 14 14 14 6 6 6 0 0 0 0 0 0
37688 - 0 0 0 0 0 0 0 0 0 0 0 0
37689 - 0 0 0 0 0 0 0 0 0 0 0 0
37690 - 0 0 0 0 0 0 0 0 0 6 6 6
37691 - 26 26 26 70 70 70 163 133 67 213 154 11
37692 -236 178 12 246 190 14 246 190 14 246 190 14
37693 -246 190 14 246 190 14 246 190 14 246 190 14
37694 -246 190 14 246 190 14 246 190 14 246 190 14
37695 -246 190 14 246 190 14 246 190 14 241 196 14
37696 -190 146 13 18 14 6 2 2 6 2 2 6
37697 - 46 46 46 246 246 246 253 253 253 253 253 253
37698 -253 253 253 253 253 253 253 253 253 253 253 253
37699 -253 253 253 253 253 253 253 253 253 253 253 253
37700 -253 253 253 253 253 253 253 253 253 253 253 253
37701 -253 253 253 221 221 221 86 86 86 156 107 11
37702 -216 158 10 236 178 12 242 186 14 246 186 14
37703 -242 186 14 239 182 13 239 182 13 242 186 14
37704 -242 186 14 246 186 14 246 190 14 246 190 14
37705 -246 190 14 246 190 14 246 190 14 246 190 14
37706 -242 186 14 225 175 15 142 122 72 66 66 66
37707 - 30 30 30 10 10 10 0 0 0 0 0 0
37708 - 0 0 0 0 0 0 0 0 0 0 0 0
37709 - 0 0 0 0 0 0 0 0 0 0 0 0
37710 - 0 0 0 0 0 0 0 0 0 6 6 6
37711 - 26 26 26 70 70 70 163 133 67 210 150 10
37712 -236 178 12 246 190 14 246 190 14 246 190 14
37713 -246 190 14 246 190 14 246 190 14 246 190 14
37714 -246 190 14 246 190 14 246 190 14 246 190 14
37715 -246 190 14 246 190 14 246 190 14 246 190 14
37716 -232 195 16 121 92 8 34 34 34 106 106 106
37717 -221 221 221 253 253 253 253 253 253 253 253 253
37718 -253 253 253 253 253 253 253 253 253 253 253 253
37719 -253 253 253 253 253 253 253 253 253 253 253 253
37720 -253 253 253 253 253 253 253 253 253 253 253 253
37721 -242 242 242 82 82 82 18 14 6 163 110 8
37722 -216 158 10 236 178 12 242 186 14 246 190 14
37723 -246 190 14 246 190 14 246 190 14 246 190 14
37724 -246 190 14 246 190 14 246 190 14 246 190 14
37725 -246 190 14 246 190 14 246 190 14 246 190 14
37726 -246 190 14 246 190 14 242 186 14 163 133 67
37727 - 46 46 46 18 18 18 6 6 6 0 0 0
37728 - 0 0 0 0 0 0 0 0 0 0 0 0
37729 - 0 0 0 0 0 0 0 0 0 0 0 0
37730 - 0 0 0 0 0 0 0 0 0 10 10 10
37731 - 30 30 30 78 78 78 163 133 67 210 150 10
37732 -236 178 12 246 186 14 246 190 14 246 190 14
37733 -246 190 14 246 190 14 246 190 14 246 190 14
37734 -246 190 14 246 190 14 246 190 14 246 190 14
37735 -246 190 14 246 190 14 246 190 14 246 190 14
37736 -241 196 14 215 174 15 190 178 144 253 253 253
37737 -253 253 253 253 253 253 253 253 253 253 253 253
37738 -253 253 253 253 253 253 253 253 253 253 253 253
37739 -253 253 253 253 253 253 253 253 253 253 253 253
37740 -253 253 253 253 253 253 253 253 253 218 218 218
37741 - 58 58 58 2 2 6 22 18 6 167 114 7
37742 -216 158 10 236 178 12 246 186 14 246 190 14
37743 -246 190 14 246 190 14 246 190 14 246 190 14
37744 -246 190 14 246 190 14 246 190 14 246 190 14
37745 -246 190 14 246 190 14 246 190 14 246 190 14
37746 -246 190 14 246 186 14 242 186 14 190 150 46
37747 - 54 54 54 22 22 22 6 6 6 0 0 0
37748 - 0 0 0 0 0 0 0 0 0 0 0 0
37749 - 0 0 0 0 0 0 0 0 0 0 0 0
37750 - 0 0 0 0 0 0 0 0 0 14 14 14
37751 - 38 38 38 86 86 86 180 133 36 213 154 11
37752 -236 178 12 246 186 14 246 190 14 246 190 14
37753 -246 190 14 246 190 14 246 190 14 246 190 14
37754 -246 190 14 246 190 14 246 190 14 246 190 14
37755 -246 190 14 246 190 14 246 190 14 246 190 14
37756 -246 190 14 232 195 16 190 146 13 214 214 214
37757 -253 253 253 253 253 253 253 253 253 253 253 253
37758 -253 253 253 253 253 253 253 253 253 253 253 253
37759 -253 253 253 253 253 253 253 253 253 253 253 253
37760 -253 253 253 250 250 250 170 170 170 26 26 26
37761 - 2 2 6 2 2 6 37 26 9 163 110 8
37762 -219 162 10 239 182 13 246 186 14 246 190 14
37763 -246 190 14 246 190 14 246 190 14 246 190 14
37764 -246 190 14 246 190 14 246 190 14 246 190 14
37765 -246 190 14 246 190 14 246 190 14 246 190 14
37766 -246 186 14 236 178 12 224 166 10 142 122 72
37767 - 46 46 46 18 18 18 6 6 6 0 0 0
37768 - 0 0 0 0 0 0 0 0 0 0 0 0
37769 - 0 0 0 0 0 0 0 0 0 0 0 0
37770 - 0 0 0 0 0 0 6 6 6 18 18 18
37771 - 50 50 50 109 106 95 192 133 9 224 166 10
37772 -242 186 14 246 190 14 246 190 14 246 190 14
37773 -246 190 14 246 190 14 246 190 14 246 190 14
37774 -246 190 14 246 190 14 246 190 14 246 190 14
37775 -246 190 14 246 190 14 246 190 14 246 190 14
37776 -242 186 14 226 184 13 210 162 10 142 110 46
37777 -226 226 226 253 253 253 253 253 253 253 253 253
37778 -253 253 253 253 253 253 253 253 253 253 253 253
37779 -253 253 253 253 253 253 253 253 253 253 253 253
37780 -198 198 198 66 66 66 2 2 6 2 2 6
37781 - 2 2 6 2 2 6 50 34 6 156 107 11
37782 -219 162 10 239 182 13 246 186 14 246 190 14
37783 -246 190 14 246 190 14 246 190 14 246 190 14
37784 -246 190 14 246 190 14 246 190 14 246 190 14
37785 -246 190 14 246 190 14 246 190 14 242 186 14
37786 -234 174 13 213 154 11 154 122 46 66 66 66
37787 - 30 30 30 10 10 10 0 0 0 0 0 0
37788 - 0 0 0 0 0 0 0 0 0 0 0 0
37789 - 0 0 0 0 0 0 0 0 0 0 0 0
37790 - 0 0 0 0 0 0 6 6 6 22 22 22
37791 - 58 58 58 154 121 60 206 145 10 234 174 13
37792 -242 186 14 246 186 14 246 190 14 246 190 14
37793 -246 190 14 246 190 14 246 190 14 246 190 14
37794 -246 190 14 246 190 14 246 190 14 246 190 14
37795 -246 190 14 246 190 14 246 190 14 246 190 14
37796 -246 186 14 236 178 12 210 162 10 163 110 8
37797 - 61 42 6 138 138 138 218 218 218 250 250 250
37798 -253 253 253 253 253 253 253 253 253 250 250 250
37799 -242 242 242 210 210 210 144 144 144 66 66 66
37800 - 6 6 6 2 2 6 2 2 6 2 2 6
37801 - 2 2 6 2 2 6 61 42 6 163 110 8
37802 -216 158 10 236 178 12 246 190 14 246 190 14
37803 -246 190 14 246 190 14 246 190 14 246 190 14
37804 -246 190 14 246 190 14 246 190 14 246 190 14
37805 -246 190 14 239 182 13 230 174 11 216 158 10
37806 -190 142 34 124 112 88 70 70 70 38 38 38
37807 - 18 18 18 6 6 6 0 0 0 0 0 0
37808 - 0 0 0 0 0 0 0 0 0 0 0 0
37809 - 0 0 0 0 0 0 0 0 0 0 0 0
37810 - 0 0 0 0 0 0 6 6 6 22 22 22
37811 - 62 62 62 168 124 44 206 145 10 224 166 10
37812 -236 178 12 239 182 13 242 186 14 242 186 14
37813 -246 186 14 246 190 14 246 190 14 246 190 14
37814 -246 190 14 246 190 14 246 190 14 246 190 14
37815 -246 190 14 246 190 14 246 190 14 246 190 14
37816 -246 190 14 236 178 12 216 158 10 175 118 6
37817 - 80 54 7 2 2 6 6 6 6 30 30 30
37818 - 54 54 54 62 62 62 50 50 50 38 38 38
37819 - 14 14 14 2 2 6 2 2 6 2 2 6
37820 - 2 2 6 2 2 6 2 2 6 2 2 6
37821 - 2 2 6 6 6 6 80 54 7 167 114 7
37822 -213 154 11 236 178 12 246 190 14 246 190 14
37823 -246 190 14 246 190 14 246 190 14 246 190 14
37824 -246 190 14 242 186 14 239 182 13 239 182 13
37825 -230 174 11 210 150 10 174 135 50 124 112 88
37826 - 82 82 82 54 54 54 34 34 34 18 18 18
37827 - 6 6 6 0 0 0 0 0 0 0 0 0
37828 - 0 0 0 0 0 0 0 0 0 0 0 0
37829 - 0 0 0 0 0 0 0 0 0 0 0 0
37830 - 0 0 0 0 0 0 6 6 6 18 18 18
37831 - 50 50 50 158 118 36 192 133 9 200 144 11
37832 -216 158 10 219 162 10 224 166 10 226 170 11
37833 -230 174 11 236 178 12 239 182 13 239 182 13
37834 -242 186 14 246 186 14 246 190 14 246 190 14
37835 -246 190 14 246 190 14 246 190 14 246 190 14
37836 -246 186 14 230 174 11 210 150 10 163 110 8
37837 -104 69 6 10 10 10 2 2 6 2 2 6
37838 - 2 2 6 2 2 6 2 2 6 2 2 6
37839 - 2 2 6 2 2 6 2 2 6 2 2 6
37840 - 2 2 6 2 2 6 2 2 6 2 2 6
37841 - 2 2 6 6 6 6 91 60 6 167 114 7
37842 -206 145 10 230 174 11 242 186 14 246 190 14
37843 -246 190 14 246 190 14 246 186 14 242 186 14
37844 -239 182 13 230 174 11 224 166 10 213 154 11
37845 -180 133 36 124 112 88 86 86 86 58 58 58
37846 - 38 38 38 22 22 22 10 10 10 6 6 6
37847 - 0 0 0 0 0 0 0 0 0 0 0 0
37848 - 0 0 0 0 0 0 0 0 0 0 0 0
37849 - 0 0 0 0 0 0 0 0 0 0 0 0
37850 - 0 0 0 0 0 0 0 0 0 14 14 14
37851 - 34 34 34 70 70 70 138 110 50 158 118 36
37852 -167 114 7 180 123 7 192 133 9 197 138 11
37853 -200 144 11 206 145 10 213 154 11 219 162 10
37854 -224 166 10 230 174 11 239 182 13 242 186 14
37855 -246 186 14 246 186 14 246 186 14 246 186 14
37856 -239 182 13 216 158 10 185 133 11 152 99 6
37857 -104 69 6 18 14 6 2 2 6 2 2 6
37858 - 2 2 6 2 2 6 2 2 6 2 2 6
37859 - 2 2 6 2 2 6 2 2 6 2 2 6
37860 - 2 2 6 2 2 6 2 2 6 2 2 6
37861 - 2 2 6 6 6 6 80 54 7 152 99 6
37862 -192 133 9 219 162 10 236 178 12 239 182 13
37863 -246 186 14 242 186 14 239 182 13 236 178 12
37864 -224 166 10 206 145 10 192 133 9 154 121 60
37865 - 94 94 94 62 62 62 42 42 42 22 22 22
37866 - 14 14 14 6 6 6 0 0 0 0 0 0
37867 - 0 0 0 0 0 0 0 0 0 0 0 0
37868 - 0 0 0 0 0 0 0 0 0 0 0 0
37869 - 0 0 0 0 0 0 0 0 0 0 0 0
37870 - 0 0 0 0 0 0 0 0 0 6 6 6
37871 - 18 18 18 34 34 34 58 58 58 78 78 78
37872 -101 98 89 124 112 88 142 110 46 156 107 11
37873 -163 110 8 167 114 7 175 118 6 180 123 7
37874 -185 133 11 197 138 11 210 150 10 219 162 10
37875 -226 170 11 236 178 12 236 178 12 234 174 13
37876 -219 162 10 197 138 11 163 110 8 130 83 6
37877 - 91 60 6 10 10 10 2 2 6 2 2 6
37878 - 18 18 18 38 38 38 38 38 38 38 38 38
37879 - 38 38 38 38 38 38 38 38 38 38 38 38
37880 - 38 38 38 38 38 38 26 26 26 2 2 6
37881 - 2 2 6 6 6 6 70 47 6 137 92 6
37882 -175 118 6 200 144 11 219 162 10 230 174 11
37883 -234 174 13 230 174 11 219 162 10 210 150 10
37884 -192 133 9 163 110 8 124 112 88 82 82 82
37885 - 50 50 50 30 30 30 14 14 14 6 6 6
37886 - 0 0 0 0 0 0 0 0 0 0 0 0
37887 - 0 0 0 0 0 0 0 0 0 0 0 0
37888 - 0 0 0 0 0 0 0 0 0 0 0 0
37889 - 0 0 0 0 0 0 0 0 0 0 0 0
37890 - 0 0 0 0 0 0 0 0 0 0 0 0
37891 - 6 6 6 14 14 14 22 22 22 34 34 34
37892 - 42 42 42 58 58 58 74 74 74 86 86 86
37893 -101 98 89 122 102 70 130 98 46 121 87 25
37894 -137 92 6 152 99 6 163 110 8 180 123 7
37895 -185 133 11 197 138 11 206 145 10 200 144 11
37896 -180 123 7 156 107 11 130 83 6 104 69 6
37897 - 50 34 6 54 54 54 110 110 110 101 98 89
37898 - 86 86 86 82 82 82 78 78 78 78 78 78
37899 - 78 78 78 78 78 78 78 78 78 78 78 78
37900 - 78 78 78 82 82 82 86 86 86 94 94 94
37901 -106 106 106 101 101 101 86 66 34 124 80 6
37902 -156 107 11 180 123 7 192 133 9 200 144 11
37903 -206 145 10 200 144 11 192 133 9 175 118 6
37904 -139 102 15 109 106 95 70 70 70 42 42 42
37905 - 22 22 22 10 10 10 0 0 0 0 0 0
37906 - 0 0 0 0 0 0 0 0 0 0 0 0
37907 - 0 0 0 0 0 0 0 0 0 0 0 0
37908 - 0 0 0 0 0 0 0 0 0 0 0 0
37909 - 0 0 0 0 0 0 0 0 0 0 0 0
37910 - 0 0 0 0 0 0 0 0 0 0 0 0
37911 - 0 0 0 0 0 0 6 6 6 10 10 10
37912 - 14 14 14 22 22 22 30 30 30 38 38 38
37913 - 50 50 50 62 62 62 74 74 74 90 90 90
37914 -101 98 89 112 100 78 121 87 25 124 80 6
37915 -137 92 6 152 99 6 152 99 6 152 99 6
37916 -138 86 6 124 80 6 98 70 6 86 66 30
37917 -101 98 89 82 82 82 58 58 58 46 46 46
37918 - 38 38 38 34 34 34 34 34 34 34 34 34
37919 - 34 34 34 34 34 34 34 34 34 34 34 34
37920 - 34 34 34 34 34 34 38 38 38 42 42 42
37921 - 54 54 54 82 82 82 94 86 76 91 60 6
37922 -134 86 6 156 107 11 167 114 7 175 118 6
37923 -175 118 6 167 114 7 152 99 6 121 87 25
37924 -101 98 89 62 62 62 34 34 34 18 18 18
37925 - 6 6 6 0 0 0 0 0 0 0 0 0
37926 - 0 0 0 0 0 0 0 0 0 0 0 0
37927 - 0 0 0 0 0 0 0 0 0 0 0 0
37928 - 0 0 0 0 0 0 0 0 0 0 0 0
37929 - 0 0 0 0 0 0 0 0 0 0 0 0
37930 - 0 0 0 0 0 0 0 0 0 0 0 0
37931 - 0 0 0 0 0 0 0 0 0 0 0 0
37932 - 0 0 0 6 6 6 6 6 6 10 10 10
37933 - 18 18 18 22 22 22 30 30 30 42 42 42
37934 - 50 50 50 66 66 66 86 86 86 101 98 89
37935 -106 86 58 98 70 6 104 69 6 104 69 6
37936 -104 69 6 91 60 6 82 62 34 90 90 90
37937 - 62 62 62 38 38 38 22 22 22 14 14 14
37938 - 10 10 10 10 10 10 10 10 10 10 10 10
37939 - 10 10 10 10 10 10 6 6 6 10 10 10
37940 - 10 10 10 10 10 10 10 10 10 14 14 14
37941 - 22 22 22 42 42 42 70 70 70 89 81 66
37942 - 80 54 7 104 69 6 124 80 6 137 92 6
37943 -134 86 6 116 81 8 100 82 52 86 86 86
37944 - 58 58 58 30 30 30 14 14 14 6 6 6
37945 - 0 0 0 0 0 0 0 0 0 0 0 0
37946 - 0 0 0 0 0 0 0 0 0 0 0 0
37947 - 0 0 0 0 0 0 0 0 0 0 0 0
37948 - 0 0 0 0 0 0 0 0 0 0 0 0
37949 - 0 0 0 0 0 0 0 0 0 0 0 0
37950 - 0 0 0 0 0 0 0 0 0 0 0 0
37951 - 0 0 0 0 0 0 0 0 0 0 0 0
37952 - 0 0 0 0 0 0 0 0 0 0 0 0
37953 - 0 0 0 6 6 6 10 10 10 14 14 14
37954 - 18 18 18 26 26 26 38 38 38 54 54 54
37955 - 70 70 70 86 86 86 94 86 76 89 81 66
37956 - 89 81 66 86 86 86 74 74 74 50 50 50
37957 - 30 30 30 14 14 14 6 6 6 0 0 0
37958 - 0 0 0 0 0 0 0 0 0 0 0 0
37959 - 0 0 0 0 0 0 0 0 0 0 0 0
37960 - 0 0 0 0 0 0 0 0 0 0 0 0
37961 - 6 6 6 18 18 18 34 34 34 58 58 58
37962 - 82 82 82 89 81 66 89 81 66 89 81 66
37963 - 94 86 66 94 86 76 74 74 74 50 50 50
37964 - 26 26 26 14 14 14 6 6 6 0 0 0
37965 - 0 0 0 0 0 0 0 0 0 0 0 0
37966 - 0 0 0 0 0 0 0 0 0 0 0 0
37967 - 0 0 0 0 0 0 0 0 0 0 0 0
37968 - 0 0 0 0 0 0 0 0 0 0 0 0
37969 - 0 0 0 0 0 0 0 0 0 0 0 0
37970 - 0 0 0 0 0 0 0 0 0 0 0 0
37971 - 0 0 0 0 0 0 0 0 0 0 0 0
37972 - 0 0 0 0 0 0 0 0 0 0 0 0
37973 - 0 0 0 0 0 0 0 0 0 0 0 0
37974 - 6 6 6 6 6 6 14 14 14 18 18 18
37975 - 30 30 30 38 38 38 46 46 46 54 54 54
37976 - 50 50 50 42 42 42 30 30 30 18 18 18
37977 - 10 10 10 0 0 0 0 0 0 0 0 0
37978 - 0 0 0 0 0 0 0 0 0 0 0 0
37979 - 0 0 0 0 0 0 0 0 0 0 0 0
37980 - 0 0 0 0 0 0 0 0 0 0 0 0
37981 - 0 0 0 6 6 6 14 14 14 26 26 26
37982 - 38 38 38 50 50 50 58 58 58 58 58 58
37983 - 54 54 54 42 42 42 30 30 30 18 18 18
37984 - 10 10 10 0 0 0 0 0 0 0 0 0
37985 - 0 0 0 0 0 0 0 0 0 0 0 0
37986 - 0 0 0 0 0 0 0 0 0 0 0 0
37987 - 0 0 0 0 0 0 0 0 0 0 0 0
37988 - 0 0 0 0 0 0 0 0 0 0 0 0
37989 - 0 0 0 0 0 0 0 0 0 0 0 0
37990 - 0 0 0 0 0 0 0 0 0 0 0 0
37991 - 0 0 0 0 0 0 0 0 0 0 0 0
37992 - 0 0 0 0 0 0 0 0 0 0 0 0
37993 - 0 0 0 0 0 0 0 0 0 0 0 0
37994 - 0 0 0 0 0 0 0 0 0 6 6 6
37995 - 6 6 6 10 10 10 14 14 14 18 18 18
37996 - 18 18 18 14 14 14 10 10 10 6 6 6
37997 - 0 0 0 0 0 0 0 0 0 0 0 0
37998 - 0 0 0 0 0 0 0 0 0 0 0 0
37999 - 0 0 0 0 0 0 0 0 0 0 0 0
38000 - 0 0 0 0 0 0 0 0 0 0 0 0
38001 - 0 0 0 0 0 0 0 0 0 6 6 6
38002 - 14 14 14 18 18 18 22 22 22 22 22 22
38003 - 18 18 18 14 14 14 10 10 10 6 6 6
38004 - 0 0 0 0 0 0 0 0 0 0 0 0
38005 - 0 0 0 0 0 0 0 0 0 0 0 0
38006 - 0 0 0 0 0 0 0 0 0 0 0 0
38007 - 0 0 0 0 0 0 0 0 0 0 0 0
38008 - 0 0 0 0 0 0 0 0 0 0 0 0
38009 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38010 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38011 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38012 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38013 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38014 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38015 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38016 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38017 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38018 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38019 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38020 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38021 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38022 +4 4 4 4 4 4
38023 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38024 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38025 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38026 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38027 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38028 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38029 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38030 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38031 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38032 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38033 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38034 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38035 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38036 +4 4 4 4 4 4
38037 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38038 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38039 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38040 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38041 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38042 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38043 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38044 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38045 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38046 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38047 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38048 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38049 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38050 +4 4 4 4 4 4
38051 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38052 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38053 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38054 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38055 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38056 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38057 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38058 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38059 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38060 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38061 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38062 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38063 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38064 +4 4 4 4 4 4
38065 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38066 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38067 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38068 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38069 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38070 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38071 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38072 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38073 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38074 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38075 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38076 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38077 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38078 +4 4 4 4 4 4
38079 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38080 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38081 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38082 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38083 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38084 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38085 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38086 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38087 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38088 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38089 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38090 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38091 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38092 +4 4 4 4 4 4
38093 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38094 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38095 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38096 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38097 +4 4 4 4 4 4 4 4 4 3 3 3 0 0 0 0 0 0
38098 +0 0 0 0 0 0 0 0 0 0 0 0 3 3 3 4 4 4
38099 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38100 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38101 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38102 +4 4 4 4 4 4 4 4 4 4 4 4 1 1 1 0 0 0
38103 +0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
38104 +4 4 4 4 4 4 4 4 4 2 1 0 2 1 0 3 2 2
38105 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38106 +4 4 4 4 4 4
38107 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38108 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38109 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38110 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38111 +4 4 4 4 4 4 2 2 2 0 0 0 3 4 3 26 28 28
38112 +37 38 37 37 38 37 14 17 19 2 2 2 0 0 0 2 2 2
38113 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38114 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38115 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38116 +4 4 4 4 4 4 3 3 3 0 0 0 1 1 1 6 6 6
38117 +2 2 2 0 0 0 3 3 3 4 4 4 4 4 4 4 4 4
38118 +4 4 5 3 3 3 1 0 0 0 0 0 1 0 0 0 0 0
38119 +1 1 1 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38120 +4 4 4 4 4 4
38121 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38122 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38123 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38124 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38125 +2 2 2 0 0 0 0 0 0 14 17 19 60 74 84 137 136 137
38126 +153 152 153 137 136 137 125 124 125 60 73 81 6 6 6 3 1 0
38127 +0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
38128 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38129 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38130 +4 4 4 4 4 4 0 0 0 4 4 4 41 54 63 125 124 125
38131 +60 73 81 6 6 6 4 0 0 3 3 3 4 4 4 4 4 4
38132 +4 4 4 0 0 0 6 9 11 41 54 63 41 65 82 22 30 35
38133 +2 2 2 2 1 0 4 4 4 4 4 4 4 4 4 4 4 4
38134 +4 4 4 4 4 4
38135 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38136 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38137 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38138 +4 4 4 4 4 4 5 5 5 5 5 5 2 2 2 0 0 0
38139 +4 0 0 6 6 6 41 54 63 137 136 137 174 174 174 167 166 167
38140 +165 164 165 165 164 165 163 162 163 163 162 163 125 124 125 41 54 63
38141 +1 1 1 0 0 0 0 0 0 3 3 3 5 5 5 4 4 4
38142 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38143 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
38144 +3 3 3 2 0 0 4 0 0 60 73 81 156 155 156 167 166 167
38145 +163 162 163 85 115 134 5 7 8 0 0 0 4 4 4 5 5 5
38146 +0 0 0 2 5 5 55 98 126 90 154 193 90 154 193 72 125 159
38147 +37 51 59 2 0 0 1 1 1 4 5 5 4 4 4 4 4 4
38148 +4 4 4 4 4 4
38149 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38150 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38151 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38152 +4 4 4 5 5 5 4 4 4 1 1 1 0 0 0 3 3 3
38153 +37 38 37 125 124 125 163 162 163 174 174 174 158 157 158 158 157 158
38154 +156 155 156 156 155 156 158 157 158 165 164 165 174 174 174 166 165 166
38155 +125 124 125 16 19 21 1 0 0 0 0 0 0 0 0 4 4 4
38156 +5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
38157 +4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 1 1 1
38158 +0 0 0 0 0 0 37 38 37 153 152 153 174 174 174 158 157 158
38159 +174 174 174 163 162 163 37 38 37 4 3 3 4 0 0 1 1 1
38160 +0 0 0 22 40 52 101 161 196 101 161 196 90 154 193 101 161 196
38161 +64 123 161 14 17 19 0 0 0 4 4 4 4 4 4 4 4 4
38162 +4 4 4 4 4 4
38163 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38164 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38165 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
38166 +5 5 5 2 2 2 0 0 0 4 0 0 24 26 27 85 115 134
38167 +156 155 156 174 174 174 167 166 167 156 155 156 154 153 154 157 156 157
38168 +156 155 156 156 155 156 155 154 155 153 152 153 158 157 158 167 166 167
38169 +174 174 174 156 155 156 60 74 84 16 19 21 0 0 0 0 0 0
38170 +1 1 1 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
38171 +4 4 4 5 5 5 6 6 6 3 3 3 0 0 0 4 0 0
38172 +13 16 17 60 73 81 137 136 137 165 164 165 156 155 156 153 152 153
38173 +174 174 174 177 184 187 60 73 81 3 1 0 0 0 0 1 1 2
38174 +22 30 35 64 123 161 136 185 209 90 154 193 90 154 193 90 154 193
38175 +90 154 193 21 29 34 0 0 0 3 2 2 4 4 5 4 4 4
38176 +4 4 4 4 4 4
38177 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38178 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38179 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 3 3 3
38180 +0 0 0 0 0 0 10 13 16 60 74 84 157 156 157 174 174 174
38181 +174 174 174 158 157 158 153 152 153 154 153 154 156 155 156 155 154 155
38182 +156 155 156 155 154 155 154 153 154 157 156 157 154 153 154 153 152 153
38183 +163 162 163 174 174 174 177 184 187 137 136 137 60 73 81 13 16 17
38184 +4 0 0 0 0 0 3 3 3 5 5 5 4 4 4 4 4 4
38185 +5 5 5 4 4 4 1 1 1 0 0 0 3 3 3 41 54 63
38186 +131 129 131 174 174 174 174 174 174 174 174 174 167 166 167 174 174 174
38187 +190 197 201 137 136 137 24 26 27 4 0 0 16 21 25 50 82 103
38188 +90 154 193 136 185 209 90 154 193 101 161 196 101 161 196 101 161 196
38189 +31 91 132 3 6 7 0 0 0 4 4 4 4 4 4 4 4 4
38190 +4 4 4 4 4 4
38191 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38192 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38193 +4 4 4 4 4 4 4 4 4 2 2 2 0 0 0 4 0 0
38194 +4 0 0 43 57 68 137 136 137 177 184 187 174 174 174 163 162 163
38195 +155 154 155 155 154 155 156 155 156 155 154 155 158 157 158 165 164 165
38196 +167 166 167 166 165 166 163 162 163 157 156 157 155 154 155 155 154 155
38197 +153 152 153 156 155 156 167 166 167 174 174 174 174 174 174 131 129 131
38198 +41 54 63 5 5 5 0 0 0 0 0 0 3 3 3 4 4 4
38199 +1 1 1 0 0 0 1 0 0 26 28 28 125 124 125 174 174 174
38200 +177 184 187 174 174 174 174 174 174 156 155 156 131 129 131 137 136 137
38201 +125 124 125 24 26 27 4 0 0 41 65 82 90 154 193 136 185 209
38202 +136 185 209 101 161 196 53 118 160 37 112 160 90 154 193 34 86 122
38203 +7 12 15 0 0 0 4 4 4 4 4 4 4 4 4 4 4 4
38204 +4 4 4 4 4 4
38205 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38206 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38207 +4 4 4 3 3 3 0 0 0 0 0 0 5 5 5 37 38 37
38208 +125 124 125 167 166 167 174 174 174 167 166 167 158 157 158 155 154 155
38209 +156 155 156 156 155 156 156 155 156 163 162 163 167 166 167 155 154 155
38210 +137 136 137 153 152 153 156 155 156 165 164 165 163 162 163 156 155 156
38211 +156 155 156 156 155 156 155 154 155 158 157 158 166 165 166 174 174 174
38212 +167 166 167 125 124 125 37 38 37 1 0 0 0 0 0 0 0 0
38213 +0 0 0 24 26 27 60 74 84 158 157 158 174 174 174 174 174 174
38214 +166 165 166 158 157 158 125 124 125 41 54 63 13 16 17 6 6 6
38215 +6 6 6 37 38 37 80 127 157 136 185 209 101 161 196 101 161 196
38216 +90 154 193 28 67 93 6 10 14 13 20 25 13 20 25 6 10 14
38217 +1 1 2 4 3 3 4 4 4 4 4 4 4 4 4 4 4 4
38218 +4 4 4 4 4 4
38219 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38220 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38221 +1 1 1 1 0 0 4 3 3 37 38 37 60 74 84 153 152 153
38222 +167 166 167 167 166 167 158 157 158 154 153 154 155 154 155 156 155 156
38223 +157 156 157 158 157 158 167 166 167 167 166 167 131 129 131 43 57 68
38224 +26 28 28 37 38 37 60 73 81 131 129 131 165 164 165 166 165 166
38225 +158 157 158 155 154 155 156 155 156 156 155 156 156 155 156 158 157 158
38226 +165 164 165 174 174 174 163 162 163 60 74 84 16 19 21 13 16 17
38227 +60 73 81 131 129 131 174 174 174 174 174 174 167 166 167 165 164 165
38228 +137 136 137 60 73 81 24 26 27 4 0 0 4 0 0 16 19 21
38229 +52 104 138 101 161 196 136 185 209 136 185 209 90 154 193 27 99 146
38230 +13 20 25 4 5 7 2 5 5 4 5 7 1 1 2 0 0 0
38231 +4 4 4 4 4 4 3 3 3 2 2 2 2 2 2 4 4 4
38232 +4 4 4 4 4 4
38233 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38234 +4 4 4 4 4 4 4 4 4 4 4 4 3 3 3 0 0 0
38235 +0 0 0 13 16 17 60 73 81 137 136 137 174 174 174 166 165 166
38236 +158 157 158 156 155 156 157 156 157 156 155 156 155 154 155 158 157 158
38237 +167 166 167 174 174 174 153 152 153 60 73 81 16 19 21 4 0 0
38238 +4 0 0 4 0 0 6 6 6 26 28 28 60 74 84 158 157 158
38239 +174 174 174 166 165 166 157 156 157 155 154 155 156 155 156 156 155 156
38240 +155 154 155 158 157 158 167 166 167 167 166 167 131 129 131 125 124 125
38241 +137 136 137 167 166 167 167 166 167 174 174 174 158 157 158 125 124 125
38242 +16 19 21 4 0 0 4 0 0 10 13 16 49 76 92 107 159 188
38243 +136 185 209 136 185 209 90 154 193 26 108 161 22 40 52 6 10 14
38244 +2 3 3 1 1 2 1 1 2 4 4 5 4 4 5 4 4 5
38245 +4 4 5 2 2 1 0 0 0 0 0 0 0 0 0 2 2 2
38246 +4 4 4 4 4 4
38247 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38248 +4 4 4 5 5 5 3 3 3 0 0 0 1 0 0 4 0 0
38249 +37 51 59 131 129 131 167 166 167 167 166 167 163 162 163 157 156 157
38250 +157 156 157 155 154 155 153 152 153 157 156 157 167 166 167 174 174 174
38251 +153 152 153 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
38252 +4 3 3 4 3 3 4 0 0 6 6 6 4 0 0 37 38 37
38253 +125 124 125 174 174 174 174 174 174 165 164 165 156 155 156 154 153 154
38254 +156 155 156 156 155 156 155 154 155 163 162 163 158 157 158 163 162 163
38255 +174 174 174 174 174 174 174 174 174 125 124 125 37 38 37 0 0 0
38256 +4 0 0 6 9 11 41 54 63 90 154 193 136 185 209 146 190 211
38257 +136 185 209 37 112 160 22 40 52 6 10 14 3 6 7 1 1 2
38258 +1 1 2 3 3 3 1 1 2 3 3 3 4 4 4 4 4 4
38259 +2 2 2 2 0 0 16 19 21 37 38 37 24 26 27 0 0 0
38260 +0 0 0 4 4 4
38261 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
38262 +4 4 4 0 0 0 0 0 0 0 0 0 26 28 28 120 125 127
38263 +158 157 158 174 174 174 165 164 165 157 156 157 155 154 155 156 155 156
38264 +153 152 153 153 152 153 167 166 167 174 174 174 174 174 174 125 124 125
38265 +37 38 37 4 0 0 0 0 0 4 0 0 4 3 3 4 4 4
38266 +4 4 4 4 4 4 5 5 5 4 0 0 4 0 0 4 0 0
38267 +4 3 3 43 57 68 137 136 137 174 174 174 174 174 174 165 164 165
38268 +154 153 154 153 152 153 153 152 153 153 152 153 163 162 163 174 174 174
38269 +174 174 174 153 152 153 60 73 81 6 6 6 4 0 0 4 3 3
38270 +32 43 50 80 127 157 136 185 209 146 190 211 146 190 211 90 154 193
38271 +28 67 93 28 67 93 40 71 93 3 6 7 1 1 2 2 5 5
38272 +50 82 103 79 117 143 26 37 45 0 0 0 3 3 3 1 1 1
38273 +0 0 0 41 54 63 137 136 137 174 174 174 153 152 153 60 73 81
38274 +2 0 0 0 0 0
38275 +4 4 4 4 4 4 4 4 4 4 4 4 6 6 6 2 2 2
38276 +0 0 0 2 0 0 24 26 27 60 74 84 153 152 153 174 174 174
38277 +174 174 174 157 156 157 154 153 154 156 155 156 154 153 154 153 152 153
38278 +165 164 165 174 174 174 177 184 187 137 136 137 43 57 68 6 6 6
38279 +4 0 0 2 0 0 3 3 3 5 5 5 5 5 5 4 4 4
38280 +4 4 4 4 4 4 4 4 4 5 5 5 6 6 6 4 3 3
38281 +4 0 0 4 0 0 24 26 27 60 73 81 153 152 153 174 174 174
38282 +174 174 174 158 157 158 158 157 158 174 174 174 174 174 174 158 157 158
38283 +60 74 84 24 26 27 4 0 0 4 0 0 17 23 27 59 113 148
38284 +136 185 209 191 222 234 146 190 211 136 185 209 31 91 132 7 11 13
38285 +22 40 52 101 161 196 90 154 193 6 9 11 3 4 4 43 95 132
38286 +136 185 209 172 205 220 55 98 126 0 0 0 0 0 0 2 0 0
38287 +26 28 28 153 152 153 177 184 187 167 166 167 177 184 187 165 164 165
38288 +37 38 37 0 0 0
38289 +4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
38290 +13 16 17 60 73 81 137 136 137 174 174 174 174 174 174 165 164 165
38291 +153 152 153 153 152 153 155 154 155 154 153 154 158 157 158 174 174 174
38292 +177 184 187 163 162 163 60 73 81 16 19 21 4 0 0 4 0 0
38293 +4 3 3 4 4 4 5 5 5 5 5 5 4 4 4 5 5 5
38294 +5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 5 5 5
38295 +6 6 6 4 0 0 4 0 0 4 0 0 24 26 27 60 74 84
38296 +166 165 166 174 174 174 177 184 187 165 164 165 125 124 125 24 26 27
38297 +4 0 0 4 0 0 5 5 5 50 82 103 136 185 209 172 205 220
38298 +146 190 211 136 185 209 26 108 161 22 40 52 7 12 15 44 81 103
38299 +71 116 144 28 67 93 37 51 59 41 65 82 100 139 164 101 161 196
38300 +90 154 193 90 154 193 28 67 93 0 0 0 0 0 0 26 28 28
38301 +125 124 125 167 166 167 163 162 163 153 152 153 163 162 163 174 174 174
38302 +85 115 134 4 0 0
38303 +4 4 4 5 5 5 4 4 4 1 0 0 4 0 0 34 47 55
38304 +125 124 125 174 174 174 174 174 174 167 166 167 157 156 157 153 152 153
38305 +155 154 155 155 154 155 158 157 158 166 165 166 167 166 167 154 153 154
38306 +125 124 125 26 28 28 4 0 0 4 0 0 4 0 0 5 5 5
38307 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 1 1 1
38308 +0 0 0 0 0 0 1 1 1 4 4 4 4 4 4 4 4 4
38309 +5 5 5 5 5 5 4 3 3 4 0 0 4 0 0 6 6 6
38310 +37 38 37 131 129 131 137 136 137 37 38 37 0 0 0 4 0 0
38311 +4 5 5 43 61 72 90 154 193 172 205 220 146 190 211 136 185 209
38312 +90 154 193 28 67 93 13 20 25 43 61 72 71 116 144 44 81 103
38313 +2 5 5 7 11 13 59 113 148 101 161 196 90 154 193 28 67 93
38314 +13 20 25 6 10 14 0 0 0 13 16 17 60 73 81 137 136 137
38315 +166 165 166 158 157 158 156 155 156 154 153 154 167 166 167 174 174 174
38316 +60 73 81 4 0 0
38317 +4 4 4 4 4 4 0 0 0 3 3 3 60 74 84 174 174 174
38318 +174 174 174 167 166 167 163 162 163 155 154 155 157 156 157 155 154 155
38319 +156 155 156 163 162 163 167 166 167 158 157 158 125 124 125 37 38 37
38320 +4 3 3 4 0 0 4 0 0 6 6 6 6 6 6 5 5 5
38321 +4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 2 3 3
38322 +10 13 16 7 11 13 1 0 0 0 0 0 2 2 1 4 4 4
38323 +4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 4 0 0
38324 +4 0 0 7 11 13 13 16 17 4 0 0 3 3 3 34 47 55
38325 +80 127 157 146 190 211 172 205 220 136 185 209 136 185 209 136 185 209
38326 +28 67 93 22 40 52 55 98 126 55 98 126 21 29 34 7 11 13
38327 +50 82 103 101 161 196 101 161 196 35 83 115 13 20 25 2 2 1
38328 +1 1 2 1 1 2 37 51 59 131 129 131 174 174 174 174 174 174
38329 +167 166 167 163 162 163 163 162 163 167 166 167 174 174 174 125 124 125
38330 +16 19 21 4 0 0
38331 +4 4 4 4 0 0 4 0 0 60 74 84 174 174 174 174 174 174
38332 +158 157 158 155 154 155 155 154 155 156 155 156 155 154 155 158 157 158
38333 +167 166 167 165 164 165 131 129 131 60 73 81 13 16 17 4 0 0
38334 +4 0 0 4 3 3 6 6 6 4 3 3 5 5 5 4 4 4
38335 +4 4 4 3 2 2 0 0 0 0 0 0 7 11 13 45 69 86
38336 +80 127 157 71 116 144 43 61 72 7 11 13 0 0 0 1 1 1
38337 +4 3 3 4 4 4 4 4 4 4 4 4 6 6 6 5 5 5
38338 +3 2 2 4 0 0 1 0 0 21 29 34 59 113 148 136 185 209
38339 +146 190 211 136 185 209 136 185 209 136 185 209 136 185 209 136 185 209
38340 +68 124 159 44 81 103 22 40 52 13 16 17 43 61 72 90 154 193
38341 +136 185 209 59 113 148 21 29 34 3 4 3 1 1 1 0 0 0
38342 +24 26 27 125 124 125 163 162 163 174 174 174 166 165 166 165 164 165
38343 +163 162 163 125 124 125 125 124 125 125 124 125 125 124 125 26 28 28
38344 +4 0 0 4 3 3
38345 +3 3 3 0 0 0 24 26 27 153 152 153 177 184 187 158 157 158
38346 +156 155 156 156 155 156 155 154 155 155 154 155 165 164 165 174 174 174
38347 +155 154 155 60 74 84 26 28 28 4 0 0 4 0 0 3 1 0
38348 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 3 3
38349 +2 0 0 0 0 0 0 0 0 32 43 50 72 125 159 101 161 196
38350 +136 185 209 101 161 196 101 161 196 79 117 143 32 43 50 0 0 0
38351 +0 0 0 2 2 2 4 4 4 4 4 4 3 3 3 1 0 0
38352 +0 0 0 4 5 5 49 76 92 101 161 196 146 190 211 146 190 211
38353 +136 185 209 136 185 209 136 185 209 136 185 209 136 185 209 90 154 193
38354 +28 67 93 13 16 17 37 51 59 80 127 157 136 185 209 90 154 193
38355 +22 40 52 6 9 11 3 4 3 2 2 1 16 19 21 60 73 81
38356 +137 136 137 163 162 163 158 157 158 166 165 166 167 166 167 153 152 153
38357 +60 74 84 37 38 37 6 6 6 13 16 17 4 0 0 1 0 0
38358 +3 2 2 4 4 4
38359 +3 2 2 4 0 0 37 38 37 137 136 137 167 166 167 158 157 158
38360 +157 156 157 154 153 154 157 156 157 167 166 167 174 174 174 125 124 125
38361 +37 38 37 4 0 0 4 0 0 4 0 0 4 3 3 4 4 4
38362 +4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
38363 +0 0 0 16 21 25 55 98 126 90 154 193 136 185 209 101 161 196
38364 +101 161 196 101 161 196 136 185 209 136 185 209 101 161 196 55 98 126
38365 +14 17 19 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
38366 +22 40 52 90 154 193 146 190 211 146 190 211 136 185 209 136 185 209
38367 +136 185 209 136 185 209 136 185 209 101 161 196 35 83 115 7 11 13
38368 +17 23 27 59 113 148 136 185 209 101 161 196 34 86 122 7 12 15
38369 +2 5 5 3 4 3 6 6 6 60 73 81 131 129 131 163 162 163
38370 +166 165 166 174 174 174 174 174 174 163 162 163 125 124 125 41 54 63
38371 +13 16 17 4 0 0 4 0 0 4 0 0 1 0 0 2 2 2
38372 +4 4 4 4 4 4
38373 +1 1 1 2 1 0 43 57 68 137 136 137 153 152 153 153 152 153
38374 +163 162 163 156 155 156 165 164 165 167 166 167 60 74 84 6 6 6
38375 +4 0 0 4 0 0 5 5 5 4 4 4 4 4 4 4 4 4
38376 +4 5 5 6 6 6 4 3 3 0 0 0 0 0 0 11 15 18
38377 +40 71 93 100 139 164 101 161 196 101 161 196 101 161 196 101 161 196
38378 +101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 136 185 209
38379 +101 161 196 45 69 86 6 6 6 0 0 0 17 23 27 55 98 126
38380 +136 185 209 146 190 211 136 185 209 136 185 209 136 185 209 136 185 209
38381 +136 185 209 136 185 209 90 154 193 22 40 52 7 11 13 50 82 103
38382 +136 185 209 136 185 209 53 118 160 22 40 52 7 11 13 2 5 5
38383 +3 4 3 37 38 37 125 124 125 157 156 157 166 165 166 167 166 167
38384 +174 174 174 174 174 174 137 136 137 60 73 81 4 0 0 4 0 0
38385 +4 0 0 4 0 0 5 5 5 3 3 3 3 3 3 4 4 4
38386 +4 4 4 4 4 4
38387 +4 0 0 4 0 0 41 54 63 137 136 137 125 124 125 131 129 131
38388 +155 154 155 167 166 167 174 174 174 60 74 84 6 6 6 4 0 0
38389 +4 3 3 6 6 6 4 4 4 4 4 4 4 4 4 5 5 5
38390 +4 4 4 1 1 1 0 0 0 3 6 7 41 65 82 72 125 159
38391 +101 161 196 101 161 196 101 161 196 90 154 193 90 154 193 101 161 196
38392 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
38393 +136 185 209 136 185 209 80 127 157 55 98 126 101 161 196 146 190 211
38394 +136 185 209 136 185 209 136 185 209 101 161 196 136 185 209 101 161 196
38395 +136 185 209 101 161 196 35 83 115 22 30 35 101 161 196 172 205 220
38396 +90 154 193 28 67 93 7 11 13 2 5 5 3 4 3 13 16 17
38397 +85 115 134 167 166 167 174 174 174 174 174 174 174 174 174 174 174 174
38398 +167 166 167 60 74 84 13 16 17 4 0 0 4 0 0 4 3 3
38399 +6 6 6 5 5 5 4 4 4 5 5 5 4 4 4 5 5 5
38400 +5 5 5 5 5 5
38401 +1 1 1 4 0 0 41 54 63 137 136 137 137 136 137 125 124 125
38402 +131 129 131 167 166 167 157 156 157 37 38 37 6 6 6 4 0 0
38403 +6 6 6 5 5 5 4 4 4 4 4 4 4 5 5 2 2 1
38404 +0 0 0 0 0 0 26 37 45 58 111 146 101 161 196 101 161 196
38405 +101 161 196 90 154 193 90 154 193 90 154 193 101 161 196 101 161 196
38406 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
38407 +101 161 196 136 185 209 136 185 209 136 185 209 146 190 211 136 185 209
38408 +136 185 209 101 161 196 136 185 209 136 185 209 101 161 196 136 185 209
38409 +101 161 196 136 185 209 136 185 209 136 185 209 136 185 209 16 89 141
38410 +7 11 13 2 5 5 2 5 5 13 16 17 60 73 81 154 154 154
38411 +174 174 174 174 174 174 174 174 174 174 174 174 163 162 163 125 124 125
38412 +24 26 27 4 0 0 4 0 0 4 0 0 5 5 5 5 5 5
38413 +4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
38414 +5 5 5 4 4 4
38415 +4 0 0 6 6 6 37 38 37 137 136 137 137 136 137 131 129 131
38416 +131 129 131 153 152 153 131 129 131 26 28 28 4 0 0 4 3 3
38417 +6 6 6 4 4 4 4 4 4 4 4 4 0 0 0 0 0 0
38418 +13 20 25 51 88 114 90 154 193 101 161 196 101 161 196 90 154 193
38419 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
38420 +101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 101 161 196
38421 +101 161 196 136 185 209 101 161 196 136 185 209 136 185 209 101 161 196
38422 +136 185 209 101 161 196 136 185 209 101 161 196 101 161 196 101 161 196
38423 +136 185 209 136 185 209 136 185 209 37 112 160 21 29 34 5 7 8
38424 +2 5 5 13 16 17 43 57 68 131 129 131 174 174 174 174 174 174
38425 +174 174 174 167 166 167 157 156 157 125 124 125 37 38 37 4 0 0
38426 +4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
38427 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38428 +4 4 4 4 4 4
38429 +1 1 1 4 0 0 41 54 63 153 152 153 137 136 137 137 136 137
38430 +137 136 137 153 152 153 125 124 125 24 26 27 4 0 0 3 2 2
38431 +4 4 4 4 4 4 4 3 3 4 0 0 3 6 7 43 61 72
38432 +64 123 161 101 161 196 90 154 193 90 154 193 90 154 193 90 154 193
38433 +90 154 193 90 154 193 90 154 193 90 154 193 101 161 196 90 154 193
38434 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
38435 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
38436 +136 185 209 101 161 196 101 161 196 136 185 209 136 185 209 101 161 196
38437 +101 161 196 90 154 193 28 67 93 13 16 17 7 11 13 3 6 7
38438 +37 51 59 125 124 125 163 162 163 174 174 174 167 166 167 166 165 166
38439 +167 166 167 131 129 131 60 73 81 4 0 0 4 0 0 4 0 0
38440 +3 3 3 5 5 5 6 6 6 4 4 4 4 4 4 4 4 4
38441 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38442 +4 4 4 4 4 4
38443 +4 0 0 4 0 0 41 54 63 137 136 137 153 152 153 137 136 137
38444 +153 152 153 157 156 157 125 124 125 24 26 27 0 0 0 2 2 2
38445 +4 4 4 4 4 4 2 0 0 0 0 0 28 67 93 90 154 193
38446 +90 154 193 90 154 193 90 154 193 90 154 193 64 123 161 90 154 193
38447 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
38448 +90 154 193 101 161 196 101 161 196 101 161 196 90 154 193 136 185 209
38449 +101 161 196 101 161 196 136 185 209 101 161 196 136 185 209 101 161 196
38450 +101 161 196 101 161 196 136 185 209 101 161 196 101 161 196 90 154 193
38451 +35 83 115 13 16 17 3 6 7 2 5 5 13 16 17 60 74 84
38452 +154 154 154 166 165 166 165 164 165 158 157 158 163 162 163 157 156 157
38453 +60 74 84 13 16 17 4 0 0 4 0 0 3 2 2 4 4 4
38454 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38455 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38456 +4 4 4 4 4 4
38457 +1 1 1 4 0 0 41 54 63 157 156 157 155 154 155 137 136 137
38458 +153 152 153 158 157 158 137 136 137 26 28 28 2 0 0 2 2 2
38459 +4 4 4 4 4 4 1 0 0 6 10 14 34 86 122 90 154 193
38460 +64 123 161 90 154 193 64 123 161 90 154 193 90 154 193 90 154 193
38461 +64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
38462 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
38463 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
38464 +136 185 209 101 161 196 136 185 209 90 154 193 26 108 161 22 40 52
38465 +13 16 17 5 7 8 2 5 5 2 5 5 37 38 37 165 164 165
38466 +174 174 174 163 162 163 154 154 154 165 164 165 167 166 167 60 73 81
38467 +6 6 6 4 0 0 4 0 0 4 4 4 4 4 4 4 4 4
38468 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38469 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38470 +4 4 4 4 4 4
38471 +4 0 0 6 6 6 41 54 63 156 155 156 158 157 158 153 152 153
38472 +156 155 156 165 164 165 137 136 137 26 28 28 0 0 0 2 2 2
38473 +4 4 5 4 4 4 2 0 0 7 12 15 31 96 139 64 123 161
38474 +90 154 193 64 123 161 90 154 193 90 154 193 64 123 161 90 154 193
38475 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
38476 +90 154 193 90 154 193 90 154 193 101 161 196 101 161 196 101 161 196
38477 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
38478 +101 161 196 136 185 209 26 108 161 22 40 52 7 11 13 5 7 8
38479 +2 5 5 2 5 5 2 5 5 2 2 1 37 38 37 158 157 158
38480 +174 174 174 154 154 154 156 155 156 167 166 167 165 164 165 37 38 37
38481 +4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38482 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38483 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38484 +4 4 4 4 4 4
38485 +3 1 0 4 0 0 60 73 81 157 156 157 163 162 163 153 152 153
38486 +158 157 158 167 166 167 137 136 137 26 28 28 2 0 0 2 2 2
38487 +4 5 5 4 4 4 4 0 0 7 12 15 24 86 132 26 108 161
38488 +37 112 160 64 123 161 90 154 193 64 123 161 90 154 193 90 154 193
38489 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
38490 +90 154 193 101 161 196 90 154 193 101 161 196 101 161 196 101 161 196
38491 +101 161 196 101 161 196 101 161 196 136 185 209 101 161 196 136 185 209
38492 +90 154 193 35 83 115 13 16 17 13 16 17 7 11 13 3 6 7
38493 +5 7 8 6 6 6 3 4 3 2 2 1 30 32 34 154 154 154
38494 +167 166 167 154 154 154 154 154 154 174 174 174 165 164 165 37 38 37
38495 +6 6 6 4 0 0 6 6 6 4 4 4 4 4 4 4 4 4
38496 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38497 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38498 +4 4 4 4 4 4
38499 +4 0 0 4 0 0 41 54 63 163 162 163 166 165 166 154 154 154
38500 +163 162 163 174 174 174 137 136 137 26 28 28 0 0 0 2 2 2
38501 +4 5 5 4 4 5 1 1 2 6 10 14 28 67 93 18 97 151
38502 +18 97 151 18 97 151 26 108 161 37 112 160 37 112 160 90 154 193
38503 +64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
38504 +90 154 193 101 161 196 101 161 196 90 154 193 101 161 196 101 161 196
38505 +101 161 196 101 161 196 101 161 196 136 185 209 90 154 193 16 89 141
38506 +13 20 25 7 11 13 5 7 8 5 7 8 2 5 5 4 5 5
38507 +3 4 3 4 5 5 3 4 3 0 0 0 37 38 37 158 157 158
38508 +174 174 174 158 157 158 158 157 158 167 166 167 174 174 174 41 54 63
38509 +4 0 0 3 2 2 5 5 5 4 4 4 4 4 4 4 4 4
38510 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38511 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38512 +4 4 4 4 4 4
38513 +1 1 1 4 0 0 60 73 81 165 164 165 174 174 174 158 157 158
38514 +167 166 167 174 174 174 153 152 153 26 28 28 2 0 0 2 2 2
38515 +4 5 5 4 4 4 4 0 0 7 12 15 10 87 144 10 87 144
38516 +18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
38517 +26 108 161 37 112 160 53 118 160 90 154 193 90 154 193 90 154 193
38518 +90 154 193 90 154 193 101 161 196 101 161 196 101 161 196 101 161 196
38519 +101 161 196 136 185 209 90 154 193 26 108 161 22 40 52 13 16 17
38520 +7 11 13 3 6 7 5 7 8 5 7 8 2 5 5 4 5 5
38521 +4 5 5 6 6 6 3 4 3 0 0 0 30 32 34 158 157 158
38522 +174 174 174 156 155 156 155 154 155 165 164 165 154 153 154 37 38 37
38523 +4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38524 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38525 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38526 +4 4 4 4 4 4
38527 +4 0 0 4 0 0 60 73 81 167 166 167 174 174 174 163 162 163
38528 +174 174 174 174 174 174 153 152 153 26 28 28 0 0 0 3 3 3
38529 +5 5 5 4 4 4 1 1 2 7 12 15 28 67 93 18 97 151
38530 +18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
38531 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
38532 +90 154 193 26 108 161 90 154 193 90 154 193 90 154 193 101 161 196
38533 +101 161 196 26 108 161 22 40 52 13 16 17 7 11 13 2 5 5
38534 +2 5 5 6 6 6 2 5 5 4 5 5 4 5 5 4 5 5
38535 +3 4 3 5 5 5 3 4 3 2 0 0 30 32 34 137 136 137
38536 +153 152 153 137 136 137 131 129 131 137 136 137 131 129 131 37 38 37
38537 +4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38538 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38539 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38540 +4 4 4 4 4 4
38541 +1 1 1 4 0 0 60 73 81 167 166 167 174 174 174 166 165 166
38542 +174 174 174 177 184 187 153 152 153 30 32 34 1 0 0 3 3 3
38543 +5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
38544 +18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
38545 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
38546 +26 108 161 26 108 161 26 108 161 90 154 193 90 154 193 26 108 161
38547 +35 83 115 13 16 17 7 11 13 5 7 8 3 6 7 5 7 8
38548 +2 5 5 6 6 6 4 5 5 4 5 5 3 4 3 4 5 5
38549 +3 4 3 6 6 6 3 4 3 0 0 0 26 28 28 125 124 125
38550 +131 129 131 125 124 125 125 124 125 131 129 131 131 129 131 37 38 37
38551 +4 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38552 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38553 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38554 +4 4 4 4 4 4
38555 +3 1 0 4 0 0 60 73 81 174 174 174 177 184 187 167 166 167
38556 +174 174 174 177 184 187 153 152 153 30 32 34 0 0 0 3 3 3
38557 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
38558 +18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
38559 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
38560 +26 108 161 90 154 193 26 108 161 26 108 161 24 86 132 13 20 25
38561 +7 11 13 13 20 25 22 40 52 5 7 8 3 4 3 3 4 3
38562 +4 5 5 3 4 3 4 5 5 3 4 3 4 5 5 3 4 3
38563 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
38564 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38565 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38566 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38567 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38568 +4 4 4 4 4 4
38569 +1 1 1 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
38570 +174 174 174 190 197 201 157 156 157 30 32 34 1 0 0 3 3 3
38571 +5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
38572 +18 97 151 19 95 150 19 95 150 18 97 151 18 97 151 26 108 161
38573 +18 97 151 26 108 161 26 108 161 26 108 161 26 108 161 90 154 193
38574 +26 108 161 26 108 161 26 108 161 22 40 52 2 5 5 3 4 3
38575 +28 67 93 37 112 160 34 86 122 2 5 5 3 4 3 3 4 3
38576 +3 4 3 3 4 3 3 4 3 2 2 1 3 4 3 4 4 4
38577 +4 5 5 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
38578 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38579 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38580 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38581 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38582 +4 4 4 4 4 4
38583 +4 0 0 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
38584 +174 174 174 190 197 201 158 157 158 30 32 34 0 0 0 2 2 2
38585 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
38586 +10 87 144 19 95 150 19 95 150 18 97 151 18 97 151 18 97 151
38587 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
38588 +18 97 151 22 40 52 2 5 5 2 2 1 22 40 52 26 108 161
38589 +90 154 193 37 112 160 22 40 52 3 4 3 13 20 25 22 30 35
38590 +3 6 7 1 1 1 2 2 2 6 9 11 5 5 5 4 3 3
38591 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
38592 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38593 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38594 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38595 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38596 +4 4 4 4 4 4
38597 +1 1 1 4 0 0 60 73 81 177 184 187 193 200 203 174 174 174
38598 +177 184 187 193 200 203 163 162 163 30 32 34 4 0 0 2 2 2
38599 +5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
38600 +10 87 144 10 87 144 19 95 150 19 95 150 19 95 150 18 97 151
38601 +26 108 161 26 108 161 26 108 161 90 154 193 26 108 161 28 67 93
38602 +6 10 14 2 5 5 13 20 25 24 86 132 37 112 160 90 154 193
38603 +10 87 144 7 12 15 2 5 5 28 67 93 37 112 160 28 67 93
38604 +2 2 1 7 12 15 35 83 115 28 67 93 3 6 7 1 0 0
38605 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
38606 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38607 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38608 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38609 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38610 +4 4 4 4 4 4
38611 +4 0 0 4 0 0 60 73 81 174 174 174 190 197 201 174 174 174
38612 +177 184 187 193 200 203 163 162 163 30 32 34 0 0 0 2 2 2
38613 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
38614 +10 87 144 16 89 141 19 95 150 10 87 144 26 108 161 26 108 161
38615 +26 108 161 26 108 161 26 108 161 28 67 93 6 10 14 1 1 2
38616 +7 12 15 28 67 93 26 108 161 16 89 141 24 86 132 21 29 34
38617 +3 4 3 21 29 34 37 112 160 37 112 160 27 99 146 21 29 34
38618 +21 29 34 26 108 161 90 154 193 35 83 115 1 1 2 2 0 0
38619 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
38620 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38621 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38622 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38623 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38624 +4 4 4 4 4 4
38625 +3 1 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
38626 +190 197 201 193 200 203 165 164 165 37 38 37 4 0 0 2 2 2
38627 +5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
38628 +10 87 144 10 87 144 16 89 141 18 97 151 18 97 151 10 87 144
38629 +24 86 132 24 86 132 13 20 25 4 5 7 4 5 7 22 40 52
38630 +18 97 151 37 112 160 26 108 161 7 12 15 1 1 1 0 0 0
38631 +28 67 93 37 112 160 26 108 161 28 67 93 22 40 52 28 67 93
38632 +26 108 161 90 154 193 26 108 161 10 87 144 0 0 0 2 0 0
38633 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
38634 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38635 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38636 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38637 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38638 +4 4 4 4 4 4
38639 +4 0 0 6 6 6 60 73 81 174 174 174 193 200 203 174 174 174
38640 +190 197 201 193 200 203 165 164 165 30 32 34 0 0 0 2 2 2
38641 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
38642 +10 87 144 10 87 144 10 87 144 18 97 151 28 67 93 6 10 14
38643 +0 0 0 1 1 2 4 5 7 13 20 25 16 89 141 26 108 161
38644 +26 108 161 26 108 161 24 86 132 6 9 11 2 3 3 22 40 52
38645 +37 112 160 16 89 141 22 40 52 28 67 93 26 108 161 26 108 161
38646 +90 154 193 26 108 161 26 108 161 28 67 93 1 1 1 4 0 0
38647 +4 4 4 5 5 5 3 3 3 4 0 0 26 28 28 124 126 130
38648 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38649 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38650 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38651 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38652 +4 4 4 4 4 4
38653 +4 0 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
38654 +193 200 203 193 200 203 167 166 167 37 38 37 4 0 0 2 2 2
38655 +5 5 5 4 4 4 4 0 0 6 10 14 28 67 93 10 87 144
38656 +10 87 144 10 87 144 18 97 151 10 87 144 13 20 25 4 5 7
38657 +1 1 2 1 1 1 22 40 52 26 108 161 26 108 161 26 108 161
38658 +26 108 161 26 108 161 26 108 161 24 86 132 22 40 52 22 40 52
38659 +22 40 52 22 40 52 10 87 144 26 108 161 26 108 161 26 108 161
38660 +26 108 161 26 108 161 90 154 193 10 87 144 0 0 0 4 0 0
38661 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
38662 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38663 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38664 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38665 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38666 +4 4 4 4 4 4
38667 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
38668 +190 197 201 205 212 215 167 166 167 30 32 34 0 0 0 2 2 2
38669 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
38670 +10 87 144 10 87 144 10 87 144 10 87 144 22 40 52 1 1 2
38671 +2 0 0 1 1 2 24 86 132 26 108 161 26 108 161 26 108 161
38672 +26 108 161 19 95 150 16 89 141 10 87 144 22 40 52 22 40 52
38673 +10 87 144 26 108 161 37 112 160 26 108 161 26 108 161 26 108 161
38674 +26 108 161 26 108 161 26 108 161 28 67 93 2 0 0 3 1 0
38675 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
38676 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38677 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38678 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38679 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38680 +4 4 4 4 4 4
38681 +4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
38682 +193 200 203 193 200 203 174 174 174 37 38 37 4 0 0 2 2 2
38683 +5 5 5 4 4 4 3 2 2 1 1 2 13 20 25 10 87 144
38684 +10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 13 20 25
38685 +13 20 25 22 40 52 10 87 144 18 97 151 18 97 151 26 108 161
38686 +10 87 144 13 20 25 6 10 14 21 29 34 24 86 132 18 97 151
38687 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
38688 +26 108 161 90 154 193 18 97 151 13 20 25 0 0 0 4 3 3
38689 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
38690 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38691 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38692 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38693 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38694 +4 4 4 4 4 4
38695 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
38696 +190 197 201 220 221 221 167 166 167 30 32 34 1 0 0 2 2 2
38697 +5 5 5 4 4 4 4 4 5 2 5 5 4 5 7 13 20 25
38698 +28 67 93 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
38699 +10 87 144 10 87 144 18 97 151 10 87 144 18 97 151 18 97 151
38700 +28 67 93 2 3 3 0 0 0 28 67 93 26 108 161 26 108 161
38701 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
38702 +26 108 161 10 87 144 13 20 25 1 1 2 3 2 2 4 4 4
38703 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
38704 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38705 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38706 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38707 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38708 +4 4 4 4 4 4
38709 +4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
38710 +193 200 203 193 200 203 174 174 174 26 28 28 4 0 0 4 3 3
38711 +5 5 5 4 4 4 4 4 4 4 4 5 1 1 2 2 5 5
38712 +4 5 7 22 40 52 10 87 144 10 87 144 18 97 151 10 87 144
38713 +10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 18 97 151
38714 +10 87 144 28 67 93 22 40 52 10 87 144 26 108 161 18 97 151
38715 +18 97 151 18 97 151 26 108 161 26 108 161 26 108 161 26 108 161
38716 +22 40 52 1 1 2 0 0 0 2 3 3 4 4 4 4 4 4
38717 +4 4 4 5 5 5 4 4 4 0 0 0 26 28 28 131 129 131
38718 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38719 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38720 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38721 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38722 +4 4 4 4 4 4
38723 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
38724 +190 197 201 220 221 221 190 197 201 41 54 63 4 0 0 2 2 2
38725 +6 6 6 4 4 4 4 4 4 4 4 5 4 4 5 3 3 3
38726 +1 1 2 1 1 2 6 10 14 22 40 52 10 87 144 18 97 151
38727 +18 97 151 10 87 144 10 87 144 10 87 144 18 97 151 10 87 144
38728 +10 87 144 18 97 151 26 108 161 18 97 151 18 97 151 10 87 144
38729 +26 108 161 26 108 161 26 108 161 10 87 144 28 67 93 6 10 14
38730 +1 1 2 1 1 2 4 3 3 4 4 5 4 4 4 4 4 4
38731 +5 5 5 5 5 5 1 1 1 4 0 0 37 51 59 137 136 137
38732 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38733 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38734 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38735 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38736 +4 4 4 4 4 4
38737 +4 0 0 4 0 0 60 73 81 220 221 221 193 200 203 174 174 174
38738 +193 200 203 193 200 203 220 221 221 137 136 137 13 16 17 4 0 0
38739 +2 2 2 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5
38740 +4 4 5 4 3 3 1 1 2 4 5 7 13 20 25 28 67 93
38741 +10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
38742 +10 87 144 18 97 151 18 97 151 10 87 144 18 97 151 26 108 161
38743 +26 108 161 18 97 151 28 67 93 6 10 14 0 0 0 0 0 0
38744 +2 3 3 4 5 5 4 4 5 4 4 4 4 4 4 5 5 5
38745 +3 3 3 1 1 1 0 0 0 16 19 21 125 124 125 137 136 137
38746 +131 129 131 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38747 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38748 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38749 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38750 +4 4 4 4 4 4
38751 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
38752 +193 200 203 190 197 201 220 221 221 220 221 221 153 152 153 30 32 34
38753 +0 0 0 0 0 0 2 2 2 4 4 4 4 4 4 4 4 4
38754 +4 4 4 4 5 5 4 5 7 1 1 2 1 1 2 4 5 7
38755 +13 20 25 28 67 93 10 87 144 18 97 151 10 87 144 10 87 144
38756 +10 87 144 10 87 144 10 87 144 18 97 151 26 108 161 18 97 151
38757 +28 67 93 7 12 15 0 0 0 0 0 0 2 2 1 4 4 4
38758 +4 5 5 4 5 5 4 4 4 4 4 4 3 3 3 0 0 0
38759 +0 0 0 0 0 0 37 38 37 125 124 125 158 157 158 131 129 131
38760 +125 124 125 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38761 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38762 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38763 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38764 +4 4 4 4 4 4
38765 +4 3 3 4 0 0 41 54 63 193 200 203 220 221 221 174 174 174
38766 +193 200 203 193 200 203 193 200 203 220 221 221 244 246 246 193 200 203
38767 +120 125 127 5 5 5 1 0 0 0 0 0 1 1 1 4 4 4
38768 +4 4 4 4 4 4 4 5 5 4 5 5 4 4 5 1 1 2
38769 +4 5 7 4 5 7 22 40 52 10 87 144 10 87 144 10 87 144
38770 +10 87 144 10 87 144 18 97 151 10 87 144 10 87 144 13 20 25
38771 +4 5 7 2 3 3 1 1 2 4 4 4 4 5 5 4 4 4
38772 +4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 1 2
38773 +24 26 27 60 74 84 153 152 153 163 162 163 137 136 137 125 124 125
38774 +125 124 125 125 124 125 125 124 125 137 136 137 125 124 125 26 28 28
38775 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38776 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38777 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38778 +4 4 4 4 4 4
38779 +4 0 0 6 6 6 26 28 28 156 155 156 220 221 221 220 221 221
38780 +174 174 174 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
38781 +220 221 221 167 166 167 60 73 81 7 11 13 0 0 0 0 0 0
38782 +3 3 3 4 4 4 4 4 4 4 4 4 4 4 5 4 4 5
38783 +4 4 5 1 1 2 1 1 2 4 5 7 22 40 52 10 87 144
38784 +10 87 144 10 87 144 10 87 144 22 40 52 4 5 7 1 1 2
38785 +1 1 2 4 4 5 4 4 4 4 4 4 4 4 4 4 4 4
38786 +5 5 5 2 2 2 0 0 0 4 0 0 16 19 21 60 73 81
38787 +137 136 137 167 166 167 158 157 158 137 136 137 131 129 131 131 129 131
38788 +125 124 125 125 124 125 131 129 131 155 154 155 60 74 84 5 7 8
38789 +0 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38790 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38791 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38792 +4 4 4 4 4 4
38793 +5 5 5 4 0 0 4 0 0 60 73 81 193 200 203 220 221 221
38794 +193 200 203 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
38795 +220 221 221 220 221 221 220 221 221 137 136 137 43 57 68 6 6 6
38796 +4 0 0 1 1 1 4 4 4 4 4 4 4 4 4 4 4 4
38797 +4 4 5 4 4 5 3 2 2 1 1 2 2 5 5 13 20 25
38798 +22 40 52 22 40 52 13 20 25 2 3 3 1 1 2 3 3 3
38799 +4 5 7 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38800 +1 1 1 0 0 0 2 3 3 41 54 63 131 129 131 166 165 166
38801 +166 165 166 155 154 155 153 152 153 137 136 137 137 136 137 125 124 125
38802 +125 124 125 137 136 137 137 136 137 125 124 125 37 38 37 4 3 3
38803 +4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
38804 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38805 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38806 +4 4 4 4 4 4
38807 +4 3 3 6 6 6 6 6 6 13 16 17 60 73 81 167 166 167
38808 +220 221 221 220 221 221 220 221 221 193 200 203 193 200 203 193 200 203
38809 +205 212 215 220 221 221 220 221 221 244 246 246 205 212 215 125 124 125
38810 +24 26 27 0 0 0 0 0 0 2 2 2 5 5 5 5 5 5
38811 +4 4 4 4 4 4 4 4 4 4 4 5 1 1 2 4 5 7
38812 +4 5 7 4 5 7 1 1 2 3 2 2 4 4 5 4 4 4
38813 +4 4 4 4 4 4 5 5 5 4 4 4 0 0 0 0 0 0
38814 +2 0 0 26 28 28 125 124 125 174 174 174 174 174 174 166 165 166
38815 +156 155 156 153 152 153 137 136 137 137 136 137 131 129 131 137 136 137
38816 +137 136 137 137 136 137 60 74 84 30 32 34 4 0 0 4 0 0
38817 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38818 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38819 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38820 +4 4 4 4 4 4
38821 +5 5 5 6 6 6 4 0 0 4 0 0 6 6 6 26 28 28
38822 +125 124 125 174 174 174 220 221 221 220 221 221 220 221 221 193 200 203
38823 +205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
38824 +193 200 203 60 74 84 13 16 17 4 0 0 0 0 0 3 3 3
38825 +5 5 5 5 5 5 4 4 4 4 4 4 4 4 5 3 3 3
38826 +1 1 2 3 3 3 4 4 5 4 4 5 4 4 4 4 4 4
38827 +5 5 5 5 5 5 2 2 2 0 0 0 0 0 0 13 16 17
38828 +60 74 84 174 174 174 193 200 203 174 174 174 167 166 167 163 162 163
38829 +153 152 153 153 152 153 137 136 137 137 136 137 153 152 153 137 136 137
38830 +125 124 125 41 54 63 24 26 27 4 0 0 4 0 0 5 5 5
38831 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38832 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38833 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38834 +4 4 4 4 4 4
38835 +4 3 3 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
38836 +6 6 6 37 38 37 131 129 131 220 221 221 220 221 221 220 221 221
38837 +193 200 203 193 200 203 220 221 221 205 212 215 220 221 221 244 246 246
38838 +244 246 246 244 246 246 174 174 174 41 54 63 0 0 0 0 0 0
38839 +0 0 0 4 4 4 5 5 5 5 5 5 4 4 4 4 4 5
38840 +4 4 5 4 4 5 4 4 4 4 4 4 6 6 6 6 6 6
38841 +3 3 3 0 0 0 2 0 0 13 16 17 60 73 81 156 155 156
38842 +220 221 221 193 200 203 174 174 174 165 164 165 163 162 163 154 153 154
38843 +153 152 153 153 152 153 158 157 158 163 162 163 137 136 137 60 73 81
38844 +13 16 17 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
38845 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38846 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38847 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38848 +4 4 4 4 4 4
38849 +5 5 5 4 3 3 4 3 3 6 6 6 6 6 6 6 6 6
38850 +6 6 6 6 6 6 6 6 6 37 38 37 167 166 167 244 246 246
38851 +244 246 246 220 221 221 205 212 215 205 212 215 220 221 221 193 200 203
38852 +220 221 221 244 246 246 244 246 246 244 246 246 137 136 137 37 38 37
38853 +3 2 2 0 0 0 1 1 1 5 5 5 5 5 5 4 4 4
38854 +4 4 4 4 4 4 4 4 4 5 5 5 4 4 4 1 1 1
38855 +0 0 0 5 5 5 43 57 68 153 152 153 193 200 203 220 221 221
38856 +177 184 187 174 174 174 167 166 167 166 165 166 158 157 158 157 156 157
38857 +158 157 158 166 165 166 156 155 156 85 115 134 13 16 17 4 0 0
38858 +4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
38859 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38860 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38861 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38862 +4 4 4 4 4 4
38863 +5 5 5 4 3 3 6 6 6 6 6 6 4 0 0 6 6 6
38864 +6 6 6 6 6 6 6 6 6 6 6 6 13 16 17 60 73 81
38865 +177 184 187 220 221 221 220 221 221 220 221 221 205 212 215 220 221 221
38866 +220 221 221 205 212 215 220 221 221 244 246 246 244 246 246 205 212 215
38867 +125 124 125 30 32 34 0 0 0 0 0 0 2 2 2 5 5 5
38868 +4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 0 0
38869 +37 38 37 131 129 131 205 212 215 220 221 221 193 200 203 174 174 174
38870 +174 174 174 174 174 174 167 166 167 165 164 165 166 165 166 167 166 167
38871 +158 157 158 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
38872 +4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
38873 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38874 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38875 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38876 +4 4 4 4 4 4
38877 +4 4 4 5 5 5 4 3 3 4 3 3 6 6 6 6 6 6
38878 +4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
38879 +26 28 28 125 124 125 205 212 215 220 221 221 220 221 221 220 221 221
38880 +205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
38881 +244 246 246 190 197 201 60 74 84 16 19 21 4 0 0 0 0 0
38882 +0 0 0 0 0 0 0 0 0 0 0 0 16 19 21 120 125 127
38883 +177 184 187 220 221 221 205 212 215 177 184 187 174 174 174 177 184 187
38884 +174 174 174 174 174 174 167 166 167 174 174 174 166 165 166 137 136 137
38885 +60 73 81 13 16 17 4 0 0 4 0 0 4 3 3 6 6 6
38886 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38887 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38888 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38889 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38890 +4 4 4 4 4 4
38891 +5 5 5 4 3 3 5 5 5 4 3 3 6 6 6 4 0 0
38892 +6 6 6 6 6 6 4 0 0 6 6 6 4 0 0 6 6 6
38893 +6 6 6 6 6 6 37 38 37 137 136 137 193 200 203 220 221 221
38894 +220 221 221 205 212 215 220 221 221 205 212 215 205 212 215 220 221 221
38895 +220 221 221 220 221 221 244 246 246 166 165 166 43 57 68 2 2 2
38896 +0 0 0 4 0 0 16 19 21 60 73 81 157 156 157 202 210 214
38897 +220 221 221 193 200 203 177 184 187 177 184 187 177 184 187 174 174 174
38898 +174 174 174 174 174 174 174 174 174 157 156 157 60 74 84 24 26 27
38899 +4 0 0 4 0 0 4 0 0 6 6 6 4 4 4 4 4 4
38900 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38901 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38902 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38903 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38904 +4 4 4 4 4 4
38905 +4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
38906 +6 6 6 4 0 0 6 6 6 6 6 6 6 6 6 4 0 0
38907 +4 0 0 4 0 0 6 6 6 24 26 27 60 73 81 167 166 167
38908 +220 221 221 220 221 221 220 221 221 205 212 215 205 212 215 205 212 215
38909 +205 212 215 220 221 221 220 221 221 220 221 221 205 212 215 137 136 137
38910 +60 74 84 125 124 125 137 136 137 190 197 201 220 221 221 193 200 203
38911 +177 184 187 177 184 187 177 184 187 174 174 174 174 174 174 177 184 187
38912 +190 197 201 174 174 174 125 124 125 37 38 37 6 6 6 4 0 0
38913 +4 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38914 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38915 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38916 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38917 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38918 +4 4 4 4 4 4
38919 +4 4 4 4 4 4 5 5 5 5 5 5 4 3 3 6 6 6
38920 +4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 6 6 6
38921 +6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
38922 +125 124 125 193 200 203 244 246 246 220 221 221 205 212 215 205 212 215
38923 +205 212 215 193 200 203 205 212 215 205 212 215 220 221 221 220 221 221
38924 +193 200 203 193 200 203 205 212 215 193 200 203 193 200 203 177 184 187
38925 +190 197 201 190 197 201 174 174 174 190 197 201 193 200 203 190 197 201
38926 +153 152 153 60 73 81 4 0 0 4 0 0 4 0 0 3 2 2
38927 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38928 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38929 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38930 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38931 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38932 +4 4 4 4 4 4
38933 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
38934 +6 6 6 4 3 3 4 3 3 4 3 3 6 6 6 6 6 6
38935 +4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 4 0 0
38936 +4 0 0 26 28 28 131 129 131 220 221 221 244 246 246 220 221 221
38937 +205 212 215 193 200 203 205 212 215 193 200 203 193 200 203 205 212 215
38938 +220 221 221 193 200 203 193 200 203 193 200 203 190 197 201 174 174 174
38939 +174 174 174 190 197 201 193 200 203 193 200 203 167 166 167 125 124 125
38940 +6 6 6 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
38941 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38942 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38943 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38944 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38945 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38946 +4 4 4 4 4 4
38947 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
38948 +5 5 5 4 3 3 5 5 5 6 6 6 4 3 3 5 5 5
38949 +6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
38950 +4 0 0 4 0 0 6 6 6 41 54 63 158 157 158 220 221 221
38951 +220 221 221 220 221 221 193 200 203 193 200 203 193 200 203 190 197 201
38952 +190 197 201 190 197 201 190 197 201 190 197 201 174 174 174 193 200 203
38953 +193 200 203 220 221 221 174 174 174 125 124 125 37 38 37 4 0 0
38954 +4 0 0 4 3 3 6 6 6 4 4 4 4 4 4 4 4 4
38955 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38956 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38957 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38958 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38959 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38960 +4 4 4 4 4 4
38961 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38962 +4 4 4 5 5 5 4 3 3 4 3 3 4 3 3 5 5 5
38963 +4 3 3 6 6 6 5 5 5 4 3 3 6 6 6 6 6 6
38964 +6 6 6 6 6 6 4 0 0 4 0 0 13 16 17 60 73 81
38965 +174 174 174 220 221 221 220 221 221 205 212 215 190 197 201 174 174 174
38966 +193 200 203 174 174 174 190 197 201 174 174 174 193 200 203 220 221 221
38967 +193 200 203 131 129 131 37 38 37 6 6 6 4 0 0 4 0 0
38968 +6 6 6 6 6 6 4 3 3 5 5 5 4 4 4 4 4 4
38969 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38970 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38971 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38972 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38973 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38974 +4 4 4 4 4 4
38975 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38976 +4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
38977 +5 5 5 4 3 3 4 3 3 5 5 5 4 3 3 4 3 3
38978 +5 5 5 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
38979 +6 6 6 125 124 125 174 174 174 220 221 221 220 221 221 193 200 203
38980 +193 200 203 193 200 203 193 200 203 193 200 203 220 221 221 158 157 158
38981 +60 73 81 6 6 6 4 0 0 4 0 0 5 5 5 6 6 6
38982 +5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
38983 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38984 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38985 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38986 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38987 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38988 +4 4 4 4 4 4
38989 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38990 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38991 +4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
38992 +5 5 5 5 5 5 6 6 6 6 6 6 4 0 0 4 0 0
38993 +4 0 0 4 0 0 26 28 28 125 124 125 174 174 174 193 200 203
38994 +193 200 203 174 174 174 193 200 203 167 166 167 125 124 125 6 6 6
38995 +6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 5 5 5
38996 +4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
38997 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38998 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38999 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39000 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39001 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39002 +4 4 4 4 4 4
39003 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39004 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39005 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
39006 +4 3 3 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
39007 +6 6 6 4 0 0 4 0 0 6 6 6 37 38 37 125 124 125
39008 +153 152 153 131 129 131 125 124 125 37 38 37 6 6 6 6 6 6
39009 +6 6 6 4 0 0 6 6 6 6 6 6 4 3 3 5 5 5
39010 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39011 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39012 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39013 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39014 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39015 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39016 +4 4 4 4 4 4
39017 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39018 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39019 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39020 +4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
39021 +6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
39022 +24 26 27 24 26 27 6 6 6 6 6 6 6 6 6 4 0 0
39023 +6 6 6 6 6 6 4 0 0 6 6 6 5 5 5 4 3 3
39024 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39025 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39026 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39027 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39028 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39029 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39030 +4 4 4 4 4 4
39031 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39032 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39033 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39034 +4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
39035 +4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
39036 +6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
39037 +4 0 0 6 6 6 6 6 6 4 3 3 5 5 5 4 4 4
39038 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39039 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39040 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39041 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39042 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39043 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39044 +4 4 4 4 4 4
39045 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39046 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39047 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39048 +4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 5 5 5
39049 +5 5 5 5 5 5 4 0 0 6 6 6 4 0 0 6 6 6
39050 +6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 4 0 0
39051 +6 6 6 4 3 3 5 5 5 4 3 3 5 5 5 4 4 4
39052 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39053 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39054 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39055 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39056 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39057 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39058 +4 4 4 4 4 4
39059 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39060 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39061 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39062 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
39063 +4 3 3 6 6 6 4 3 3 6 6 6 6 6 6 6 6 6
39064 +4 0 0 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
39065 +6 6 6 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
39066 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39067 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39068 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39069 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39070 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39071 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39072 +4 4 4 4 4 4
39073 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39074 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39075 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39076 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39077 +4 4 4 5 5 5 4 3 3 5 5 5 4 0 0 6 6 6
39078 +6 6 6 4 0 0 6 6 6 6 6 6 4 0 0 6 6 6
39079 +4 3 3 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
39080 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39081 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39082 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39083 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39084 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39085 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39086 +4 4 4 4 4 4
39087 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39088 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39089 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39090 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39091 +4 4 4 5 5 5 4 3 3 5 5 5 6 6 6 4 3 3
39092 +4 3 3 6 6 6 6 6 6 4 3 3 6 6 6 4 3 3
39093 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39094 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39095 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39096 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39097 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39098 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39099 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39100 +4 4 4 4 4 4
39101 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39102 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39103 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39104 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39105 +4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 6 6 6
39106 +5 5 5 4 3 3 4 3 3 4 3 3 5 5 5 5 5 5
39107 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39108 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39109 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39110 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39111 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39112 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39113 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39114 +4 4 4 4 4 4
39115 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39116 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39117 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39118 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39119 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
39120 +5 5 5 4 3 3 5 5 5 5 5 5 4 4 4 4 4 4
39121 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39122 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39123 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39124 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39125 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39126 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39127 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39128 +4 4 4 4 4 4
39129 diff --git a/drivers/video/udlfb.c b/drivers/video/udlfb.c
39130 index 3473e75..c930142 100644
39131 --- a/drivers/video/udlfb.c
39132 +++ b/drivers/video/udlfb.c
39133 @@ -619,11 +619,11 @@ int dlfb_handle_damage(struct dlfb_data *dev, int x, int y,
39134 dlfb_urb_completion(urb);
39135
39136 error:
39137 - atomic_add(bytes_sent, &dev->bytes_sent);
39138 - atomic_add(bytes_identical, &dev->bytes_identical);
39139 - atomic_add(width*height*2, &dev->bytes_rendered);
39140 + atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
39141 + atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
39142 + atomic_add_unchecked(width*height*2, &dev->bytes_rendered);
39143 end_cycles = get_cycles();
39144 - atomic_add(((unsigned int) ((end_cycles - start_cycles)
39145 + atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
39146 >> 10)), /* Kcycles */
39147 &dev->cpu_kcycles_used);
39148
39149 @@ -744,11 +744,11 @@ static void dlfb_dpy_deferred_io(struct fb_info *info,
39150 dlfb_urb_completion(urb);
39151
39152 error:
39153 - atomic_add(bytes_sent, &dev->bytes_sent);
39154 - atomic_add(bytes_identical, &dev->bytes_identical);
39155 - atomic_add(bytes_rendered, &dev->bytes_rendered);
39156 + atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
39157 + atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
39158 + atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered);
39159 end_cycles = get_cycles();
39160 - atomic_add(((unsigned int) ((end_cycles - start_cycles)
39161 + atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
39162 >> 10)), /* Kcycles */
39163 &dev->cpu_kcycles_used);
39164 }
39165 @@ -1368,7 +1368,7 @@ static ssize_t metrics_bytes_rendered_show(struct device *fbdev,
39166 struct fb_info *fb_info = dev_get_drvdata(fbdev);
39167 struct dlfb_data *dev = fb_info->par;
39168 return snprintf(buf, PAGE_SIZE, "%u\n",
39169 - atomic_read(&dev->bytes_rendered));
39170 + atomic_read_unchecked(&dev->bytes_rendered));
39171 }
39172
39173 static ssize_t metrics_bytes_identical_show(struct device *fbdev,
39174 @@ -1376,7 +1376,7 @@ static ssize_t metrics_bytes_identical_show(struct device *fbdev,
39175 struct fb_info *fb_info = dev_get_drvdata(fbdev);
39176 struct dlfb_data *dev = fb_info->par;
39177 return snprintf(buf, PAGE_SIZE, "%u\n",
39178 - atomic_read(&dev->bytes_identical));
39179 + atomic_read_unchecked(&dev->bytes_identical));
39180 }
39181
39182 static ssize_t metrics_bytes_sent_show(struct device *fbdev,
39183 @@ -1384,7 +1384,7 @@ static ssize_t metrics_bytes_sent_show(struct device *fbdev,
39184 struct fb_info *fb_info = dev_get_drvdata(fbdev);
39185 struct dlfb_data *dev = fb_info->par;
39186 return snprintf(buf, PAGE_SIZE, "%u\n",
39187 - atomic_read(&dev->bytes_sent));
39188 + atomic_read_unchecked(&dev->bytes_sent));
39189 }
39190
39191 static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
39192 @@ -1392,7 +1392,7 @@ static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
39193 struct fb_info *fb_info = dev_get_drvdata(fbdev);
39194 struct dlfb_data *dev = fb_info->par;
39195 return snprintf(buf, PAGE_SIZE, "%u\n",
39196 - atomic_read(&dev->cpu_kcycles_used));
39197 + atomic_read_unchecked(&dev->cpu_kcycles_used));
39198 }
39199
39200 static ssize_t edid_show(
39201 @@ -1449,10 +1449,10 @@ static ssize_t metrics_reset_store(struct device *fbdev,
39202 struct fb_info *fb_info = dev_get_drvdata(fbdev);
39203 struct dlfb_data *dev = fb_info->par;
39204
39205 - atomic_set(&dev->bytes_rendered, 0);
39206 - atomic_set(&dev->bytes_identical, 0);
39207 - atomic_set(&dev->bytes_sent, 0);
39208 - atomic_set(&dev->cpu_kcycles_used, 0);
39209 + atomic_set_unchecked(&dev->bytes_rendered, 0);
39210 + atomic_set_unchecked(&dev->bytes_identical, 0);
39211 + atomic_set_unchecked(&dev->bytes_sent, 0);
39212 + atomic_set_unchecked(&dev->cpu_kcycles_used, 0);
39213
39214 return count;
39215 }
39216 diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
39217 index 7f8472c..9842e87 100644
39218 --- a/drivers/video/uvesafb.c
39219 +++ b/drivers/video/uvesafb.c
39220 @@ -19,6 +19,7 @@
39221 #include <linux/io.h>
39222 #include <linux/mutex.h>
39223 #include <linux/slab.h>
39224 +#include <linux/moduleloader.h>
39225 #include <video/edid.h>
39226 #include <video/uvesafb.h>
39227 #ifdef CONFIG_X86
39228 @@ -121,7 +122,7 @@ static int uvesafb_helper_start(void)
39229 NULL,
39230 };
39231
39232 - return call_usermodehelper(v86d_path, argv, envp, 1);
39233 + return call_usermodehelper(v86d_path, argv, envp, UMH_WAIT_PROC);
39234 }
39235
39236 /*
39237 @@ -569,10 +570,32 @@ static int __devinit uvesafb_vbe_getpmi(struct uvesafb_ktask *task,
39238 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
39239 par->pmi_setpal = par->ypan = 0;
39240 } else {
39241 +
39242 +#ifdef CONFIG_PAX_KERNEXEC
39243 +#ifdef CONFIG_MODULES
39244 + par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
39245 +#endif
39246 + if (!par->pmi_code) {
39247 + par->pmi_setpal = par->ypan = 0;
39248 + return 0;
39249 + }
39250 +#endif
39251 +
39252 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
39253 + task->t.regs.edi);
39254 +
39255 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39256 + pax_open_kernel();
39257 + memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
39258 + pax_close_kernel();
39259 +
39260 + par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
39261 + par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
39262 +#else
39263 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
39264 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
39265 +#endif
39266 +
39267 printk(KERN_INFO "uvesafb: protected mode interface info at "
39268 "%04x:%04x\n",
39269 (u16)task->t.regs.es, (u16)task->t.regs.edi);
39270 @@ -1821,6 +1844,11 @@ out:
39271 if (par->vbe_modes)
39272 kfree(par->vbe_modes);
39273
39274 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39275 + if (par->pmi_code)
39276 + module_free_exec(NULL, par->pmi_code);
39277 +#endif
39278 +
39279 framebuffer_release(info);
39280 return err;
39281 }
39282 @@ -1847,6 +1875,12 @@ static int uvesafb_remove(struct platform_device *dev)
39283 kfree(par->vbe_state_orig);
39284 if (par->vbe_state_saved)
39285 kfree(par->vbe_state_saved);
39286 +
39287 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39288 + if (par->pmi_code)
39289 + module_free_exec(NULL, par->pmi_code);
39290 +#endif
39291 +
39292 }
39293
39294 framebuffer_release(info);
39295 diff --git a/drivers/video/vesafb.c b/drivers/video/vesafb.c
39296 index 501b340..86bd4cf 100644
39297 --- a/drivers/video/vesafb.c
39298 +++ b/drivers/video/vesafb.c
39299 @@ -9,6 +9,7 @@
39300 */
39301
39302 #include <linux/module.h>
39303 +#include <linux/moduleloader.h>
39304 #include <linux/kernel.h>
39305 #include <linux/errno.h>
39306 #include <linux/string.h>
39307 @@ -52,8 +53,8 @@ static int vram_remap __initdata; /* Set amount of memory to be used */
39308 static int vram_total __initdata; /* Set total amount of memory */
39309 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
39310 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
39311 -static void (*pmi_start)(void) __read_mostly;
39312 -static void (*pmi_pal) (void) __read_mostly;
39313 +static void (*pmi_start)(void) __read_only;
39314 +static void (*pmi_pal) (void) __read_only;
39315 static int depth __read_mostly;
39316 static int vga_compat __read_mostly;
39317 /* --------------------------------------------------------------------- */
39318 @@ -233,6 +234,7 @@ static int __init vesafb_probe(struct platform_device *dev)
39319 unsigned int size_vmode;
39320 unsigned int size_remap;
39321 unsigned int size_total;
39322 + void *pmi_code = NULL;
39323
39324 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
39325 return -ENODEV;
39326 @@ -275,10 +277,6 @@ static int __init vesafb_probe(struct platform_device *dev)
39327 size_remap = size_total;
39328 vesafb_fix.smem_len = size_remap;
39329
39330 -#ifndef __i386__
39331 - screen_info.vesapm_seg = 0;
39332 -#endif
39333 -
39334 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
39335 printk(KERN_WARNING
39336 "vesafb: cannot reserve video memory at 0x%lx\n",
39337 @@ -307,9 +305,21 @@ static int __init vesafb_probe(struct platform_device *dev)
39338 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
39339 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
39340
39341 +#ifdef __i386__
39342 +
39343 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39344 + pmi_code = module_alloc_exec(screen_info.vesapm_size);
39345 + if (!pmi_code)
39346 +#elif !defined(CONFIG_PAX_KERNEXEC)
39347 + if (0)
39348 +#endif
39349 +
39350 +#endif
39351 + screen_info.vesapm_seg = 0;
39352 +
39353 if (screen_info.vesapm_seg) {
39354 - printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
39355 - screen_info.vesapm_seg,screen_info.vesapm_off);
39356 + printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
39357 + screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
39358 }
39359
39360 if (screen_info.vesapm_seg < 0xc000)
39361 @@ -317,9 +327,25 @@ static int __init vesafb_probe(struct platform_device *dev)
39362
39363 if (ypan || pmi_setpal) {
39364 unsigned short *pmi_base;
39365 +
39366 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
39367 - pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
39368 - pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
39369 +
39370 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39371 + pax_open_kernel();
39372 + memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
39373 +#else
39374 + pmi_code = pmi_base;
39375 +#endif
39376 +
39377 + pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
39378 + pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
39379 +
39380 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39381 + pmi_start = ktva_ktla(pmi_start);
39382 + pmi_pal = ktva_ktla(pmi_pal);
39383 + pax_close_kernel();
39384 +#endif
39385 +
39386 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
39387 if (pmi_base[3]) {
39388 printk(KERN_INFO "vesafb: pmi: ports = ");
39389 @@ -488,6 +514,11 @@ static int __init vesafb_probe(struct platform_device *dev)
39390 info->node, info->fix.id);
39391 return 0;
39392 err:
39393 +
39394 +#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39395 + module_free_exec(NULL, pmi_code);
39396 +#endif
39397 +
39398 if (info->screen_base)
39399 iounmap(info->screen_base);
39400 framebuffer_release(info);
39401 diff --git a/drivers/video/via/via_clock.h b/drivers/video/via/via_clock.h
39402 index 88714ae..16c2e11 100644
39403 --- a/drivers/video/via/via_clock.h
39404 +++ b/drivers/video/via/via_clock.h
39405 @@ -56,7 +56,7 @@ struct via_clock {
39406
39407 void (*set_engine_pll_state)(u8 state);
39408 void (*set_engine_pll)(struct via_pll_config config);
39409 -};
39410 +} __no_const;
39411
39412
39413 static inline u32 get_pll_internal_frequency(u32 ref_freq,
39414 diff --git a/drivers/xen/xen-pciback/conf_space.h b/drivers/xen/xen-pciback/conf_space.h
39415 index e56c934..fc22f4b 100644
39416 --- a/drivers/xen/xen-pciback/conf_space.h
39417 +++ b/drivers/xen/xen-pciback/conf_space.h
39418 @@ -44,15 +44,15 @@ struct config_field {
39419 struct {
39420 conf_dword_write write;
39421 conf_dword_read read;
39422 - } dw;
39423 + } __no_const dw;
39424 struct {
39425 conf_word_write write;
39426 conf_word_read read;
39427 - } w;
39428 + } __no_const w;
39429 struct {
39430 conf_byte_write write;
39431 conf_byte_read read;
39432 - } b;
39433 + } __no_const b;
39434 } u;
39435 struct list_head list;
39436 };
39437 diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
39438 index 879ed88..bc03a01 100644
39439 --- a/fs/9p/vfs_inode.c
39440 +++ b/fs/9p/vfs_inode.c
39441 @@ -1286,7 +1286,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
39442 void
39443 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
39444 {
39445 - char *s = nd_get_link(nd);
39446 + const char *s = nd_get_link(nd);
39447
39448 P9_DPRINTK(P9_DEBUG_VFS, " %s %s\n", dentry->d_name.name,
39449 IS_ERR(s) ? "<error>" : s);
39450 diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
39451 index 79e2ca7..5828ad1 100644
39452 --- a/fs/Kconfig.binfmt
39453 +++ b/fs/Kconfig.binfmt
39454 @@ -86,7 +86,7 @@ config HAVE_AOUT
39455
39456 config BINFMT_AOUT
39457 tristate "Kernel support for a.out and ECOFF binaries"
39458 - depends on HAVE_AOUT
39459 + depends on HAVE_AOUT && BROKEN
39460 ---help---
39461 A.out (Assembler.OUTput) is a set of formats for libraries and
39462 executables used in the earliest versions of UNIX. Linux used
39463 diff --git a/fs/aio.c b/fs/aio.c
39464 index 969beb0..09fab51 100644
39465 --- a/fs/aio.c
39466 +++ b/fs/aio.c
39467 @@ -119,7 +119,7 @@ static int aio_setup_ring(struct kioctx *ctx)
39468 size += sizeof(struct io_event) * nr_events;
39469 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
39470
39471 - if (nr_pages < 0)
39472 + if (nr_pages <= 0)
39473 return -EINVAL;
39474
39475 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
39476 @@ -1461,22 +1461,27 @@ static ssize_t aio_fsync(struct kiocb *iocb)
39477 static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
39478 {
39479 ssize_t ret;
39480 + struct iovec iovstack;
39481
39482 #ifdef CONFIG_COMPAT
39483 if (compat)
39484 ret = compat_rw_copy_check_uvector(type,
39485 (struct compat_iovec __user *)kiocb->ki_buf,
39486 - kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
39487 + kiocb->ki_nbytes, 1, &iovstack,
39488 &kiocb->ki_iovec, 1);
39489 else
39490 #endif
39491 ret = rw_copy_check_uvector(type,
39492 (struct iovec __user *)kiocb->ki_buf,
39493 - kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
39494 + kiocb->ki_nbytes, 1, &iovstack,
39495 &kiocb->ki_iovec, 1);
39496 if (ret < 0)
39497 goto out;
39498
39499 + if (kiocb->ki_iovec == &iovstack) {
39500 + kiocb->ki_inline_vec = iovstack;
39501 + kiocb->ki_iovec = &kiocb->ki_inline_vec;
39502 + }
39503 kiocb->ki_nr_segs = kiocb->ki_nbytes;
39504 kiocb->ki_cur_seg = 0;
39505 /* ki_nbytes/left now reflect bytes instead of segs */
39506 diff --git a/fs/attr.c b/fs/attr.c
39507 index 7ee7ba4..0c61a60 100644
39508 --- a/fs/attr.c
39509 +++ b/fs/attr.c
39510 @@ -99,6 +99,7 @@ int inode_newsize_ok(const struct inode *inode, loff_t offset)
39511 unsigned long limit;
39512
39513 limit = rlimit(RLIMIT_FSIZE);
39514 + gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
39515 if (limit != RLIM_INFINITY && offset > limit)
39516 goto out_sig;
39517 if (offset > inode->i_sb->s_maxbytes)
39518 diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
39519 index e1fbdee..cd5ea56 100644
39520 --- a/fs/autofs4/waitq.c
39521 +++ b/fs/autofs4/waitq.c
39522 @@ -60,7 +60,7 @@ static int autofs4_write(struct file *file, const void *addr, int bytes)
39523 {
39524 unsigned long sigpipe, flags;
39525 mm_segment_t fs;
39526 - const char *data = (const char *)addr;
39527 + const char __user *data = (const char __force_user *)addr;
39528 ssize_t wr = 0;
39529
39530 /** WARNING: this is not safe for writing more than PIPE_BUF bytes! **/
39531 diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
39532 index 8342ca6..82fd192 100644
39533 --- a/fs/befs/linuxvfs.c
39534 +++ b/fs/befs/linuxvfs.c
39535 @@ -503,7 +503,7 @@ static void befs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
39536 {
39537 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
39538 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
39539 - char *link = nd_get_link(nd);
39540 + const char *link = nd_get_link(nd);
39541 if (!IS_ERR(link))
39542 kfree(link);
39543 }
39544 diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
39545 index a6395bd..a5b24c4 100644
39546 --- a/fs/binfmt_aout.c
39547 +++ b/fs/binfmt_aout.c
39548 @@ -16,6 +16,7 @@
39549 #include <linux/string.h>
39550 #include <linux/fs.h>
39551 #include <linux/file.h>
39552 +#include <linux/security.h>
39553 #include <linux/stat.h>
39554 #include <linux/fcntl.h>
39555 #include <linux/ptrace.h>
39556 @@ -86,6 +87,8 @@ static int aout_core_dump(struct coredump_params *cprm)
39557 #endif
39558 # define START_STACK(u) ((void __user *)u.start_stack)
39559
39560 + memset(&dump, 0, sizeof(dump));
39561 +
39562 fs = get_fs();
39563 set_fs(KERNEL_DS);
39564 has_dumped = 1;
39565 @@ -97,10 +100,12 @@ static int aout_core_dump(struct coredump_params *cprm)
39566
39567 /* If the size of the dump file exceeds the rlimit, then see what would happen
39568 if we wrote the stack, but not the data area. */
39569 + gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
39570 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit)
39571 dump.u_dsize = 0;
39572
39573 /* Make sure we have enough room to write the stack and data areas. */
39574 + gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
39575 if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
39576 dump.u_ssize = 0;
39577
39578 @@ -234,6 +239,8 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
39579 rlim = rlimit(RLIMIT_DATA);
39580 if (rlim >= RLIM_INFINITY)
39581 rlim = ~0;
39582 +
39583 + gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
39584 if (ex.a_data + ex.a_bss > rlim)
39585 return -ENOMEM;
39586
39587 @@ -262,6 +269,27 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
39588 install_exec_creds(bprm);
39589 current->flags &= ~PF_FORKNOEXEC;
39590
39591 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
39592 + current->mm->pax_flags = 0UL;
39593 +#endif
39594 +
39595 +#ifdef CONFIG_PAX_PAGEEXEC
39596 + if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
39597 + current->mm->pax_flags |= MF_PAX_PAGEEXEC;
39598 +
39599 +#ifdef CONFIG_PAX_EMUTRAMP
39600 + if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
39601 + current->mm->pax_flags |= MF_PAX_EMUTRAMP;
39602 +#endif
39603 +
39604 +#ifdef CONFIG_PAX_MPROTECT
39605 + if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
39606 + current->mm->pax_flags |= MF_PAX_MPROTECT;
39607 +#endif
39608 +
39609 + }
39610 +#endif
39611 +
39612 if (N_MAGIC(ex) == OMAGIC) {
39613 unsigned long text_addr, map_size;
39614 loff_t pos;
39615 @@ -334,7 +362,7 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
39616
39617 down_write(&current->mm->mmap_sem);
39618 error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
39619 - PROT_READ | PROT_WRITE | PROT_EXEC,
39620 + PROT_READ | PROT_WRITE,
39621 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
39622 fd_offset + ex.a_text);
39623 up_write(&current->mm->mmap_sem);
39624 diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
39625 index 21ac5ee..31d14e9 100644
39626 --- a/fs/binfmt_elf.c
39627 +++ b/fs/binfmt_elf.c
39628 @@ -32,6 +32,7 @@
39629 #include <linux/elf.h>
39630 #include <linux/utsname.h>
39631 #include <linux/coredump.h>
39632 +#include <linux/xattr.h>
39633 #include <asm/uaccess.h>
39634 #include <asm/param.h>
39635 #include <asm/page.h>
39636 @@ -51,6 +52,10 @@ static int elf_core_dump(struct coredump_params *cprm);
39637 #define elf_core_dump NULL
39638 #endif
39639
39640 +#ifdef CONFIG_PAX_MPROTECT
39641 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
39642 +#endif
39643 +
39644 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
39645 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
39646 #else
39647 @@ -70,6 +75,11 @@ static struct linux_binfmt elf_format = {
39648 .load_binary = load_elf_binary,
39649 .load_shlib = load_elf_library,
39650 .core_dump = elf_core_dump,
39651 +
39652 +#ifdef CONFIG_PAX_MPROTECT
39653 + .handle_mprotect= elf_handle_mprotect,
39654 +#endif
39655 +
39656 .min_coredump = ELF_EXEC_PAGESIZE,
39657 };
39658
39659 @@ -77,6 +87,8 @@ static struct linux_binfmt elf_format = {
39660
39661 static int set_brk(unsigned long start, unsigned long end)
39662 {
39663 + unsigned long e = end;
39664 +
39665 start = ELF_PAGEALIGN(start);
39666 end = ELF_PAGEALIGN(end);
39667 if (end > start) {
39668 @@ -87,7 +99,7 @@ static int set_brk(unsigned long start, unsigned long end)
39669 if (BAD_ADDR(addr))
39670 return addr;
39671 }
39672 - current->mm->start_brk = current->mm->brk = end;
39673 + current->mm->start_brk = current->mm->brk = e;
39674 return 0;
39675 }
39676
39677 @@ -148,12 +160,13 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
39678 elf_addr_t __user *u_rand_bytes;
39679 const char *k_platform = ELF_PLATFORM;
39680 const char *k_base_platform = ELF_BASE_PLATFORM;
39681 - unsigned char k_rand_bytes[16];
39682 + u32 k_rand_bytes[4];
39683 int items;
39684 elf_addr_t *elf_info;
39685 int ei_index = 0;
39686 const struct cred *cred = current_cred();
39687 struct vm_area_struct *vma;
39688 + unsigned long saved_auxv[AT_VECTOR_SIZE];
39689
39690 /*
39691 * In some cases (e.g. Hyper-Threading), we want to avoid L1
39692 @@ -195,8 +208,12 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
39693 * Generate 16 random bytes for userspace PRNG seeding.
39694 */
39695 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
39696 - u_rand_bytes = (elf_addr_t __user *)
39697 - STACK_ALLOC(p, sizeof(k_rand_bytes));
39698 + srandom32(k_rand_bytes[0] ^ random32());
39699 + srandom32(k_rand_bytes[1] ^ random32());
39700 + srandom32(k_rand_bytes[2] ^ random32());
39701 + srandom32(k_rand_bytes[3] ^ random32());
39702 + p = STACK_ROUND(p, sizeof(k_rand_bytes));
39703 + u_rand_bytes = (elf_addr_t __user *) p;
39704 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
39705 return -EFAULT;
39706
39707 @@ -308,9 +325,11 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
39708 return -EFAULT;
39709 current->mm->env_end = p;
39710
39711 + memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
39712 +
39713 /* Put the elf_info on the stack in the right place. */
39714 sp = (elf_addr_t __user *)envp + 1;
39715 - if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
39716 + if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
39717 return -EFAULT;
39718 return 0;
39719 }
39720 @@ -381,10 +400,10 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
39721 {
39722 struct elf_phdr *elf_phdata;
39723 struct elf_phdr *eppnt;
39724 - unsigned long load_addr = 0;
39725 + unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
39726 int load_addr_set = 0;
39727 unsigned long last_bss = 0, elf_bss = 0;
39728 - unsigned long error = ~0UL;
39729 + unsigned long error = -EINVAL;
39730 unsigned long total_size;
39731 int retval, i, size;
39732
39733 @@ -430,6 +449,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
39734 goto out_close;
39735 }
39736
39737 +#ifdef CONFIG_PAX_SEGMEXEC
39738 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
39739 + pax_task_size = SEGMEXEC_TASK_SIZE;
39740 +#endif
39741 +
39742 eppnt = elf_phdata;
39743 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
39744 if (eppnt->p_type == PT_LOAD) {
39745 @@ -473,8 +497,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
39746 k = load_addr + eppnt->p_vaddr;
39747 if (BAD_ADDR(k) ||
39748 eppnt->p_filesz > eppnt->p_memsz ||
39749 - eppnt->p_memsz > TASK_SIZE ||
39750 - TASK_SIZE - eppnt->p_memsz < k) {
39751 + eppnt->p_memsz > pax_task_size ||
39752 + pax_task_size - eppnt->p_memsz < k) {
39753 error = -ENOMEM;
39754 goto out_close;
39755 }
39756 @@ -528,6 +552,351 @@ out:
39757 return error;
39758 }
39759
39760 +static unsigned long pax_parse_pt_pax_softmode(const struct elf_phdr * const elf_phdata)
39761 +{
39762 + unsigned long pax_flags = 0UL;
39763 +
39764 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
39765 +
39766 +#ifdef CONFIG_PAX_PAGEEXEC
39767 + if (elf_phdata->p_flags & PF_PAGEEXEC)
39768 + pax_flags |= MF_PAX_PAGEEXEC;
39769 +#endif
39770 +
39771 +#ifdef CONFIG_PAX_SEGMEXEC
39772 + if (elf_phdata->p_flags & PF_SEGMEXEC)
39773 + pax_flags |= MF_PAX_SEGMEXEC;
39774 +#endif
39775 +
39776 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
39777 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
39778 + if ((__supported_pte_mask & _PAGE_NX))
39779 + pax_flags &= ~MF_PAX_SEGMEXEC;
39780 + else
39781 + pax_flags &= ~MF_PAX_PAGEEXEC;
39782 + }
39783 +#endif
39784 +
39785 +#ifdef CONFIG_PAX_EMUTRAMP
39786 + if (elf_phdata->p_flags & PF_EMUTRAMP)
39787 + pax_flags |= MF_PAX_EMUTRAMP;
39788 +#endif
39789 +
39790 +#ifdef CONFIG_PAX_MPROTECT
39791 + if (elf_phdata->p_flags & PF_MPROTECT)
39792 + pax_flags |= MF_PAX_MPROTECT;
39793 +#endif
39794 +
39795 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
39796 + if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
39797 + pax_flags |= MF_PAX_RANDMMAP;
39798 +#endif
39799 +
39800 +#endif
39801 +
39802 + return pax_flags;
39803 +}
39804 +
39805 +static unsigned long pax_parse_pt_pax_hardmode(const struct elf_phdr * const elf_phdata)
39806 +{
39807 + unsigned long pax_flags = 0UL;
39808 +
39809 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
39810 +
39811 +#ifdef CONFIG_PAX_PAGEEXEC
39812 + if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
39813 + pax_flags |= MF_PAX_PAGEEXEC;
39814 +#endif
39815 +
39816 +#ifdef CONFIG_PAX_SEGMEXEC
39817 + if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
39818 + pax_flags |= MF_PAX_SEGMEXEC;
39819 +#endif
39820 +
39821 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
39822 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
39823 + if ((__supported_pte_mask & _PAGE_NX))
39824 + pax_flags &= ~MF_PAX_SEGMEXEC;
39825 + else
39826 + pax_flags &= ~MF_PAX_PAGEEXEC;
39827 + }
39828 +#endif
39829 +
39830 +#ifdef CONFIG_PAX_EMUTRAMP
39831 + if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
39832 + pax_flags |= MF_PAX_EMUTRAMP;
39833 +#endif
39834 +
39835 +#ifdef CONFIG_PAX_MPROTECT
39836 + if (!(elf_phdata->p_flags & PF_NOMPROTECT))
39837 + pax_flags |= MF_PAX_MPROTECT;
39838 +#endif
39839 +
39840 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
39841 + if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
39842 + pax_flags |= MF_PAX_RANDMMAP;
39843 +#endif
39844 +
39845 +#endif
39846 +
39847 + return pax_flags;
39848 +}
39849 +
39850 +static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
39851 +{
39852 + unsigned long pax_flags = 0UL;
39853 +
39854 +#ifdef CONFIG_PAX_EI_PAX
39855 +
39856 +#ifdef CONFIG_PAX_PAGEEXEC
39857 + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
39858 + pax_flags |= MF_PAX_PAGEEXEC;
39859 +#endif
39860 +
39861 +#ifdef CONFIG_PAX_SEGMEXEC
39862 + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
39863 + pax_flags |= MF_PAX_SEGMEXEC;
39864 +#endif
39865 +
39866 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
39867 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
39868 + if ((__supported_pte_mask & _PAGE_NX))
39869 + pax_flags &= ~MF_PAX_SEGMEXEC;
39870 + else
39871 + pax_flags &= ~MF_PAX_PAGEEXEC;
39872 + }
39873 +#endif
39874 +
39875 +#ifdef CONFIG_PAX_EMUTRAMP
39876 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
39877 + pax_flags |= MF_PAX_EMUTRAMP;
39878 +#endif
39879 +
39880 +#ifdef CONFIG_PAX_MPROTECT
39881 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
39882 + pax_flags |= MF_PAX_MPROTECT;
39883 +#endif
39884 +
39885 +#ifdef CONFIG_PAX_ASLR
39886 + if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
39887 + pax_flags |= MF_PAX_RANDMMAP;
39888 +#endif
39889 +
39890 +#else
39891 +
39892 +#ifdef CONFIG_PAX_PAGEEXEC
39893 + pax_flags |= MF_PAX_PAGEEXEC;
39894 +#endif
39895 +
39896 +#ifdef CONFIG_PAX_MPROTECT
39897 + pax_flags |= MF_PAX_MPROTECT;
39898 +#endif
39899 +
39900 +#ifdef CONFIG_PAX_RANDMMAP
39901 + pax_flags |= MF_PAX_RANDMMAP;
39902 +#endif
39903 +
39904 +#ifdef CONFIG_PAX_SEGMEXEC
39905 + if (!(__supported_pte_mask & _PAGE_NX)) {
39906 + pax_flags &= ~MF_PAX_PAGEEXEC;
39907 + pax_flags |= MF_PAX_SEGMEXEC;
39908 + }
39909 +#endif
39910 +
39911 +#endif
39912 +
39913 + return pax_flags;
39914 +}
39915 +
39916 +static unsigned long pax_parse_pt_pax(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
39917 +{
39918 +
39919 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
39920 + unsigned long i;
39921 +
39922 + for (i = 0UL; i < elf_ex->e_phnum; i++)
39923 + if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
39924 + if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
39925 + ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
39926 + ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
39927 + ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
39928 + ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
39929 + return ~0UL;
39930 +
39931 +#ifdef CONFIG_PAX_SOFTMODE
39932 + if (pax_softmode)
39933 + return pax_parse_pt_pax_softmode(&elf_phdata[i]);
39934 + else
39935 +#endif
39936 +
39937 + return pax_parse_pt_pax_hardmode(&elf_phdata[i]);
39938 + break;
39939 + }
39940 +#endif
39941 +
39942 + return ~0UL;
39943 +}
39944 +
39945 +#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
39946 +static unsigned long pax_parse_xattr_pax_softmode(unsigned long pax_flags_softmode)
39947 +{
39948 + unsigned long pax_flags = 0UL;
39949 +
39950 +#ifdef CONFIG_PAX_PAGEEXEC
39951 + if (pax_flags_softmode & MF_PAX_PAGEEXEC)
39952 + pax_flags |= MF_PAX_PAGEEXEC;
39953 +#endif
39954 +
39955 +#ifdef CONFIG_PAX_SEGMEXEC
39956 + if (pax_flags_softmode & MF_PAX_SEGMEXEC)
39957 + pax_flags |= MF_PAX_SEGMEXEC;
39958 +#endif
39959 +
39960 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
39961 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
39962 + if ((__supported_pte_mask & _PAGE_NX))
39963 + pax_flags &= ~MF_PAX_SEGMEXEC;
39964 + else
39965 + pax_flags &= ~MF_PAX_PAGEEXEC;
39966 + }
39967 +#endif
39968 +
39969 +#ifdef CONFIG_PAX_EMUTRAMP
39970 + if (pax_flags_softmode & MF_PAX_EMUTRAMP)
39971 + pax_flags |= MF_PAX_EMUTRAMP;
39972 +#endif
39973 +
39974 +#ifdef CONFIG_PAX_MPROTECT
39975 + if (pax_flags_softmode & MF_PAX_MPROTECT)
39976 + pax_flags |= MF_PAX_MPROTECT;
39977 +#endif
39978 +
39979 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
39980 + if (randomize_va_space && (pax_flags_softmode & MF_PAX_RANDMMAP))
39981 + pax_flags |= MF_PAX_RANDMMAP;
39982 +#endif
39983 +
39984 + return pax_flags;
39985 +}
39986 +
39987 +static unsigned long pax_parse_xattr_pax_hardmode(unsigned long pax_flags_hardmode)
39988 +{
39989 + unsigned long pax_flags = 0UL;
39990 +
39991 +#ifdef CONFIG_PAX_PAGEEXEC
39992 + if (!(pax_flags_hardmode & MF_PAX_PAGEEXEC))
39993 + pax_flags |= MF_PAX_PAGEEXEC;
39994 +#endif
39995 +
39996 +#ifdef CONFIG_PAX_SEGMEXEC
39997 + if (!(pax_flags_hardmode & MF_PAX_SEGMEXEC))
39998 + pax_flags |= MF_PAX_SEGMEXEC;
39999 +#endif
40000 +
40001 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
40002 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
40003 + if ((__supported_pte_mask & _PAGE_NX))
40004 + pax_flags &= ~MF_PAX_SEGMEXEC;
40005 + else
40006 + pax_flags &= ~MF_PAX_PAGEEXEC;
40007 + }
40008 +#endif
40009 +
40010 +#ifdef CONFIG_PAX_EMUTRAMP
40011 + if (!(pax_flags_hardmode & MF_PAX_EMUTRAMP))
40012 + pax_flags |= MF_PAX_EMUTRAMP;
40013 +#endif
40014 +
40015 +#ifdef CONFIG_PAX_MPROTECT
40016 + if (!(pax_flags_hardmode & MF_PAX_MPROTECT))
40017 + pax_flags |= MF_PAX_MPROTECT;
40018 +#endif
40019 +
40020 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
40021 + if (randomize_va_space && !(pax_flags_hardmode & MF_PAX_RANDMMAP))
40022 + pax_flags |= MF_PAX_RANDMMAP;
40023 +#endif
40024 +
40025 + return pax_flags;
40026 +}
40027 +#endif
40028 +
40029 +static unsigned long pax_parse_xattr_pax(struct file * const file)
40030 +{
40031 +
40032 +#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
40033 + ssize_t xattr_size, i;
40034 + unsigned char xattr_value[5];
40035 + unsigned long pax_flags_hardmode = 0UL, pax_flags_softmode = 0UL;
40036 +
40037 + xattr_size = vfs_getxattr(file->f_path.dentry, XATTR_NAME_PAX_FLAGS, xattr_value, sizeof xattr_value);
40038 + if (xattr_size <= 0)
40039 + return ~0UL;
40040 +
40041 + for (i = 0; i < xattr_size; i++)
40042 + switch (xattr_value[i]) {
40043 + default:
40044 + return ~0UL;
40045 +
40046 +#define parse_flag(option1, option2, flag) \
40047 + case option1: \
40048 + pax_flags_hardmode |= MF_PAX_##flag; \
40049 + break; \
40050 + case option2: \
40051 + pax_flags_softmode |= MF_PAX_##flag; \
40052 + break;
40053 +
40054 + parse_flag('p', 'P', PAGEEXEC);
40055 + parse_flag('e', 'E', EMUTRAMP);
40056 + parse_flag('m', 'M', MPROTECT);
40057 + parse_flag('r', 'R', RANDMMAP);
40058 + parse_flag('s', 'S', SEGMEXEC);
40059 +
40060 +#undef parse_flag
40061 + }
40062 +
40063 + if (pax_flags_hardmode & pax_flags_softmode)
40064 + return ~0UL;
40065 +
40066 +#ifdef CONFIG_PAX_SOFTMODE
40067 + if (pax_softmode)
40068 + return pax_parse_xattr_pax_softmode(pax_flags_softmode);
40069 + else
40070 +#endif
40071 +
40072 + return pax_parse_xattr_pax_hardmode(pax_flags_hardmode);
40073 +#else
40074 + return ~0UL;
40075 +#endif
40076 +
40077 +}
40078 +
40079 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_XATTR_PAX_FLAGS)
40080 +static long pax_parse_pax_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata, struct file * const file)
40081 +{
40082 + unsigned long pax_flags, pt_pax_flags, xattr_pax_flags;
40083 +
40084 + pax_flags = pax_parse_ei_pax(elf_ex);
40085 + pt_pax_flags = pax_parse_pt_pax(elf_ex, elf_phdata);
40086 + xattr_pax_flags = pax_parse_xattr_pax(file);
40087 +
40088 + if (pt_pax_flags == ~0UL)
40089 + pt_pax_flags = xattr_pax_flags;
40090 + else if (xattr_pax_flags == ~0UL)
40091 + xattr_pax_flags = pt_pax_flags;
40092 + if (pt_pax_flags != xattr_pax_flags)
40093 + return -EINVAL;
40094 + if (pt_pax_flags != ~0UL)
40095 + pax_flags = pt_pax_flags;
40096 +
40097 + if (0 > pax_check_flags(&pax_flags))
40098 + return -EINVAL;
40099 +
40100 + current->mm->pax_flags = pax_flags;
40101 + return 0;
40102 +}
40103 +#endif
40104 +
40105 /*
40106 * These are the functions used to load ELF style executables and shared
40107 * libraries. There is no binary dependent code anywhere else.
40108 @@ -544,6 +913,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
40109 {
40110 unsigned int random_variable = 0;
40111
40112 +#ifdef CONFIG_PAX_RANDUSTACK
40113 + if (randomize_va_space)
40114 + return stack_top - current->mm->delta_stack;
40115 +#endif
40116 +
40117 if ((current->flags & PF_RANDOMIZE) &&
40118 !(current->personality & ADDR_NO_RANDOMIZE)) {
40119 random_variable = get_random_int() & STACK_RND_MASK;
40120 @@ -562,7 +936,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
40121 unsigned long load_addr = 0, load_bias = 0;
40122 int load_addr_set = 0;
40123 char * elf_interpreter = NULL;
40124 - unsigned long error;
40125 + unsigned long error = 0;
40126 struct elf_phdr *elf_ppnt, *elf_phdata;
40127 unsigned long elf_bss, elf_brk;
40128 int retval, i;
40129 @@ -572,11 +946,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
40130 unsigned long start_code, end_code, start_data, end_data;
40131 unsigned long reloc_func_desc __maybe_unused = 0;
40132 int executable_stack = EXSTACK_DEFAULT;
40133 - unsigned long def_flags = 0;
40134 struct {
40135 struct elfhdr elf_ex;
40136 struct elfhdr interp_elf_ex;
40137 } *loc;
40138 + unsigned long pax_task_size = TASK_SIZE;
40139
40140 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
40141 if (!loc) {
40142 @@ -713,11 +1087,81 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
40143
40144 /* OK, This is the point of no return */
40145 current->flags &= ~PF_FORKNOEXEC;
40146 - current->mm->def_flags = def_flags;
40147 +
40148 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
40149 + current->mm->pax_flags = 0UL;
40150 +#endif
40151 +
40152 +#ifdef CONFIG_PAX_DLRESOLVE
40153 + current->mm->call_dl_resolve = 0UL;
40154 +#endif
40155 +
40156 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
40157 + current->mm->call_syscall = 0UL;
40158 +#endif
40159 +
40160 +#ifdef CONFIG_PAX_ASLR
40161 + current->mm->delta_mmap = 0UL;
40162 + current->mm->delta_stack = 0UL;
40163 +#endif
40164 +
40165 + current->mm->def_flags = 0;
40166 +
40167 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_XATTR_PAX_FLAGS)
40168 + if (0 > pax_parse_pax_flags(&loc->elf_ex, elf_phdata, bprm->file)) {
40169 + send_sig(SIGKILL, current, 0);
40170 + goto out_free_dentry;
40171 + }
40172 +#endif
40173 +
40174 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
40175 + pax_set_initial_flags(bprm);
40176 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
40177 + if (pax_set_initial_flags_func)
40178 + (pax_set_initial_flags_func)(bprm);
40179 +#endif
40180 +
40181 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
40182 + if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) {
40183 + current->mm->context.user_cs_limit = PAGE_SIZE;
40184 + current->mm->def_flags |= VM_PAGEEXEC;
40185 + }
40186 +#endif
40187 +
40188 +#ifdef CONFIG_PAX_SEGMEXEC
40189 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
40190 + current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
40191 + current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
40192 + pax_task_size = SEGMEXEC_TASK_SIZE;
40193 + current->mm->def_flags |= VM_NOHUGEPAGE;
40194 + }
40195 +#endif
40196 +
40197 +#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
40198 + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
40199 + set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
40200 + put_cpu();
40201 + }
40202 +#endif
40203
40204 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
40205 may depend on the personality. */
40206 SET_PERSONALITY(loc->elf_ex);
40207 +
40208 +#ifdef CONFIG_PAX_ASLR
40209 + if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
40210 + current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
40211 + current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
40212 + }
40213 +#endif
40214 +
40215 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
40216 + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
40217 + executable_stack = EXSTACK_DISABLE_X;
40218 + current->personality &= ~READ_IMPLIES_EXEC;
40219 + } else
40220 +#endif
40221 +
40222 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
40223 current->personality |= READ_IMPLIES_EXEC;
40224
40225 @@ -808,6 +1252,20 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
40226 #else
40227 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
40228 #endif
40229 +
40230 +#ifdef CONFIG_PAX_RANDMMAP
40231 + /* PaX: randomize base address at the default exe base if requested */
40232 + if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
40233 +#ifdef CONFIG_SPARC64
40234 + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
40235 +#else
40236 + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
40237 +#endif
40238 + load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
40239 + elf_flags |= MAP_FIXED;
40240 + }
40241 +#endif
40242 +
40243 }
40244
40245 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
40246 @@ -840,9 +1298,9 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
40247 * allowed task size. Note that p_filesz must always be
40248 * <= p_memsz so it is only necessary to check p_memsz.
40249 */
40250 - if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
40251 - elf_ppnt->p_memsz > TASK_SIZE ||
40252 - TASK_SIZE - elf_ppnt->p_memsz < k) {
40253 + if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
40254 + elf_ppnt->p_memsz > pax_task_size ||
40255 + pax_task_size - elf_ppnt->p_memsz < k) {
40256 /* set_brk can never work. Avoid overflows. */
40257 send_sig(SIGKILL, current, 0);
40258 retval = -EINVAL;
40259 @@ -870,6 +1328,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
40260 start_data += load_bias;
40261 end_data += load_bias;
40262
40263 +#ifdef CONFIG_PAX_RANDMMAP
40264 + if (current->mm->pax_flags & MF_PAX_RANDMMAP)
40265 + elf_brk += PAGE_SIZE + ((pax_get_random_long() & ~PAGE_MASK) << 4);
40266 +#endif
40267 +
40268 /* Calling set_brk effectively mmaps the pages that we need
40269 * for the bss and break sections. We must do this before
40270 * mapping in the interpreter, to make sure it doesn't wind
40271 @@ -881,9 +1344,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
40272 goto out_free_dentry;
40273 }
40274 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
40275 - send_sig(SIGSEGV, current, 0);
40276 - retval = -EFAULT; /* Nobody gets to see this, but.. */
40277 - goto out_free_dentry;
40278 + /*
40279 + * This bss-zeroing can fail if the ELF
40280 + * file specifies odd protections. So
40281 + * we don't check the return value
40282 + */
40283 }
40284
40285 if (elf_interpreter) {
40286 @@ -1098,7 +1563,7 @@ out:
40287 * Decide what to dump of a segment, part, all or none.
40288 */
40289 static unsigned long vma_dump_size(struct vm_area_struct *vma,
40290 - unsigned long mm_flags)
40291 + unsigned long mm_flags, long signr)
40292 {
40293 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
40294
40295 @@ -1132,7 +1597,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
40296 if (vma->vm_file == NULL)
40297 return 0;
40298
40299 - if (FILTER(MAPPED_PRIVATE))
40300 + if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
40301 goto whole;
40302
40303 /*
40304 @@ -1354,9 +1819,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
40305 {
40306 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
40307 int i = 0;
40308 - do
40309 + do {
40310 i += 2;
40311 - while (auxv[i - 2] != AT_NULL);
40312 + } while (auxv[i - 2] != AT_NULL);
40313 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
40314 }
40315
40316 @@ -1862,14 +2327,14 @@ static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
40317 }
40318
40319 static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
40320 - unsigned long mm_flags)
40321 + struct coredump_params *cprm)
40322 {
40323 struct vm_area_struct *vma;
40324 size_t size = 0;
40325
40326 for (vma = first_vma(current, gate_vma); vma != NULL;
40327 vma = next_vma(vma, gate_vma))
40328 - size += vma_dump_size(vma, mm_flags);
40329 + size += vma_dump_size(vma, cprm->mm_flags, cprm->signr);
40330 return size;
40331 }
40332
40333 @@ -1963,7 +2428,7 @@ static int elf_core_dump(struct coredump_params *cprm)
40334
40335 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
40336
40337 - offset += elf_core_vma_data_size(gate_vma, cprm->mm_flags);
40338 + offset += elf_core_vma_data_size(gate_vma, cprm);
40339 offset += elf_core_extra_data_size();
40340 e_shoff = offset;
40341
40342 @@ -1977,10 +2442,12 @@ static int elf_core_dump(struct coredump_params *cprm)
40343 offset = dataoff;
40344
40345 size += sizeof(*elf);
40346 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
40347 if (size > cprm->limit || !dump_write(cprm->file, elf, sizeof(*elf)))
40348 goto end_coredump;
40349
40350 size += sizeof(*phdr4note);
40351 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
40352 if (size > cprm->limit
40353 || !dump_write(cprm->file, phdr4note, sizeof(*phdr4note)))
40354 goto end_coredump;
40355 @@ -1994,7 +2461,7 @@ static int elf_core_dump(struct coredump_params *cprm)
40356 phdr.p_offset = offset;
40357 phdr.p_vaddr = vma->vm_start;
40358 phdr.p_paddr = 0;
40359 - phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags);
40360 + phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags, cprm->signr);
40361 phdr.p_memsz = vma->vm_end - vma->vm_start;
40362 offset += phdr.p_filesz;
40363 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
40364 @@ -2005,6 +2472,7 @@ static int elf_core_dump(struct coredump_params *cprm)
40365 phdr.p_align = ELF_EXEC_PAGESIZE;
40366
40367 size += sizeof(phdr);
40368 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
40369 if (size > cprm->limit
40370 || !dump_write(cprm->file, &phdr, sizeof(phdr)))
40371 goto end_coredump;
40372 @@ -2029,7 +2497,7 @@ static int elf_core_dump(struct coredump_params *cprm)
40373 unsigned long addr;
40374 unsigned long end;
40375
40376 - end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags);
40377 + end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags, cprm->signr);
40378
40379 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
40380 struct page *page;
40381 @@ -2038,6 +2506,7 @@ static int elf_core_dump(struct coredump_params *cprm)
40382 page = get_dump_page(addr);
40383 if (page) {
40384 void *kaddr = kmap(page);
40385 + gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
40386 stop = ((size += PAGE_SIZE) > cprm->limit) ||
40387 !dump_write(cprm->file, kaddr,
40388 PAGE_SIZE);
40389 @@ -2055,6 +2524,7 @@ static int elf_core_dump(struct coredump_params *cprm)
40390
40391 if (e_phnum == PN_XNUM) {
40392 size += sizeof(*shdr4extnum);
40393 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
40394 if (size > cprm->limit
40395 || !dump_write(cprm->file, shdr4extnum,
40396 sizeof(*shdr4extnum)))
40397 @@ -2075,6 +2545,97 @@ out:
40398
40399 #endif /* CONFIG_ELF_CORE */
40400
40401 +#ifdef CONFIG_PAX_MPROTECT
40402 +/* PaX: non-PIC ELF libraries need relocations on their executable segments
40403 + * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
40404 + * we'll remove VM_MAYWRITE for good on RELRO segments.
40405 + *
40406 + * The checks favour ld-linux.so behaviour which operates on a per ELF segment
40407 + * basis because we want to allow the common case and not the special ones.
40408 + */
40409 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
40410 +{
40411 + struct elfhdr elf_h;
40412 + struct elf_phdr elf_p;
40413 + unsigned long i;
40414 + unsigned long oldflags;
40415 + bool is_textrel_rw, is_textrel_rx, is_relro;
40416 +
40417 + if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
40418 + return;
40419 +
40420 + oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
40421 + newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
40422 +
40423 +#ifdef CONFIG_PAX_ELFRELOCS
40424 + /* possible TEXTREL */
40425 + is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
40426 + is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
40427 +#else
40428 + is_textrel_rw = false;
40429 + is_textrel_rx = false;
40430 +#endif
40431 +
40432 + /* possible RELRO */
40433 + is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
40434 +
40435 + if (!is_textrel_rw && !is_textrel_rx && !is_relro)
40436 + return;
40437 +
40438 + if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
40439 + memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
40440 +
40441 +#ifdef CONFIG_PAX_ETEXECRELOCS
40442 + ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
40443 +#else
40444 + ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
40445 +#endif
40446 +
40447 + (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
40448 + !elf_check_arch(&elf_h) ||
40449 + elf_h.e_phentsize != sizeof(struct elf_phdr) ||
40450 + elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
40451 + return;
40452 +
40453 + for (i = 0UL; i < elf_h.e_phnum; i++) {
40454 + if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
40455 + return;
40456 + switch (elf_p.p_type) {
40457 + case PT_DYNAMIC:
40458 + if (!is_textrel_rw && !is_textrel_rx)
40459 + continue;
40460 + i = 0UL;
40461 + while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
40462 + elf_dyn dyn;
40463 +
40464 + if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
40465 + return;
40466 + if (dyn.d_tag == DT_NULL)
40467 + return;
40468 + if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
40469 + gr_log_textrel(vma);
40470 + if (is_textrel_rw)
40471 + vma->vm_flags |= VM_MAYWRITE;
40472 + else
40473 + /* PaX: disallow write access after relocs are done, hopefully noone else needs it... */
40474 + vma->vm_flags &= ~VM_MAYWRITE;
40475 + return;
40476 + }
40477 + i++;
40478 + }
40479 + return;
40480 +
40481 + case PT_GNU_RELRO:
40482 + if (!is_relro)
40483 + continue;
40484 + if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
40485 + vma->vm_flags &= ~VM_MAYWRITE;
40486 + return;
40487 + }
40488 + }
40489 +}
40490 +#endif
40491 +
40492 static int __init init_elf_binfmt(void)
40493 {
40494 return register_binfmt(&elf_format);
40495 diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
40496 index 1bffbe0..c8c283e 100644
40497 --- a/fs/binfmt_flat.c
40498 +++ b/fs/binfmt_flat.c
40499 @@ -567,7 +567,9 @@ static int load_flat_file(struct linux_binprm * bprm,
40500 realdatastart = (unsigned long) -ENOMEM;
40501 printk("Unable to allocate RAM for process data, errno %d\n",
40502 (int)-realdatastart);
40503 + down_write(&current->mm->mmap_sem);
40504 do_munmap(current->mm, textpos, text_len);
40505 + up_write(&current->mm->mmap_sem);
40506 ret = realdatastart;
40507 goto err;
40508 }
40509 @@ -591,8 +593,10 @@ static int load_flat_file(struct linux_binprm * bprm,
40510 }
40511 if (IS_ERR_VALUE(result)) {
40512 printk("Unable to read data+bss, errno %d\n", (int)-result);
40513 + down_write(&current->mm->mmap_sem);
40514 do_munmap(current->mm, textpos, text_len);
40515 do_munmap(current->mm, realdatastart, len);
40516 + up_write(&current->mm->mmap_sem);
40517 ret = result;
40518 goto err;
40519 }
40520 @@ -661,8 +665,10 @@ static int load_flat_file(struct linux_binprm * bprm,
40521 }
40522 if (IS_ERR_VALUE(result)) {
40523 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
40524 + down_write(&current->mm->mmap_sem);
40525 do_munmap(current->mm, textpos, text_len + data_len + extra +
40526 MAX_SHARED_LIBS * sizeof(unsigned long));
40527 + up_write(&current->mm->mmap_sem);
40528 ret = result;
40529 goto err;
40530 }
40531 diff --git a/fs/bio.c b/fs/bio.c
40532 index b1fe82c..84da0a9 100644
40533 --- a/fs/bio.c
40534 +++ b/fs/bio.c
40535 @@ -1233,7 +1233,7 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
40536 const int read = bio_data_dir(bio) == READ;
40537 struct bio_map_data *bmd = bio->bi_private;
40538 int i;
40539 - char *p = bmd->sgvecs[0].iov_base;
40540 + char *p = (char __force_kernel *)bmd->sgvecs[0].iov_base;
40541
40542 __bio_for_each_segment(bvec, bio, i, 0) {
40543 char *addr = page_address(bvec->bv_page);
40544 diff --git a/fs/block_dev.c b/fs/block_dev.c
40545 index b07f1da..9efcb92 100644
40546 --- a/fs/block_dev.c
40547 +++ b/fs/block_dev.c
40548 @@ -681,7 +681,7 @@ static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
40549 else if (bdev->bd_contains == bdev)
40550 return true; /* is a whole device which isn't held */
40551
40552 - else if (whole->bd_holder == bd_may_claim)
40553 + else if (whole->bd_holder == (void *)bd_may_claim)
40554 return true; /* is a partition of a device that is being partitioned */
40555 else if (whole->bd_holder != NULL)
40556 return false; /* is a partition of a held device */
40557 diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
40558 index dede441..f2a2507 100644
40559 --- a/fs/btrfs/ctree.c
40560 +++ b/fs/btrfs/ctree.c
40561 @@ -488,9 +488,12 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
40562 free_extent_buffer(buf);
40563 add_root_to_dirty_list(root);
40564 } else {
40565 - if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
40566 - parent_start = parent->start;
40567 - else
40568 + if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
40569 + if (parent)
40570 + parent_start = parent->start;
40571 + else
40572 + parent_start = 0;
40573 + } else
40574 parent_start = 0;
40575
40576 WARN_ON(trans->transid != btrfs_header_generation(parent));
40577 diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
40578 index fd1a06d..6e9033d 100644
40579 --- a/fs/btrfs/inode.c
40580 +++ b/fs/btrfs/inode.c
40581 @@ -6895,7 +6895,7 @@ fail:
40582 return -ENOMEM;
40583 }
40584
40585 -static int btrfs_getattr(struct vfsmount *mnt,
40586 +int btrfs_getattr(struct vfsmount *mnt,
40587 struct dentry *dentry, struct kstat *stat)
40588 {
40589 struct inode *inode = dentry->d_inode;
40590 @@ -6909,6 +6909,14 @@ static int btrfs_getattr(struct vfsmount *mnt,
40591 return 0;
40592 }
40593
40594 +EXPORT_SYMBOL(btrfs_getattr);
40595 +
40596 +dev_t get_btrfs_dev_from_inode(struct inode *inode)
40597 +{
40598 + return BTRFS_I(inode)->root->anon_dev;
40599 +}
40600 +EXPORT_SYMBOL(get_btrfs_dev_from_inode);
40601 +
40602 /*
40603 * If a file is moved, it will inherit the cow and compression flags of the new
40604 * directory.
40605 diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
40606 index c04f02c..f5c9e2e 100644
40607 --- a/fs/btrfs/ioctl.c
40608 +++ b/fs/btrfs/ioctl.c
40609 @@ -2733,9 +2733,12 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
40610 for (i = 0; i < num_types; i++) {
40611 struct btrfs_space_info *tmp;
40612
40613 + /* Don't copy in more than we allocated */
40614 if (!slot_count)
40615 break;
40616
40617 + slot_count--;
40618 +
40619 info = NULL;
40620 rcu_read_lock();
40621 list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
40622 @@ -2757,15 +2760,12 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
40623 memcpy(dest, &space, sizeof(space));
40624 dest++;
40625 space_args.total_spaces++;
40626 - slot_count--;
40627 }
40628 - if (!slot_count)
40629 - break;
40630 }
40631 up_read(&info->groups_sem);
40632 }
40633
40634 - user_dest = (struct btrfs_ioctl_space_info *)
40635 + user_dest = (struct btrfs_ioctl_space_info __user *)
40636 (arg + sizeof(struct btrfs_ioctl_space_args));
40637
40638 if (copy_to_user(user_dest, dest_orig, alloc_size))
40639 diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
40640 index cfb5543..1ae7347 100644
40641 --- a/fs/btrfs/relocation.c
40642 +++ b/fs/btrfs/relocation.c
40643 @@ -1244,7 +1244,7 @@ static int __update_reloc_root(struct btrfs_root *root, int del)
40644 }
40645 spin_unlock(&rc->reloc_root_tree.lock);
40646
40647 - BUG_ON((struct btrfs_root *)node->data != root);
40648 + BUG_ON(!node || (struct btrfs_root *)node->data != root);
40649
40650 if (!del) {
40651 spin_lock(&rc->reloc_root_tree.lock);
40652 diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
40653 index 622f469..e8d2d55 100644
40654 --- a/fs/cachefiles/bind.c
40655 +++ b/fs/cachefiles/bind.c
40656 @@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
40657 args);
40658
40659 /* start by checking things over */
40660 - ASSERT(cache->fstop_percent >= 0 &&
40661 - cache->fstop_percent < cache->fcull_percent &&
40662 + ASSERT(cache->fstop_percent < cache->fcull_percent &&
40663 cache->fcull_percent < cache->frun_percent &&
40664 cache->frun_percent < 100);
40665
40666 - ASSERT(cache->bstop_percent >= 0 &&
40667 - cache->bstop_percent < cache->bcull_percent &&
40668 + ASSERT(cache->bstop_percent < cache->bcull_percent &&
40669 cache->bcull_percent < cache->brun_percent &&
40670 cache->brun_percent < 100);
40671
40672 diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
40673 index 0a1467b..6a53245 100644
40674 --- a/fs/cachefiles/daemon.c
40675 +++ b/fs/cachefiles/daemon.c
40676 @@ -196,7 +196,7 @@ static ssize_t cachefiles_daemon_read(struct file *file, char __user *_buffer,
40677 if (n > buflen)
40678 return -EMSGSIZE;
40679
40680 - if (copy_to_user(_buffer, buffer, n) != 0)
40681 + if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0)
40682 return -EFAULT;
40683
40684 return n;
40685 @@ -222,7 +222,7 @@ static ssize_t cachefiles_daemon_write(struct file *file,
40686 if (test_bit(CACHEFILES_DEAD, &cache->flags))
40687 return -EIO;
40688
40689 - if (datalen < 0 || datalen > PAGE_SIZE - 1)
40690 + if (datalen > PAGE_SIZE - 1)
40691 return -EOPNOTSUPP;
40692
40693 /* drag the command string into the kernel so we can parse it */
40694 @@ -386,7 +386,7 @@ static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
40695 if (args[0] != '%' || args[1] != '\0')
40696 return -EINVAL;
40697
40698 - if (fstop < 0 || fstop >= cache->fcull_percent)
40699 + if (fstop >= cache->fcull_percent)
40700 return cachefiles_daemon_range_error(cache, args);
40701
40702 cache->fstop_percent = fstop;
40703 @@ -458,7 +458,7 @@ static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args)
40704 if (args[0] != '%' || args[1] != '\0')
40705 return -EINVAL;
40706
40707 - if (bstop < 0 || bstop >= cache->bcull_percent)
40708 + if (bstop >= cache->bcull_percent)
40709 return cachefiles_daemon_range_error(cache, args);
40710
40711 cache->bstop_percent = bstop;
40712 diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
40713 index bd6bc1b..b627b53 100644
40714 --- a/fs/cachefiles/internal.h
40715 +++ b/fs/cachefiles/internal.h
40716 @@ -57,7 +57,7 @@ struct cachefiles_cache {
40717 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
40718 struct rb_root active_nodes; /* active nodes (can't be culled) */
40719 rwlock_t active_lock; /* lock for active_nodes */
40720 - atomic_t gravecounter; /* graveyard uniquifier */
40721 + atomic_unchecked_t gravecounter; /* graveyard uniquifier */
40722 unsigned frun_percent; /* when to stop culling (% files) */
40723 unsigned fcull_percent; /* when to start culling (% files) */
40724 unsigned fstop_percent; /* when to stop allocating (% files) */
40725 @@ -169,19 +169,19 @@ extern int cachefiles_check_in_use(struct cachefiles_cache *cache,
40726 * proc.c
40727 */
40728 #ifdef CONFIG_CACHEFILES_HISTOGRAM
40729 -extern atomic_t cachefiles_lookup_histogram[HZ];
40730 -extern atomic_t cachefiles_mkdir_histogram[HZ];
40731 -extern atomic_t cachefiles_create_histogram[HZ];
40732 +extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
40733 +extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
40734 +extern atomic_unchecked_t cachefiles_create_histogram[HZ];
40735
40736 extern int __init cachefiles_proc_init(void);
40737 extern void cachefiles_proc_cleanup(void);
40738 static inline
40739 -void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
40740 +void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
40741 {
40742 unsigned long jif = jiffies - start_jif;
40743 if (jif >= HZ)
40744 jif = HZ - 1;
40745 - atomic_inc(&histogram[jif]);
40746 + atomic_inc_unchecked(&histogram[jif]);
40747 }
40748
40749 #else
40750 diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
40751 index a0358c2..d6137f2 100644
40752 --- a/fs/cachefiles/namei.c
40753 +++ b/fs/cachefiles/namei.c
40754 @@ -318,7 +318,7 @@ try_again:
40755 /* first step is to make up a grave dentry in the graveyard */
40756 sprintf(nbuffer, "%08x%08x",
40757 (uint32_t) get_seconds(),
40758 - (uint32_t) atomic_inc_return(&cache->gravecounter));
40759 + (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
40760
40761 /* do the multiway lock magic */
40762 trap = lock_rename(cache->graveyard, dir);
40763 diff --git a/fs/cachefiles/proc.c b/fs/cachefiles/proc.c
40764 index eccd339..4c1d995 100644
40765 --- a/fs/cachefiles/proc.c
40766 +++ b/fs/cachefiles/proc.c
40767 @@ -14,9 +14,9 @@
40768 #include <linux/seq_file.h>
40769 #include "internal.h"
40770
40771 -atomic_t cachefiles_lookup_histogram[HZ];
40772 -atomic_t cachefiles_mkdir_histogram[HZ];
40773 -atomic_t cachefiles_create_histogram[HZ];
40774 +atomic_unchecked_t cachefiles_lookup_histogram[HZ];
40775 +atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
40776 +atomic_unchecked_t cachefiles_create_histogram[HZ];
40777
40778 /*
40779 * display the latency histogram
40780 @@ -35,9 +35,9 @@ static int cachefiles_histogram_show(struct seq_file *m, void *v)
40781 return 0;
40782 default:
40783 index = (unsigned long) v - 3;
40784 - x = atomic_read(&cachefiles_lookup_histogram[index]);
40785 - y = atomic_read(&cachefiles_mkdir_histogram[index]);
40786 - z = atomic_read(&cachefiles_create_histogram[index]);
40787 + x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
40788 + y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
40789 + z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
40790 if (x == 0 && y == 0 && z == 0)
40791 return 0;
40792
40793 diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
40794 index 0e3c092..818480e 100644
40795 --- a/fs/cachefiles/rdwr.c
40796 +++ b/fs/cachefiles/rdwr.c
40797 @@ -945,7 +945,7 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
40798 old_fs = get_fs();
40799 set_fs(KERNEL_DS);
40800 ret = file->f_op->write(
40801 - file, (const void __user *) data, len, &pos);
40802 + file, (const void __force_user *) data, len, &pos);
40803 set_fs(old_fs);
40804 kunmap(page);
40805 if (ret != len)
40806 diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
40807 index 9895400..fa40a7d 100644
40808 --- a/fs/ceph/dir.c
40809 +++ b/fs/ceph/dir.c
40810 @@ -244,7 +244,7 @@ static int ceph_readdir(struct file *filp, void *dirent, filldir_t filldir)
40811 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
40812 struct ceph_mds_client *mdsc = fsc->mdsc;
40813 unsigned frag = fpos_frag(filp->f_pos);
40814 - int off = fpos_off(filp->f_pos);
40815 + unsigned int off = fpos_off(filp->f_pos);
40816 int err;
40817 u32 ftype;
40818 struct ceph_mds_reply_info_parsed *rinfo;
40819 diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
40820 index 84e8c07..6170d31 100644
40821 --- a/fs/cifs/cifs_debug.c
40822 +++ b/fs/cifs/cifs_debug.c
40823 @@ -265,8 +265,8 @@ static ssize_t cifs_stats_proc_write(struct file *file,
40824
40825 if (c == '1' || c == 'y' || c == 'Y' || c == '0') {
40826 #ifdef CONFIG_CIFS_STATS2
40827 - atomic_set(&totBufAllocCount, 0);
40828 - atomic_set(&totSmBufAllocCount, 0);
40829 + atomic_set_unchecked(&totBufAllocCount, 0);
40830 + atomic_set_unchecked(&totSmBufAllocCount, 0);
40831 #endif /* CONFIG_CIFS_STATS2 */
40832 spin_lock(&cifs_tcp_ses_lock);
40833 list_for_each(tmp1, &cifs_tcp_ses_list) {
40834 @@ -279,25 +279,25 @@ static ssize_t cifs_stats_proc_write(struct file *file,
40835 tcon = list_entry(tmp3,
40836 struct cifs_tcon,
40837 tcon_list);
40838 - atomic_set(&tcon->num_smbs_sent, 0);
40839 - atomic_set(&tcon->num_writes, 0);
40840 - atomic_set(&tcon->num_reads, 0);
40841 - atomic_set(&tcon->num_oplock_brks, 0);
40842 - atomic_set(&tcon->num_opens, 0);
40843 - atomic_set(&tcon->num_posixopens, 0);
40844 - atomic_set(&tcon->num_posixmkdirs, 0);
40845 - atomic_set(&tcon->num_closes, 0);
40846 - atomic_set(&tcon->num_deletes, 0);
40847 - atomic_set(&tcon->num_mkdirs, 0);
40848 - atomic_set(&tcon->num_rmdirs, 0);
40849 - atomic_set(&tcon->num_renames, 0);
40850 - atomic_set(&tcon->num_t2renames, 0);
40851 - atomic_set(&tcon->num_ffirst, 0);
40852 - atomic_set(&tcon->num_fnext, 0);
40853 - atomic_set(&tcon->num_fclose, 0);
40854 - atomic_set(&tcon->num_hardlinks, 0);
40855 - atomic_set(&tcon->num_symlinks, 0);
40856 - atomic_set(&tcon->num_locks, 0);
40857 + atomic_set_unchecked(&tcon->num_smbs_sent, 0);
40858 + atomic_set_unchecked(&tcon->num_writes, 0);
40859 + atomic_set_unchecked(&tcon->num_reads, 0);
40860 + atomic_set_unchecked(&tcon->num_oplock_brks, 0);
40861 + atomic_set_unchecked(&tcon->num_opens, 0);
40862 + atomic_set_unchecked(&tcon->num_posixopens, 0);
40863 + atomic_set_unchecked(&tcon->num_posixmkdirs, 0);
40864 + atomic_set_unchecked(&tcon->num_closes, 0);
40865 + atomic_set_unchecked(&tcon->num_deletes, 0);
40866 + atomic_set_unchecked(&tcon->num_mkdirs, 0);
40867 + atomic_set_unchecked(&tcon->num_rmdirs, 0);
40868 + atomic_set_unchecked(&tcon->num_renames, 0);
40869 + atomic_set_unchecked(&tcon->num_t2renames, 0);
40870 + atomic_set_unchecked(&tcon->num_ffirst, 0);
40871 + atomic_set_unchecked(&tcon->num_fnext, 0);
40872 + atomic_set_unchecked(&tcon->num_fclose, 0);
40873 + atomic_set_unchecked(&tcon->num_hardlinks, 0);
40874 + atomic_set_unchecked(&tcon->num_symlinks, 0);
40875 + atomic_set_unchecked(&tcon->num_locks, 0);
40876 }
40877 }
40878 }
40879 @@ -327,8 +327,8 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
40880 smBufAllocCount.counter, cifs_min_small);
40881 #ifdef CONFIG_CIFS_STATS2
40882 seq_printf(m, "Total Large %d Small %d Allocations\n",
40883 - atomic_read(&totBufAllocCount),
40884 - atomic_read(&totSmBufAllocCount));
40885 + atomic_read_unchecked(&totBufAllocCount),
40886 + atomic_read_unchecked(&totSmBufAllocCount));
40887 #endif /* CONFIG_CIFS_STATS2 */
40888
40889 seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount));
40890 @@ -357,41 +357,41 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
40891 if (tcon->need_reconnect)
40892 seq_puts(m, "\tDISCONNECTED ");
40893 seq_printf(m, "\nSMBs: %d Oplock Breaks: %d",
40894 - atomic_read(&tcon->num_smbs_sent),
40895 - atomic_read(&tcon->num_oplock_brks));
40896 + atomic_read_unchecked(&tcon->num_smbs_sent),
40897 + atomic_read_unchecked(&tcon->num_oplock_brks));
40898 seq_printf(m, "\nReads: %d Bytes: %lld",
40899 - atomic_read(&tcon->num_reads),
40900 + atomic_read_unchecked(&tcon->num_reads),
40901 (long long)(tcon->bytes_read));
40902 seq_printf(m, "\nWrites: %d Bytes: %lld",
40903 - atomic_read(&tcon->num_writes),
40904 + atomic_read_unchecked(&tcon->num_writes),
40905 (long long)(tcon->bytes_written));
40906 seq_printf(m, "\nFlushes: %d",
40907 - atomic_read(&tcon->num_flushes));
40908 + atomic_read_unchecked(&tcon->num_flushes));
40909 seq_printf(m, "\nLocks: %d HardLinks: %d "
40910 "Symlinks: %d",
40911 - atomic_read(&tcon->num_locks),
40912 - atomic_read(&tcon->num_hardlinks),
40913 - atomic_read(&tcon->num_symlinks));
40914 + atomic_read_unchecked(&tcon->num_locks),
40915 + atomic_read_unchecked(&tcon->num_hardlinks),
40916 + atomic_read_unchecked(&tcon->num_symlinks));
40917 seq_printf(m, "\nOpens: %d Closes: %d "
40918 "Deletes: %d",
40919 - atomic_read(&tcon->num_opens),
40920 - atomic_read(&tcon->num_closes),
40921 - atomic_read(&tcon->num_deletes));
40922 + atomic_read_unchecked(&tcon->num_opens),
40923 + atomic_read_unchecked(&tcon->num_closes),
40924 + atomic_read_unchecked(&tcon->num_deletes));
40925 seq_printf(m, "\nPosix Opens: %d "
40926 "Posix Mkdirs: %d",
40927 - atomic_read(&tcon->num_posixopens),
40928 - atomic_read(&tcon->num_posixmkdirs));
40929 + atomic_read_unchecked(&tcon->num_posixopens),
40930 + atomic_read_unchecked(&tcon->num_posixmkdirs));
40931 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
40932 - atomic_read(&tcon->num_mkdirs),
40933 - atomic_read(&tcon->num_rmdirs));
40934 + atomic_read_unchecked(&tcon->num_mkdirs),
40935 + atomic_read_unchecked(&tcon->num_rmdirs));
40936 seq_printf(m, "\nRenames: %d T2 Renames %d",
40937 - atomic_read(&tcon->num_renames),
40938 - atomic_read(&tcon->num_t2renames));
40939 + atomic_read_unchecked(&tcon->num_renames),
40940 + atomic_read_unchecked(&tcon->num_t2renames));
40941 seq_printf(m, "\nFindFirst: %d FNext %d "
40942 "FClose %d",
40943 - atomic_read(&tcon->num_ffirst),
40944 - atomic_read(&tcon->num_fnext),
40945 - atomic_read(&tcon->num_fclose));
40946 + atomic_read_unchecked(&tcon->num_ffirst),
40947 + atomic_read_unchecked(&tcon->num_fnext),
40948 + atomic_read_unchecked(&tcon->num_fclose));
40949 }
40950 }
40951 }
40952 diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
40953 index 8f1fe32..38f9e27 100644
40954 --- a/fs/cifs/cifsfs.c
40955 +++ b/fs/cifs/cifsfs.c
40956 @@ -989,7 +989,7 @@ cifs_init_request_bufs(void)
40957 cifs_req_cachep = kmem_cache_create("cifs_request",
40958 CIFSMaxBufSize +
40959 MAX_CIFS_HDR_SIZE, 0,
40960 - SLAB_HWCACHE_ALIGN, NULL);
40961 + SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
40962 if (cifs_req_cachep == NULL)
40963 return -ENOMEM;
40964
40965 @@ -1016,7 +1016,7 @@ cifs_init_request_bufs(void)
40966 efficient to alloc 1 per page off the slab compared to 17K (5page)
40967 alloc of large cifs buffers even when page debugging is on */
40968 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
40969 - MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
40970 + MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
40971 NULL);
40972 if (cifs_sm_req_cachep == NULL) {
40973 mempool_destroy(cifs_req_poolp);
40974 @@ -1101,8 +1101,8 @@ init_cifs(void)
40975 atomic_set(&bufAllocCount, 0);
40976 atomic_set(&smBufAllocCount, 0);
40977 #ifdef CONFIG_CIFS_STATS2
40978 - atomic_set(&totBufAllocCount, 0);
40979 - atomic_set(&totSmBufAllocCount, 0);
40980 + atomic_set_unchecked(&totBufAllocCount, 0);
40981 + atomic_set_unchecked(&totSmBufAllocCount, 0);
40982 #endif /* CONFIG_CIFS_STATS2 */
40983
40984 atomic_set(&midCount, 0);
40985 diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
40986 index 8238aa1..0347196 100644
40987 --- a/fs/cifs/cifsglob.h
40988 +++ b/fs/cifs/cifsglob.h
40989 @@ -392,28 +392,28 @@ struct cifs_tcon {
40990 __u16 Flags; /* optional support bits */
40991 enum statusEnum tidStatus;
40992 #ifdef CONFIG_CIFS_STATS
40993 - atomic_t num_smbs_sent;
40994 - atomic_t num_writes;
40995 - atomic_t num_reads;
40996 - atomic_t num_flushes;
40997 - atomic_t num_oplock_brks;
40998 - atomic_t num_opens;
40999 - atomic_t num_closes;
41000 - atomic_t num_deletes;
41001 - atomic_t num_mkdirs;
41002 - atomic_t num_posixopens;
41003 - atomic_t num_posixmkdirs;
41004 - atomic_t num_rmdirs;
41005 - atomic_t num_renames;
41006 - atomic_t num_t2renames;
41007 - atomic_t num_ffirst;
41008 - atomic_t num_fnext;
41009 - atomic_t num_fclose;
41010 - atomic_t num_hardlinks;
41011 - atomic_t num_symlinks;
41012 - atomic_t num_locks;
41013 - atomic_t num_acl_get;
41014 - atomic_t num_acl_set;
41015 + atomic_unchecked_t num_smbs_sent;
41016 + atomic_unchecked_t num_writes;
41017 + atomic_unchecked_t num_reads;
41018 + atomic_unchecked_t num_flushes;
41019 + atomic_unchecked_t num_oplock_brks;
41020 + atomic_unchecked_t num_opens;
41021 + atomic_unchecked_t num_closes;
41022 + atomic_unchecked_t num_deletes;
41023 + atomic_unchecked_t num_mkdirs;
41024 + atomic_unchecked_t num_posixopens;
41025 + atomic_unchecked_t num_posixmkdirs;
41026 + atomic_unchecked_t num_rmdirs;
41027 + atomic_unchecked_t num_renames;
41028 + atomic_unchecked_t num_t2renames;
41029 + atomic_unchecked_t num_ffirst;
41030 + atomic_unchecked_t num_fnext;
41031 + atomic_unchecked_t num_fclose;
41032 + atomic_unchecked_t num_hardlinks;
41033 + atomic_unchecked_t num_symlinks;
41034 + atomic_unchecked_t num_locks;
41035 + atomic_unchecked_t num_acl_get;
41036 + atomic_unchecked_t num_acl_set;
41037 #ifdef CONFIG_CIFS_STATS2
41038 unsigned long long time_writes;
41039 unsigned long long time_reads;
41040 @@ -628,7 +628,7 @@ convert_delimiter(char *path, char delim)
41041 }
41042
41043 #ifdef CONFIG_CIFS_STATS
41044 -#define cifs_stats_inc atomic_inc
41045 +#define cifs_stats_inc atomic_inc_unchecked
41046
41047 static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon,
41048 unsigned int bytes)
41049 @@ -985,8 +985,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnectCount;
41050 /* Various Debug counters */
41051 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
41052 #ifdef CONFIG_CIFS_STATS2
41053 -GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
41054 -GLOBAL_EXTERN atomic_t totSmBufAllocCount;
41055 +GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
41056 +GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
41057 #endif
41058 GLOBAL_EXTERN atomic_t smBufAllocCount;
41059 GLOBAL_EXTERN atomic_t midCount;
41060 diff --git a/fs/cifs/link.c b/fs/cifs/link.c
41061 index 6b0e064..94e6c3c 100644
41062 --- a/fs/cifs/link.c
41063 +++ b/fs/cifs/link.c
41064 @@ -600,7 +600,7 @@ symlink_exit:
41065
41066 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
41067 {
41068 - char *p = nd_get_link(nd);
41069 + const char *p = nd_get_link(nd);
41070 if (!IS_ERR(p))
41071 kfree(p);
41072 }
41073 diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
41074 index 703ef5c..2a44ed5 100644
41075 --- a/fs/cifs/misc.c
41076 +++ b/fs/cifs/misc.c
41077 @@ -156,7 +156,7 @@ cifs_buf_get(void)
41078 memset(ret_buf, 0, sizeof(struct smb_hdr) + 3);
41079 atomic_inc(&bufAllocCount);
41080 #ifdef CONFIG_CIFS_STATS2
41081 - atomic_inc(&totBufAllocCount);
41082 + atomic_inc_unchecked(&totBufAllocCount);
41083 #endif /* CONFIG_CIFS_STATS2 */
41084 }
41085
41086 @@ -191,7 +191,7 @@ cifs_small_buf_get(void)
41087 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
41088 atomic_inc(&smBufAllocCount);
41089 #ifdef CONFIG_CIFS_STATS2
41090 - atomic_inc(&totSmBufAllocCount);
41091 + atomic_inc_unchecked(&totSmBufAllocCount);
41092 #endif /* CONFIG_CIFS_STATS2 */
41093
41094 }
41095 diff --git a/fs/coda/cache.c b/fs/coda/cache.c
41096 index 6901578..d402eb5 100644
41097 --- a/fs/coda/cache.c
41098 +++ b/fs/coda/cache.c
41099 @@ -24,7 +24,7 @@
41100 #include "coda_linux.h"
41101 #include "coda_cache.h"
41102
41103 -static atomic_t permission_epoch = ATOMIC_INIT(0);
41104 +static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
41105
41106 /* replace or extend an acl cache hit */
41107 void coda_cache_enter(struct inode *inode, int mask)
41108 @@ -32,7 +32,7 @@ void coda_cache_enter(struct inode *inode, int mask)
41109 struct coda_inode_info *cii = ITOC(inode);
41110
41111 spin_lock(&cii->c_lock);
41112 - cii->c_cached_epoch = atomic_read(&permission_epoch);
41113 + cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
41114 if (cii->c_uid != current_fsuid()) {
41115 cii->c_uid = current_fsuid();
41116 cii->c_cached_perm = mask;
41117 @@ -46,14 +46,14 @@ void coda_cache_clear_inode(struct inode *inode)
41118 {
41119 struct coda_inode_info *cii = ITOC(inode);
41120 spin_lock(&cii->c_lock);
41121 - cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
41122 + cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
41123 spin_unlock(&cii->c_lock);
41124 }
41125
41126 /* remove all acl caches */
41127 void coda_cache_clear_all(struct super_block *sb)
41128 {
41129 - atomic_inc(&permission_epoch);
41130 + atomic_inc_unchecked(&permission_epoch);
41131 }
41132
41133
41134 @@ -66,7 +66,7 @@ int coda_cache_check(struct inode *inode, int mask)
41135 spin_lock(&cii->c_lock);
41136 hit = (mask & cii->c_cached_perm) == mask &&
41137 cii->c_uid == current_fsuid() &&
41138 - cii->c_cached_epoch == atomic_read(&permission_epoch);
41139 + cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
41140 spin_unlock(&cii->c_lock);
41141
41142 return hit;
41143 diff --git a/fs/compat.c b/fs/compat.c
41144 index c987875..08771ca 100644
41145 --- a/fs/compat.c
41146 +++ b/fs/compat.c
41147 @@ -132,8 +132,8 @@ asmlinkage long compat_sys_utimes(const char __user *filename, struct compat_tim
41148 static int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf)
41149 {
41150 compat_ino_t ino = stat->ino;
41151 - typeof(ubuf->st_uid) uid = 0;
41152 - typeof(ubuf->st_gid) gid = 0;
41153 + typeof(((struct compat_stat *)0)->st_uid) uid = 0;
41154 + typeof(((struct compat_stat *)0)->st_gid) gid = 0;
41155 int err;
41156
41157 SET_UID(uid, stat->uid);
41158 @@ -504,7 +504,7 @@ compat_sys_io_setup(unsigned nr_reqs, u32 __user *ctx32p)
41159
41160 set_fs(KERNEL_DS);
41161 /* The __user pointer cast is valid because of the set_fs() */
41162 - ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
41163 + ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64);
41164 set_fs(oldfs);
41165 /* truncating is ok because it's a user address */
41166 if (!ret)
41167 @@ -562,7 +562,7 @@ ssize_t compat_rw_copy_check_uvector(int type,
41168 goto out;
41169
41170 ret = -EINVAL;
41171 - if (nr_segs > UIO_MAXIOV || nr_segs < 0)
41172 + if (nr_segs > UIO_MAXIOV)
41173 goto out;
41174 if (nr_segs > fast_segs) {
41175 ret = -ENOMEM;
41176 @@ -845,6 +845,7 @@ struct compat_old_linux_dirent {
41177
41178 struct compat_readdir_callback {
41179 struct compat_old_linux_dirent __user *dirent;
41180 + struct file * file;
41181 int result;
41182 };
41183
41184 @@ -862,6 +863,10 @@ static int compat_fillonedir(void *__buf, const char *name, int namlen,
41185 buf->result = -EOVERFLOW;
41186 return -EOVERFLOW;
41187 }
41188 +
41189 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
41190 + return 0;
41191 +
41192 buf->result++;
41193 dirent = buf->dirent;
41194 if (!access_ok(VERIFY_WRITE, dirent,
41195 @@ -894,6 +899,7 @@ asmlinkage long compat_sys_old_readdir(unsigned int fd,
41196
41197 buf.result = 0;
41198 buf.dirent = dirent;
41199 + buf.file = file;
41200
41201 error = vfs_readdir(file, compat_fillonedir, &buf);
41202 if (buf.result)
41203 @@ -914,6 +920,7 @@ struct compat_linux_dirent {
41204 struct compat_getdents_callback {
41205 struct compat_linux_dirent __user *current_dir;
41206 struct compat_linux_dirent __user *previous;
41207 + struct file * file;
41208 int count;
41209 int error;
41210 };
41211 @@ -935,6 +942,10 @@ static int compat_filldir(void *__buf, const char *name, int namlen,
41212 buf->error = -EOVERFLOW;
41213 return -EOVERFLOW;
41214 }
41215 +
41216 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
41217 + return 0;
41218 +
41219 dirent = buf->previous;
41220 if (dirent) {
41221 if (__put_user(offset, &dirent->d_off))
41222 @@ -982,6 +993,7 @@ asmlinkage long compat_sys_getdents(unsigned int fd,
41223 buf.previous = NULL;
41224 buf.count = count;
41225 buf.error = 0;
41226 + buf.file = file;
41227
41228 error = vfs_readdir(file, compat_filldir, &buf);
41229 if (error >= 0)
41230 @@ -1003,6 +1015,7 @@ out:
41231 struct compat_getdents_callback64 {
41232 struct linux_dirent64 __user *current_dir;
41233 struct linux_dirent64 __user *previous;
41234 + struct file * file;
41235 int count;
41236 int error;
41237 };
41238 @@ -1019,6 +1032,10 @@ static int compat_filldir64(void * __buf, const char * name, int namlen, loff_t
41239 buf->error = -EINVAL; /* only used if we fail.. */
41240 if (reclen > buf->count)
41241 return -EINVAL;
41242 +
41243 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
41244 + return 0;
41245 +
41246 dirent = buf->previous;
41247
41248 if (dirent) {
41249 @@ -1070,13 +1087,14 @@ asmlinkage long compat_sys_getdents64(unsigned int fd,
41250 buf.previous = NULL;
41251 buf.count = count;
41252 buf.error = 0;
41253 + buf.file = file;
41254
41255 error = vfs_readdir(file, compat_filldir64, &buf);
41256 if (error >= 0)
41257 error = buf.error;
41258 lastdirent = buf.previous;
41259 if (lastdirent) {
41260 - typeof(lastdirent->d_off) d_off = file->f_pos;
41261 + typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
41262 if (__put_user_unaligned(d_off, &lastdirent->d_off))
41263 error = -EFAULT;
41264 else
41265 diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c
41266 index 112e45a..b59845b 100644
41267 --- a/fs/compat_binfmt_elf.c
41268 +++ b/fs/compat_binfmt_elf.c
41269 @@ -30,11 +30,13 @@
41270 #undef elf_phdr
41271 #undef elf_shdr
41272 #undef elf_note
41273 +#undef elf_dyn
41274 #undef elf_addr_t
41275 #define elfhdr elf32_hdr
41276 #define elf_phdr elf32_phdr
41277 #define elf_shdr elf32_shdr
41278 #define elf_note elf32_note
41279 +#define elf_dyn Elf32_Dyn
41280 #define elf_addr_t Elf32_Addr
41281
41282 /*
41283 diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
41284 index 51352de..93292ff 100644
41285 --- a/fs/compat_ioctl.c
41286 +++ b/fs/compat_ioctl.c
41287 @@ -210,6 +210,8 @@ static int do_video_set_spu_palette(unsigned int fd, unsigned int cmd,
41288
41289 err = get_user(palp, &up->palette);
41290 err |= get_user(length, &up->length);
41291 + if (err)
41292 + return -EFAULT;
41293
41294 up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
41295 err = put_user(compat_ptr(palp), &up_native->palette);
41296 @@ -621,7 +623,7 @@ static int serial_struct_ioctl(unsigned fd, unsigned cmd,
41297 return -EFAULT;
41298 if (__get_user(udata, &ss32->iomem_base))
41299 return -EFAULT;
41300 - ss.iomem_base = compat_ptr(udata);
41301 + ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata);
41302 if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
41303 __get_user(ss.port_high, &ss32->port_high))
41304 return -EFAULT;
41305 @@ -796,7 +798,7 @@ static int compat_ioctl_preallocate(struct file *file,
41306 copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
41307 copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
41308 copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
41309 - copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
41310 + copy_in_user(p->l_pad, &p32->l_pad, 4*sizeof(u32)))
41311 return -EFAULT;
41312
41313 return ioctl_preallocate(file, p);
41314 @@ -1644,8 +1646,8 @@ asmlinkage long compat_sys_ioctl(unsigned int fd, unsigned int cmd,
41315 static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
41316 {
41317 unsigned int a, b;
41318 - a = *(unsigned int *)p;
41319 - b = *(unsigned int *)q;
41320 + a = *(const unsigned int *)p;
41321 + b = *(const unsigned int *)q;
41322 if (a > b)
41323 return 1;
41324 if (a < b)
41325 diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
41326 index 9a37a9b..35792b6 100644
41327 --- a/fs/configfs/dir.c
41328 +++ b/fs/configfs/dir.c
41329 @@ -1575,7 +1575,8 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
41330 }
41331 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
41332 struct configfs_dirent *next;
41333 - const char * name;
41334 + const unsigned char * name;
41335 + char d_name[sizeof(next->s_dentry->d_iname)];
41336 int len;
41337 struct inode *inode = NULL;
41338
41339 @@ -1585,7 +1586,12 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
41340 continue;
41341
41342 name = configfs_get_name(next);
41343 - len = strlen(name);
41344 + if (next->s_dentry && name == next->s_dentry->d_iname) {
41345 + len = next->s_dentry->d_name.len;
41346 + memcpy(d_name, name, len);
41347 + name = d_name;
41348 + } else
41349 + len = strlen(name);
41350
41351 /*
41352 * We'll have a dentry and an inode for
41353 diff --git a/fs/dcache.c b/fs/dcache.c
41354 index f7908ae..920a680 100644
41355 --- a/fs/dcache.c
41356 +++ b/fs/dcache.c
41357 @@ -3042,7 +3042,7 @@ void __init vfs_caches_init(unsigned long mempages)
41358 mempages -= reserve;
41359
41360 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
41361 - SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
41362 + SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
41363
41364 dcache_init();
41365 inode_init();
41366 diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
41367 index f3a257d..715ac0f 100644
41368 --- a/fs/debugfs/inode.c
41369 +++ b/fs/debugfs/inode.c
41370 @@ -261,7 +261,11 @@ EXPORT_SYMBOL_GPL(debugfs_create_file);
41371 struct dentry *debugfs_create_dir(const char *name, struct dentry *parent)
41372 {
41373 return debugfs_create_file(name,
41374 +#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
41375 + S_IFDIR | S_IRWXU,
41376 +#else
41377 S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
41378 +#endif
41379 parent, NULL, NULL);
41380 }
41381 EXPORT_SYMBOL_GPL(debugfs_create_dir);
41382 diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
41383 index d2039ca..a766407 100644
41384 --- a/fs/ecryptfs/inode.c
41385 +++ b/fs/ecryptfs/inode.c
41386 @@ -691,7 +691,7 @@ static int ecryptfs_readlink_lower(struct dentry *dentry, char **buf,
41387 old_fs = get_fs();
41388 set_fs(get_ds());
41389 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
41390 - (char __user *)lower_buf,
41391 + (char __force_user *)lower_buf,
41392 lower_bufsiz);
41393 set_fs(old_fs);
41394 if (rc < 0)
41395 @@ -737,7 +737,7 @@ static void *ecryptfs_follow_link(struct dentry *dentry, struct nameidata *nd)
41396 }
41397 old_fs = get_fs();
41398 set_fs(get_ds());
41399 - rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len);
41400 + rc = dentry->d_inode->i_op->readlink(dentry, (char __force_user *)buf, len);
41401 set_fs(old_fs);
41402 if (rc < 0) {
41403 kfree(buf);
41404 @@ -752,7 +752,7 @@ out:
41405 static void
41406 ecryptfs_put_link(struct dentry *dentry, struct nameidata *nd, void *ptr)
41407 {
41408 - char *buf = nd_get_link(nd);
41409 + const char *buf = nd_get_link(nd);
41410 if (!IS_ERR(buf)) {
41411 /* Free the char* */
41412 kfree(buf);
41413 diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c
41414 index 0dc5a3d..d3cdeea 100644
41415 --- a/fs/ecryptfs/miscdev.c
41416 +++ b/fs/ecryptfs/miscdev.c
41417 @@ -328,7 +328,7 @@ check_list:
41418 goto out_unlock_msg_ctx;
41419 i = 5;
41420 if (msg_ctx->msg) {
41421 - if (copy_to_user(&buf[i], packet_length, packet_length_size))
41422 + if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size))
41423 goto out_unlock_msg_ctx;
41424 i += packet_length_size;
41425 if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
41426 diff --git a/fs/ecryptfs/read_write.c b/fs/ecryptfs/read_write.c
41427 index 608c1c3..7d040a8 100644
41428 --- a/fs/ecryptfs/read_write.c
41429 +++ b/fs/ecryptfs/read_write.c
41430 @@ -48,7 +48,7 @@ int ecryptfs_write_lower(struct inode *ecryptfs_inode, char *data,
41431 return -EIO;
41432 fs_save = get_fs();
41433 set_fs(get_ds());
41434 - rc = vfs_write(lower_file, data, size, &offset);
41435 + rc = vfs_write(lower_file, (const char __force_user *)data, size, &offset);
41436 set_fs(fs_save);
41437 mark_inode_dirty_sync(ecryptfs_inode);
41438 return rc;
41439 @@ -244,7 +244,7 @@ int ecryptfs_read_lower(char *data, loff_t offset, size_t size,
41440 return -EIO;
41441 fs_save = get_fs();
41442 set_fs(get_ds());
41443 - rc = vfs_read(lower_file, data, size, &offset);
41444 + rc = vfs_read(lower_file, (char __force_user *)data, size, &offset);
41445 set_fs(fs_save);
41446 return rc;
41447 }
41448 diff --git a/fs/exec.c b/fs/exec.c
41449 index 3625464..04855f9 100644
41450 --- a/fs/exec.c
41451 +++ b/fs/exec.c
41452 @@ -55,12 +55,28 @@
41453 #include <linux/pipe_fs_i.h>
41454 #include <linux/oom.h>
41455 #include <linux/compat.h>
41456 +#include <linux/random.h>
41457 +#include <linux/seq_file.h>
41458 +
41459 +#ifdef CONFIG_PAX_REFCOUNT
41460 +#include <linux/kallsyms.h>
41461 +#include <linux/kdebug.h>
41462 +#endif
41463
41464 #include <asm/uaccess.h>
41465 #include <asm/mmu_context.h>
41466 #include <asm/tlb.h>
41467 #include "internal.h"
41468
41469 +#ifndef CONFIG_PAX_HAVE_ACL_FLAGS
41470 +void __weak pax_set_initial_flags(struct linux_binprm *bprm) {}
41471 +#endif
41472 +
41473 +#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
41474 +void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
41475 +EXPORT_SYMBOL(pax_set_initial_flags_func);
41476 +#endif
41477 +
41478 int core_uses_pid;
41479 char core_pattern[CORENAME_MAX_SIZE] = "core";
41480 unsigned int core_pipe_limit;
41481 @@ -70,7 +86,7 @@ struct core_name {
41482 char *corename;
41483 int used, size;
41484 };
41485 -static atomic_t call_count = ATOMIC_INIT(1);
41486 +static atomic_unchecked_t call_count = ATOMIC_INIT(1);
41487
41488 /* The maximal length of core_pattern is also specified in sysctl.c */
41489
41490 @@ -188,18 +204,10 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
41491 int write)
41492 {
41493 struct page *page;
41494 - int ret;
41495
41496 -#ifdef CONFIG_STACK_GROWSUP
41497 - if (write) {
41498 - ret = expand_downwards(bprm->vma, pos);
41499 - if (ret < 0)
41500 - return NULL;
41501 - }
41502 -#endif
41503 - ret = get_user_pages(current, bprm->mm, pos,
41504 - 1, write, 1, &page, NULL);
41505 - if (ret <= 0)
41506 + if (0 > expand_downwards(bprm->vma, pos))
41507 + return NULL;
41508 + if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
41509 return NULL;
41510
41511 if (write) {
41512 @@ -215,6 +223,17 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
41513 if (size <= ARG_MAX)
41514 return page;
41515
41516 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
41517 + // only allow 1MB for argv+env on suid/sgid binaries
41518 + // to prevent easy ASLR exhaustion
41519 + if (((bprm->cred->euid != current_euid()) ||
41520 + (bprm->cred->egid != current_egid())) &&
41521 + (size > (1024 * 1024))) {
41522 + put_page(page);
41523 + return NULL;
41524 + }
41525 +#endif
41526 +
41527 /*
41528 * Limit to 1/4-th the stack size for the argv+env strings.
41529 * This ensures that:
41530 @@ -274,6 +293,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
41531 vma->vm_end = STACK_TOP_MAX;
41532 vma->vm_start = vma->vm_end - PAGE_SIZE;
41533 vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
41534 +
41535 +#ifdef CONFIG_PAX_SEGMEXEC
41536 + vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
41537 +#endif
41538 +
41539 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
41540 INIT_LIST_HEAD(&vma->anon_vma_chain);
41541
41542 @@ -288,6 +312,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
41543 mm->stack_vm = mm->total_vm = 1;
41544 up_write(&mm->mmap_sem);
41545 bprm->p = vma->vm_end - sizeof(void *);
41546 +
41547 +#ifdef CONFIG_PAX_RANDUSTACK
41548 + if (randomize_va_space)
41549 + bprm->p ^= (pax_get_random_long() & ~15) & ~PAGE_MASK;
41550 +#endif
41551 +
41552 return 0;
41553 err:
41554 up_write(&mm->mmap_sem);
41555 @@ -396,19 +426,7 @@ err:
41556 return err;
41557 }
41558
41559 -struct user_arg_ptr {
41560 -#ifdef CONFIG_COMPAT
41561 - bool is_compat;
41562 -#endif
41563 - union {
41564 - const char __user *const __user *native;
41565 -#ifdef CONFIG_COMPAT
41566 - compat_uptr_t __user *compat;
41567 -#endif
41568 - } ptr;
41569 -};
41570 -
41571 -static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
41572 +const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
41573 {
41574 const char __user *native;
41575
41576 @@ -417,14 +435,14 @@ static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
41577 compat_uptr_t compat;
41578
41579 if (get_user(compat, argv.ptr.compat + nr))
41580 - return ERR_PTR(-EFAULT);
41581 + return (const char __force_user *)ERR_PTR(-EFAULT);
41582
41583 return compat_ptr(compat);
41584 }
41585 #endif
41586
41587 if (get_user(native, argv.ptr.native + nr))
41588 - return ERR_PTR(-EFAULT);
41589 + return (const char __force_user *)ERR_PTR(-EFAULT);
41590
41591 return native;
41592 }
41593 @@ -443,7 +461,7 @@ static int count(struct user_arg_ptr argv, int max)
41594 if (!p)
41595 break;
41596
41597 - if (IS_ERR(p))
41598 + if (IS_ERR((const char __force_kernel *)p))
41599 return -EFAULT;
41600
41601 if (i++ >= max)
41602 @@ -477,7 +495,7 @@ static int copy_strings(int argc, struct user_arg_ptr argv,
41603
41604 ret = -EFAULT;
41605 str = get_user_arg_ptr(argv, argc);
41606 - if (IS_ERR(str))
41607 + if (IS_ERR((const char __force_kernel *)str))
41608 goto out;
41609
41610 len = strnlen_user(str, MAX_ARG_STRLEN);
41611 @@ -559,7 +577,7 @@ int copy_strings_kernel(int argc, const char *const *__argv,
41612 int r;
41613 mm_segment_t oldfs = get_fs();
41614 struct user_arg_ptr argv = {
41615 - .ptr.native = (const char __user *const __user *)__argv,
41616 + .ptr.native = (const char __force_user *const __force_user *)__argv,
41617 };
41618
41619 set_fs(KERNEL_DS);
41620 @@ -594,7 +612,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
41621 unsigned long new_end = old_end - shift;
41622 struct mmu_gather tlb;
41623
41624 - BUG_ON(new_start > new_end);
41625 + if (new_start >= new_end || new_start < mmap_min_addr)
41626 + return -ENOMEM;
41627
41628 /*
41629 * ensure there are no vmas between where we want to go
41630 @@ -603,6 +622,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
41631 if (vma != find_vma(mm, new_start))
41632 return -EFAULT;
41633
41634 +#ifdef CONFIG_PAX_SEGMEXEC
41635 + BUG_ON(pax_find_mirror_vma(vma));
41636 +#endif
41637 +
41638 /*
41639 * cover the whole range: [new_start, old_end)
41640 */
41641 @@ -683,10 +706,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
41642 stack_top = arch_align_stack(stack_top);
41643 stack_top = PAGE_ALIGN(stack_top);
41644
41645 - if (unlikely(stack_top < mmap_min_addr) ||
41646 - unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
41647 - return -ENOMEM;
41648 -
41649 stack_shift = vma->vm_end - stack_top;
41650
41651 bprm->p -= stack_shift;
41652 @@ -698,8 +717,28 @@ int setup_arg_pages(struct linux_binprm *bprm,
41653 bprm->exec -= stack_shift;
41654
41655 down_write(&mm->mmap_sem);
41656 +
41657 + /* Move stack pages down in memory. */
41658 + if (stack_shift) {
41659 + ret = shift_arg_pages(vma, stack_shift);
41660 + if (ret)
41661 + goto out_unlock;
41662 + }
41663 +
41664 vm_flags = VM_STACK_FLAGS;
41665
41666 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
41667 + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
41668 + vm_flags &= ~VM_EXEC;
41669 +
41670 +#ifdef CONFIG_PAX_MPROTECT
41671 + if (mm->pax_flags & MF_PAX_MPROTECT)
41672 + vm_flags &= ~VM_MAYEXEC;
41673 +#endif
41674 +
41675 + }
41676 +#endif
41677 +
41678 /*
41679 * Adjust stack execute permissions; explicitly enable for
41680 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
41681 @@ -718,13 +757,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
41682 goto out_unlock;
41683 BUG_ON(prev != vma);
41684
41685 - /* Move stack pages down in memory. */
41686 - if (stack_shift) {
41687 - ret = shift_arg_pages(vma, stack_shift);
41688 - if (ret)
41689 - goto out_unlock;
41690 - }
41691 -
41692 /* mprotect_fixup is overkill to remove the temporary stack flags */
41693 vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
41694
41695 @@ -805,7 +837,7 @@ int kernel_read(struct file *file, loff_t offset,
41696 old_fs = get_fs();
41697 set_fs(get_ds());
41698 /* The cast to a user pointer is valid due to the set_fs() */
41699 - result = vfs_read(file, (void __user *)addr, count, &pos);
41700 + result = vfs_read(file, (void __force_user *)addr, count, &pos);
41701 set_fs(old_fs);
41702 return result;
41703 }
41704 @@ -1067,6 +1099,21 @@ void set_task_comm(struct task_struct *tsk, char *buf)
41705 perf_event_comm(tsk);
41706 }
41707
41708 +static void filename_to_taskname(char *tcomm, const char *fn, unsigned int len)
41709 +{
41710 + int i, ch;
41711 +
41712 + /* Copies the binary name from after last slash */
41713 + for (i = 0; (ch = *(fn++)) != '\0';) {
41714 + if (ch == '/')
41715 + i = 0; /* overwrite what we wrote */
41716 + else
41717 + if (i < len - 1)
41718 + tcomm[i++] = ch;
41719 + }
41720 + tcomm[i] = '\0';
41721 +}
41722 +
41723 int flush_old_exec(struct linux_binprm * bprm)
41724 {
41725 int retval;
41726 @@ -1081,6 +1128,7 @@ int flush_old_exec(struct linux_binprm * bprm)
41727
41728 set_mm_exe_file(bprm->mm, bprm->file);
41729
41730 + filename_to_taskname(bprm->tcomm, bprm->filename, sizeof(bprm->tcomm));
41731 /*
41732 * Release all of the old mmap stuff
41733 */
41734 @@ -1112,10 +1160,6 @@ EXPORT_SYMBOL(would_dump);
41735
41736 void setup_new_exec(struct linux_binprm * bprm)
41737 {
41738 - int i, ch;
41739 - const char *name;
41740 - char tcomm[sizeof(current->comm)];
41741 -
41742 arch_pick_mmap_layout(current->mm);
41743
41744 /* This is the point of no return */
41745 @@ -1126,18 +1170,7 @@ void setup_new_exec(struct linux_binprm * bprm)
41746 else
41747 set_dumpable(current->mm, suid_dumpable);
41748
41749 - name = bprm->filename;
41750 -
41751 - /* Copies the binary name from after last slash */
41752 - for (i=0; (ch = *(name++)) != '\0';) {
41753 - if (ch == '/')
41754 - i = 0; /* overwrite what we wrote */
41755 - else
41756 - if (i < (sizeof(tcomm) - 1))
41757 - tcomm[i++] = ch;
41758 - }
41759 - tcomm[i] = '\0';
41760 - set_task_comm(current, tcomm);
41761 + set_task_comm(current, bprm->tcomm);
41762
41763 /* Set the new mm task size. We have to do that late because it may
41764 * depend on TIF_32BIT which is only updated in flush_thread() on
41765 @@ -1247,7 +1280,7 @@ int check_unsafe_exec(struct linux_binprm *bprm)
41766 }
41767 rcu_read_unlock();
41768
41769 - if (p->fs->users > n_fs) {
41770 + if (atomic_read(&p->fs->users) > n_fs) {
41771 bprm->unsafe |= LSM_UNSAFE_SHARE;
41772 } else {
41773 res = -EAGAIN;
41774 @@ -1442,6 +1475,10 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs)
41775
41776 EXPORT_SYMBOL(search_binary_handler);
41777
41778 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
41779 +static atomic64_unchecked_t global_exec_counter = ATOMIC64_INIT(0);
41780 +#endif
41781 +
41782 /*
41783 * sys_execve() executes a new program.
41784 */
41785 @@ -1450,6 +1487,11 @@ static int do_execve_common(const char *filename,
41786 struct user_arg_ptr envp,
41787 struct pt_regs *regs)
41788 {
41789 +#ifdef CONFIG_GRKERNSEC
41790 + struct file *old_exec_file;
41791 + struct acl_subject_label *old_acl;
41792 + struct rlimit old_rlim[RLIM_NLIMITS];
41793 +#endif
41794 struct linux_binprm *bprm;
41795 struct file *file;
41796 struct files_struct *displaced;
41797 @@ -1457,6 +1499,8 @@ static int do_execve_common(const char *filename,
41798 int retval;
41799 const struct cred *cred = current_cred();
41800
41801 + gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
41802 +
41803 /*
41804 * We move the actual failure in case of RLIMIT_NPROC excess from
41805 * set*uid() to execve() because too many poorly written programs
41806 @@ -1497,12 +1541,27 @@ static int do_execve_common(const char *filename,
41807 if (IS_ERR(file))
41808 goto out_unmark;
41809
41810 + if (gr_ptrace_readexec(file, bprm->unsafe)) {
41811 + retval = -EPERM;
41812 + goto out_file;
41813 + }
41814 +
41815 sched_exec();
41816
41817 bprm->file = file;
41818 bprm->filename = filename;
41819 bprm->interp = filename;
41820
41821 + if (gr_process_user_ban()) {
41822 + retval = -EPERM;
41823 + goto out_file;
41824 + }
41825 +
41826 + if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
41827 + retval = -EACCES;
41828 + goto out_file;
41829 + }
41830 +
41831 retval = bprm_mm_init(bprm);
41832 if (retval)
41833 goto out_file;
41834 @@ -1532,11 +1591,46 @@ static int do_execve_common(const char *filename,
41835 if (retval < 0)
41836 goto out;
41837
41838 + if (!gr_tpe_allow(file)) {
41839 + retval = -EACCES;
41840 + goto out;
41841 + }
41842 +
41843 + if (gr_check_crash_exec(file)) {
41844 + retval = -EACCES;
41845 + goto out;
41846 + }
41847 +
41848 + gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
41849 +
41850 + gr_handle_exec_args(bprm, argv);
41851 +
41852 +#ifdef CONFIG_GRKERNSEC
41853 + old_acl = current->acl;
41854 + memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
41855 + old_exec_file = current->exec_file;
41856 + get_file(file);
41857 + current->exec_file = file;
41858 +#endif
41859 +
41860 + retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
41861 + bprm->unsafe);
41862 + if (retval < 0)
41863 + goto out_fail;
41864 +
41865 retval = search_binary_handler(bprm,regs);
41866 if (retval < 0)
41867 - goto out;
41868 + goto out_fail;
41869 +#ifdef CONFIG_GRKERNSEC
41870 + if (old_exec_file)
41871 + fput(old_exec_file);
41872 +#endif
41873
41874 /* execve succeeded */
41875 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
41876 + current->exec_id = atomic64_inc_return_unchecked(&global_exec_counter);
41877 +#endif
41878 +
41879 current->fs->in_exec = 0;
41880 current->in_execve = 0;
41881 acct_update_integrals(current);
41882 @@ -1545,6 +1639,14 @@ static int do_execve_common(const char *filename,
41883 put_files_struct(displaced);
41884 return retval;
41885
41886 +out_fail:
41887 +#ifdef CONFIG_GRKERNSEC
41888 + current->acl = old_acl;
41889 + memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
41890 + fput(current->exec_file);
41891 + current->exec_file = old_exec_file;
41892 +#endif
41893 +
41894 out:
41895 if (bprm->mm) {
41896 acct_arg_size(bprm, 0);
41897 @@ -1618,7 +1720,7 @@ static int expand_corename(struct core_name *cn)
41898 {
41899 char *old_corename = cn->corename;
41900
41901 - cn->size = CORENAME_MAX_SIZE * atomic_inc_return(&call_count);
41902 + cn->size = CORENAME_MAX_SIZE * atomic_inc_return_unchecked(&call_count);
41903 cn->corename = krealloc(old_corename, cn->size, GFP_KERNEL);
41904
41905 if (!cn->corename) {
41906 @@ -1715,7 +1817,7 @@ static int format_corename(struct core_name *cn, long signr)
41907 int pid_in_pattern = 0;
41908 int err = 0;
41909
41910 - cn->size = CORENAME_MAX_SIZE * atomic_read(&call_count);
41911 + cn->size = CORENAME_MAX_SIZE * atomic_read_unchecked(&call_count);
41912 cn->corename = kmalloc(cn->size, GFP_KERNEL);
41913 cn->used = 0;
41914
41915 @@ -1812,6 +1914,218 @@ out:
41916 return ispipe;
41917 }
41918
41919 +int pax_check_flags(unsigned long *flags)
41920 +{
41921 + int retval = 0;
41922 +
41923 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
41924 + if (*flags & MF_PAX_SEGMEXEC)
41925 + {
41926 + *flags &= ~MF_PAX_SEGMEXEC;
41927 + retval = -EINVAL;
41928 + }
41929 +#endif
41930 +
41931 + if ((*flags & MF_PAX_PAGEEXEC)
41932 +
41933 +#ifdef CONFIG_PAX_PAGEEXEC
41934 + && (*flags & MF_PAX_SEGMEXEC)
41935 +#endif
41936 +
41937 + )
41938 + {
41939 + *flags &= ~MF_PAX_PAGEEXEC;
41940 + retval = -EINVAL;
41941 + }
41942 +
41943 + if ((*flags & MF_PAX_MPROTECT)
41944 +
41945 +#ifdef CONFIG_PAX_MPROTECT
41946 + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
41947 +#endif
41948 +
41949 + )
41950 + {
41951 + *flags &= ~MF_PAX_MPROTECT;
41952 + retval = -EINVAL;
41953 + }
41954 +
41955 + if ((*flags & MF_PAX_EMUTRAMP)
41956 +
41957 +#ifdef CONFIG_PAX_EMUTRAMP
41958 + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
41959 +#endif
41960 +
41961 + )
41962 + {
41963 + *flags &= ~MF_PAX_EMUTRAMP;
41964 + retval = -EINVAL;
41965 + }
41966 +
41967 + return retval;
41968 +}
41969 +
41970 +EXPORT_SYMBOL(pax_check_flags);
41971 +
41972 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
41973 +void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
41974 +{
41975 + struct task_struct *tsk = current;
41976 + struct mm_struct *mm = current->mm;
41977 + char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
41978 + char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
41979 + char *path_exec = NULL;
41980 + char *path_fault = NULL;
41981 + unsigned long start = 0UL, end = 0UL, offset = 0UL;
41982 +
41983 + if (buffer_exec && buffer_fault) {
41984 + struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
41985 +
41986 + down_read(&mm->mmap_sem);
41987 + vma = mm->mmap;
41988 + while (vma && (!vma_exec || !vma_fault)) {
41989 + if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
41990 + vma_exec = vma;
41991 + if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
41992 + vma_fault = vma;
41993 + vma = vma->vm_next;
41994 + }
41995 + if (vma_exec) {
41996 + path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
41997 + if (IS_ERR(path_exec))
41998 + path_exec = "<path too long>";
41999 + else {
42000 + path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
42001 + if (path_exec) {
42002 + *path_exec = 0;
42003 + path_exec = buffer_exec;
42004 + } else
42005 + path_exec = "<path too long>";
42006 + }
42007 + }
42008 + if (vma_fault) {
42009 + start = vma_fault->vm_start;
42010 + end = vma_fault->vm_end;
42011 + offset = vma_fault->vm_pgoff << PAGE_SHIFT;
42012 + if (vma_fault->vm_file) {
42013 + path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
42014 + if (IS_ERR(path_fault))
42015 + path_fault = "<path too long>";
42016 + else {
42017 + path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
42018 + if (path_fault) {
42019 + *path_fault = 0;
42020 + path_fault = buffer_fault;
42021 + } else
42022 + path_fault = "<path too long>";
42023 + }
42024 + } else
42025 + path_fault = "<anonymous mapping>";
42026 + }
42027 + up_read(&mm->mmap_sem);
42028 + }
42029 + if (tsk->signal->curr_ip)
42030 + printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
42031 + else
42032 + printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
42033 + printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
42034 + "PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
42035 + task_uid(tsk), task_euid(tsk), pc, sp);
42036 + free_page((unsigned long)buffer_exec);
42037 + free_page((unsigned long)buffer_fault);
42038 + pax_report_insns(regs, pc, sp);
42039 + do_coredump(SIGKILL, SIGKILL, regs);
42040 +}
42041 +#endif
42042 +
42043 +#ifdef CONFIG_PAX_REFCOUNT
42044 +void pax_report_refcount_overflow(struct pt_regs *regs)
42045 +{
42046 + if (current->signal->curr_ip)
42047 + printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
42048 + &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
42049 + else
42050 + printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
42051 + current->comm, task_pid_nr(current), current_uid(), current_euid());
42052 + print_symbol(KERN_ERR "PAX: refcount overflow occured at: %s\n", instruction_pointer(regs));
42053 + show_regs(regs);
42054 + force_sig_info(SIGKILL, SEND_SIG_FORCED, current);
42055 +}
42056 +#endif
42057 +
42058 +#ifdef CONFIG_PAX_USERCOPY
42059 +/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
42060 +int object_is_on_stack(const void *obj, unsigned long len)
42061 +{
42062 + const void * const stack = task_stack_page(current);
42063 + const void * const stackend = stack + THREAD_SIZE;
42064 +
42065 +#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
42066 + const void *frame = NULL;
42067 + const void *oldframe;
42068 +#endif
42069 +
42070 + if (obj + len < obj)
42071 + return -1;
42072 +
42073 + if (obj + len <= stack || stackend <= obj)
42074 + return 0;
42075 +
42076 + if (obj < stack || stackend < obj + len)
42077 + return -1;
42078 +
42079 +#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
42080 + oldframe = __builtin_frame_address(1);
42081 + if (oldframe)
42082 + frame = __builtin_frame_address(2);
42083 + /*
42084 + low ----------------------------------------------> high
42085 + [saved bp][saved ip][args][local vars][saved bp][saved ip]
42086 + ^----------------^
42087 + allow copies only within here
42088 + */
42089 + while (stack <= frame && frame < stackend) {
42090 + /* if obj + len extends past the last frame, this
42091 + check won't pass and the next frame will be 0,
42092 + causing us to bail out and correctly report
42093 + the copy as invalid
42094 + */
42095 + if (obj + len <= frame)
42096 + return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
42097 + oldframe = frame;
42098 + frame = *(const void * const *)frame;
42099 + }
42100 + return -1;
42101 +#else
42102 + return 1;
42103 +#endif
42104 +}
42105 +
42106 +NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
42107 +{
42108 + if (current->signal->curr_ip)
42109 + printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
42110 + &current->signal->curr_ip, to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
42111 + else
42112 + printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
42113 + to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
42114 + dump_stack();
42115 + gr_handle_kernel_exploit();
42116 + do_group_exit(SIGKILL);
42117 +}
42118 +#endif
42119 +
42120 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
42121 +void pax_track_stack(void)
42122 +{
42123 + unsigned long sp = (unsigned long)&sp;
42124 + if (sp < current_thread_info()->lowest_stack &&
42125 + sp > (unsigned long)task_stack_page(current))
42126 + current_thread_info()->lowest_stack = sp;
42127 +}
42128 +EXPORT_SYMBOL(pax_track_stack);
42129 +#endif
42130 +
42131 static int zap_process(struct task_struct *start, int exit_code)
42132 {
42133 struct task_struct *t;
42134 @@ -2023,17 +2337,17 @@ static void wait_for_dump_helpers(struct file *file)
42135 pipe = file->f_path.dentry->d_inode->i_pipe;
42136
42137 pipe_lock(pipe);
42138 - pipe->readers++;
42139 - pipe->writers--;
42140 + atomic_inc(&pipe->readers);
42141 + atomic_dec(&pipe->writers);
42142
42143 - while ((pipe->readers > 1) && (!signal_pending(current))) {
42144 + while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
42145 wake_up_interruptible_sync(&pipe->wait);
42146 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
42147 pipe_wait(pipe);
42148 }
42149
42150 - pipe->readers--;
42151 - pipe->writers++;
42152 + atomic_dec(&pipe->readers);
42153 + atomic_inc(&pipe->writers);
42154 pipe_unlock(pipe);
42155
42156 }
42157 @@ -2094,7 +2408,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
42158 int retval = 0;
42159 int flag = 0;
42160 int ispipe;
42161 - static atomic_t core_dump_count = ATOMIC_INIT(0);
42162 + static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
42163 struct coredump_params cprm = {
42164 .signr = signr,
42165 .regs = regs,
42166 @@ -2109,6 +2423,9 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
42167
42168 audit_core_dumps(signr);
42169
42170 + if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
42171 + gr_handle_brute_attach(current, cprm.mm_flags);
42172 +
42173 binfmt = mm->binfmt;
42174 if (!binfmt || !binfmt->core_dump)
42175 goto fail;
42176 @@ -2176,7 +2493,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
42177 }
42178 cprm.limit = RLIM_INFINITY;
42179
42180 - dump_count = atomic_inc_return(&core_dump_count);
42181 + dump_count = atomic_inc_return_unchecked(&core_dump_count);
42182 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
42183 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
42184 task_tgid_vnr(current), current->comm);
42185 @@ -2203,6 +2520,8 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
42186 } else {
42187 struct inode *inode;
42188
42189 + gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
42190 +
42191 if (cprm.limit < binfmt->min_coredump)
42192 goto fail_unlock;
42193
42194 @@ -2246,7 +2565,7 @@ close_fail:
42195 filp_close(cprm.file, NULL);
42196 fail_dropcount:
42197 if (ispipe)
42198 - atomic_dec(&core_dump_count);
42199 + atomic_dec_unchecked(&core_dump_count);
42200 fail_unlock:
42201 kfree(cn.corename);
42202 fail_corename:
42203 @@ -2265,7 +2584,7 @@ fail:
42204 */
42205 int dump_write(struct file *file, const void *addr, int nr)
42206 {
42207 - return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, addr, nr, &file->f_pos) == nr;
42208 + return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, (const char __force_user *)addr, nr, &file->f_pos) == nr;
42209 }
42210 EXPORT_SYMBOL(dump_write);
42211
42212 diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
42213 index a8cbe1b..fed04cb 100644
42214 --- a/fs/ext2/balloc.c
42215 +++ b/fs/ext2/balloc.c
42216 @@ -1192,7 +1192,7 @@ static int ext2_has_free_blocks(struct ext2_sb_info *sbi)
42217
42218 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
42219 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
42220 - if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
42221 + if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
42222 sbi->s_resuid != current_fsuid() &&
42223 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
42224 return 0;
42225 diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
42226 index a203892..4e64db5 100644
42227 --- a/fs/ext3/balloc.c
42228 +++ b/fs/ext3/balloc.c
42229 @@ -1446,9 +1446,10 @@ static int ext3_has_free_blocks(struct ext3_sb_info *sbi, int use_reservation)
42230
42231 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
42232 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
42233 - if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
42234 + if (free_blocks < root_blocks + 1 &&
42235 !use_reservation && sbi->s_resuid != current_fsuid() &&
42236 - (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
42237 + (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid)) &&
42238 + !capable_nolog(CAP_SYS_RESOURCE)) {
42239 return 0;
42240 }
42241 return 1;
42242 diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
42243 index 12ccacd..a6035fce0 100644
42244 --- a/fs/ext4/balloc.c
42245 +++ b/fs/ext4/balloc.c
42246 @@ -436,8 +436,8 @@ static int ext4_has_free_clusters(struct ext4_sb_info *sbi,
42247 /* Hm, nope. Are (enough) root reserved clusters available? */
42248 if (sbi->s_resuid == current_fsuid() ||
42249 ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
42250 - capable(CAP_SYS_RESOURCE) ||
42251 - (flags & EXT4_MB_USE_ROOT_BLOCKS)) {
42252 + (flags & EXT4_MB_USE_ROOT_BLOCKS) ||
42253 + capable_nolog(CAP_SYS_RESOURCE)) {
42254
42255 if (free_clusters >= (nclusters + dirty_clusters))
42256 return 1;
42257 diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
42258 index 5b0e26a..0aa002d 100644
42259 --- a/fs/ext4/ext4.h
42260 +++ b/fs/ext4/ext4.h
42261 @@ -1208,19 +1208,19 @@ struct ext4_sb_info {
42262 unsigned long s_mb_last_start;
42263
42264 /* stats for buddy allocator */
42265 - atomic_t s_bal_reqs; /* number of reqs with len > 1 */
42266 - atomic_t s_bal_success; /* we found long enough chunks */
42267 - atomic_t s_bal_allocated; /* in blocks */
42268 - atomic_t s_bal_ex_scanned; /* total extents scanned */
42269 - atomic_t s_bal_goals; /* goal hits */
42270 - atomic_t s_bal_breaks; /* too long searches */
42271 - atomic_t s_bal_2orders; /* 2^order hits */
42272 + atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
42273 + atomic_unchecked_t s_bal_success; /* we found long enough chunks */
42274 + atomic_unchecked_t s_bal_allocated; /* in blocks */
42275 + atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
42276 + atomic_unchecked_t s_bal_goals; /* goal hits */
42277 + atomic_unchecked_t s_bal_breaks; /* too long searches */
42278 + atomic_unchecked_t s_bal_2orders; /* 2^order hits */
42279 spinlock_t s_bal_lock;
42280 unsigned long s_mb_buddies_generated;
42281 unsigned long long s_mb_generation_time;
42282 - atomic_t s_mb_lost_chunks;
42283 - atomic_t s_mb_preallocated;
42284 - atomic_t s_mb_discarded;
42285 + atomic_unchecked_t s_mb_lost_chunks;
42286 + atomic_unchecked_t s_mb_preallocated;
42287 + atomic_unchecked_t s_mb_discarded;
42288 atomic_t s_lock_busy;
42289
42290 /* locality groups */
42291 diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
42292 index e2d8be8..c7f0ce9 100644
42293 --- a/fs/ext4/mballoc.c
42294 +++ b/fs/ext4/mballoc.c
42295 @@ -1794,7 +1794,7 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
42296 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
42297
42298 if (EXT4_SB(sb)->s_mb_stats)
42299 - atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
42300 + atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
42301
42302 break;
42303 }
42304 @@ -2088,7 +2088,7 @@ repeat:
42305 ac->ac_status = AC_STATUS_CONTINUE;
42306 ac->ac_flags |= EXT4_MB_HINT_FIRST;
42307 cr = 3;
42308 - atomic_inc(&sbi->s_mb_lost_chunks);
42309 + atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
42310 goto repeat;
42311 }
42312 }
42313 @@ -2592,25 +2592,25 @@ int ext4_mb_release(struct super_block *sb)
42314 if (sbi->s_mb_stats) {
42315 ext4_msg(sb, KERN_INFO,
42316 "mballoc: %u blocks %u reqs (%u success)",
42317 - atomic_read(&sbi->s_bal_allocated),
42318 - atomic_read(&sbi->s_bal_reqs),
42319 - atomic_read(&sbi->s_bal_success));
42320 + atomic_read_unchecked(&sbi->s_bal_allocated),
42321 + atomic_read_unchecked(&sbi->s_bal_reqs),
42322 + atomic_read_unchecked(&sbi->s_bal_success));
42323 ext4_msg(sb, KERN_INFO,
42324 "mballoc: %u extents scanned, %u goal hits, "
42325 "%u 2^N hits, %u breaks, %u lost",
42326 - atomic_read(&sbi->s_bal_ex_scanned),
42327 - atomic_read(&sbi->s_bal_goals),
42328 - atomic_read(&sbi->s_bal_2orders),
42329 - atomic_read(&sbi->s_bal_breaks),
42330 - atomic_read(&sbi->s_mb_lost_chunks));
42331 + atomic_read_unchecked(&sbi->s_bal_ex_scanned),
42332 + atomic_read_unchecked(&sbi->s_bal_goals),
42333 + atomic_read_unchecked(&sbi->s_bal_2orders),
42334 + atomic_read_unchecked(&sbi->s_bal_breaks),
42335 + atomic_read_unchecked(&sbi->s_mb_lost_chunks));
42336 ext4_msg(sb, KERN_INFO,
42337 "mballoc: %lu generated and it took %Lu",
42338 sbi->s_mb_buddies_generated,
42339 sbi->s_mb_generation_time);
42340 ext4_msg(sb, KERN_INFO,
42341 "mballoc: %u preallocated, %u discarded",
42342 - atomic_read(&sbi->s_mb_preallocated),
42343 - atomic_read(&sbi->s_mb_discarded));
42344 + atomic_read_unchecked(&sbi->s_mb_preallocated),
42345 + atomic_read_unchecked(&sbi->s_mb_discarded));
42346 }
42347
42348 free_percpu(sbi->s_locality_groups);
42349 @@ -3096,16 +3096,16 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
42350 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
42351
42352 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
42353 - atomic_inc(&sbi->s_bal_reqs);
42354 - atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
42355 + atomic_inc_unchecked(&sbi->s_bal_reqs);
42356 + atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
42357 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
42358 - atomic_inc(&sbi->s_bal_success);
42359 - atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
42360 + atomic_inc_unchecked(&sbi->s_bal_success);
42361 + atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
42362 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
42363 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
42364 - atomic_inc(&sbi->s_bal_goals);
42365 + atomic_inc_unchecked(&sbi->s_bal_goals);
42366 if (ac->ac_found > sbi->s_mb_max_to_scan)
42367 - atomic_inc(&sbi->s_bal_breaks);
42368 + atomic_inc_unchecked(&sbi->s_bal_breaks);
42369 }
42370
42371 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
42372 @@ -3509,7 +3509,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
42373 trace_ext4_mb_new_inode_pa(ac, pa);
42374
42375 ext4_mb_use_inode_pa(ac, pa);
42376 - atomic_add(pa->pa_free, &sbi->s_mb_preallocated);
42377 + atomic_add_unchecked(pa->pa_free, &sbi->s_mb_preallocated);
42378
42379 ei = EXT4_I(ac->ac_inode);
42380 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
42381 @@ -3569,7 +3569,7 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
42382 trace_ext4_mb_new_group_pa(ac, pa);
42383
42384 ext4_mb_use_group_pa(ac, pa);
42385 - atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
42386 + atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
42387
42388 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
42389 lg = ac->ac_lg;
42390 @@ -3658,7 +3658,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
42391 * from the bitmap and continue.
42392 */
42393 }
42394 - atomic_add(free, &sbi->s_mb_discarded);
42395 + atomic_add_unchecked(free, &sbi->s_mb_discarded);
42396
42397 return err;
42398 }
42399 @@ -3676,7 +3676,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
42400 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
42401 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
42402 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
42403 - atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
42404 + atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
42405 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
42406
42407 return 0;
42408 diff --git a/fs/fcntl.c b/fs/fcntl.c
42409 index 22764c7..86372c9 100644
42410 --- a/fs/fcntl.c
42411 +++ b/fs/fcntl.c
42412 @@ -224,6 +224,11 @@ int __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
42413 if (err)
42414 return err;
42415
42416 + if (gr_handle_chroot_fowner(pid, type))
42417 + return -ENOENT;
42418 + if (gr_check_protected_task_fowner(pid, type))
42419 + return -EACCES;
42420 +
42421 f_modown(filp, pid, type, force);
42422 return 0;
42423 }
42424 @@ -266,7 +271,7 @@ pid_t f_getown(struct file *filp)
42425
42426 static int f_setown_ex(struct file *filp, unsigned long arg)
42427 {
42428 - struct f_owner_ex * __user owner_p = (void * __user)arg;
42429 + struct f_owner_ex __user *owner_p = (void __user *)arg;
42430 struct f_owner_ex owner;
42431 struct pid *pid;
42432 int type;
42433 @@ -306,7 +311,7 @@ static int f_setown_ex(struct file *filp, unsigned long arg)
42434
42435 static int f_getown_ex(struct file *filp, unsigned long arg)
42436 {
42437 - struct f_owner_ex * __user owner_p = (void * __user)arg;
42438 + struct f_owner_ex __user *owner_p = (void __user *)arg;
42439 struct f_owner_ex owner;
42440 int ret = 0;
42441
42442 @@ -348,6 +353,7 @@ static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
42443 switch (cmd) {
42444 case F_DUPFD:
42445 case F_DUPFD_CLOEXEC:
42446 + gr_learn_resource(current, RLIMIT_NOFILE, arg, 0);
42447 if (arg >= rlimit(RLIMIT_NOFILE))
42448 break;
42449 err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0);
42450 diff --git a/fs/fifo.c b/fs/fifo.c
42451 index b1a524d..4ee270e 100644
42452 --- a/fs/fifo.c
42453 +++ b/fs/fifo.c
42454 @@ -58,10 +58,10 @@ static int fifo_open(struct inode *inode, struct file *filp)
42455 */
42456 filp->f_op = &read_pipefifo_fops;
42457 pipe->r_counter++;
42458 - if (pipe->readers++ == 0)
42459 + if (atomic_inc_return(&pipe->readers) == 1)
42460 wake_up_partner(inode);
42461
42462 - if (!pipe->writers) {
42463 + if (!atomic_read(&pipe->writers)) {
42464 if ((filp->f_flags & O_NONBLOCK)) {
42465 /* suppress POLLHUP until we have
42466 * seen a writer */
42467 @@ -81,15 +81,15 @@ static int fifo_open(struct inode *inode, struct file *filp)
42468 * errno=ENXIO when there is no process reading the FIFO.
42469 */
42470 ret = -ENXIO;
42471 - if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
42472 + if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
42473 goto err;
42474
42475 filp->f_op = &write_pipefifo_fops;
42476 pipe->w_counter++;
42477 - if (!pipe->writers++)
42478 + if (atomic_inc_return(&pipe->writers) == 1)
42479 wake_up_partner(inode);
42480
42481 - if (!pipe->readers) {
42482 + if (!atomic_read(&pipe->readers)) {
42483 wait_for_partner(inode, &pipe->r_counter);
42484 if (signal_pending(current))
42485 goto err_wr;
42486 @@ -105,11 +105,11 @@ static int fifo_open(struct inode *inode, struct file *filp)
42487 */
42488 filp->f_op = &rdwr_pipefifo_fops;
42489
42490 - pipe->readers++;
42491 - pipe->writers++;
42492 + atomic_inc(&pipe->readers);
42493 + atomic_inc(&pipe->writers);
42494 pipe->r_counter++;
42495 pipe->w_counter++;
42496 - if (pipe->readers == 1 || pipe->writers == 1)
42497 + if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
42498 wake_up_partner(inode);
42499 break;
42500
42501 @@ -123,19 +123,19 @@ static int fifo_open(struct inode *inode, struct file *filp)
42502 return 0;
42503
42504 err_rd:
42505 - if (!--pipe->readers)
42506 + if (atomic_dec_and_test(&pipe->readers))
42507 wake_up_interruptible(&pipe->wait);
42508 ret = -ERESTARTSYS;
42509 goto err;
42510
42511 err_wr:
42512 - if (!--pipe->writers)
42513 + if (atomic_dec_and_test(&pipe->writers))
42514 wake_up_interruptible(&pipe->wait);
42515 ret = -ERESTARTSYS;
42516 goto err;
42517
42518 err:
42519 - if (!pipe->readers && !pipe->writers)
42520 + if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
42521 free_pipe_info(inode);
42522
42523 err_nocleanup:
42524 diff --git a/fs/file.c b/fs/file.c
42525 index 4c6992d..104cdea 100644
42526 --- a/fs/file.c
42527 +++ b/fs/file.c
42528 @@ -15,6 +15,7 @@
42529 #include <linux/slab.h>
42530 #include <linux/vmalloc.h>
42531 #include <linux/file.h>
42532 +#include <linux/security.h>
42533 #include <linux/fdtable.h>
42534 #include <linux/bitops.h>
42535 #include <linux/interrupt.h>
42536 @@ -254,6 +255,7 @@ int expand_files(struct files_struct *files, int nr)
42537 * N.B. For clone tasks sharing a files structure, this test
42538 * will limit the total number of files that can be opened.
42539 */
42540 + gr_learn_resource(current, RLIMIT_NOFILE, nr, 0);
42541 if (nr >= rlimit(RLIMIT_NOFILE))
42542 return -EMFILE;
42543
42544 diff --git a/fs/filesystems.c b/fs/filesystems.c
42545 index 0845f84..7b4ebef 100644
42546 --- a/fs/filesystems.c
42547 +++ b/fs/filesystems.c
42548 @@ -274,7 +274,12 @@ struct file_system_type *get_fs_type(const char *name)
42549 int len = dot ? dot - name : strlen(name);
42550
42551 fs = __get_fs_type(name, len);
42552 +
42553 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
42554 + if (!fs && (___request_module(true, "grsec_modharden_fs", "%.*s", len, name) == 0))
42555 +#else
42556 if (!fs && (request_module("%.*s", len, name) == 0))
42557 +#endif
42558 fs = __get_fs_type(name, len);
42559
42560 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
42561 diff --git a/fs/fs_struct.c b/fs/fs_struct.c
42562 index 78b519c..a8b4979 100644
42563 --- a/fs/fs_struct.c
42564 +++ b/fs/fs_struct.c
42565 @@ -4,6 +4,7 @@
42566 #include <linux/path.h>
42567 #include <linux/slab.h>
42568 #include <linux/fs_struct.h>
42569 +#include <linux/grsecurity.h>
42570 #include "internal.h"
42571
42572 static inline void path_get_longterm(struct path *path)
42573 @@ -31,6 +32,7 @@ void set_fs_root(struct fs_struct *fs, struct path *path)
42574 old_root = fs->root;
42575 fs->root = *path;
42576 path_get_longterm(path);
42577 + gr_set_chroot_entries(current, path);
42578 write_seqcount_end(&fs->seq);
42579 spin_unlock(&fs->lock);
42580 if (old_root.dentry)
42581 @@ -74,6 +76,7 @@ void chroot_fs_refs(struct path *old_root, struct path *new_root)
42582 && fs->root.mnt == old_root->mnt) {
42583 path_get_longterm(new_root);
42584 fs->root = *new_root;
42585 + gr_set_chroot_entries(p, new_root);
42586 count++;
42587 }
42588 if (fs->pwd.dentry == old_root->dentry
42589 @@ -109,7 +112,8 @@ void exit_fs(struct task_struct *tsk)
42590 spin_lock(&fs->lock);
42591 write_seqcount_begin(&fs->seq);
42592 tsk->fs = NULL;
42593 - kill = !--fs->users;
42594 + gr_clear_chroot_entries(tsk);
42595 + kill = !atomic_dec_return(&fs->users);
42596 write_seqcount_end(&fs->seq);
42597 spin_unlock(&fs->lock);
42598 task_unlock(tsk);
42599 @@ -123,7 +127,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
42600 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
42601 /* We don't need to lock fs - think why ;-) */
42602 if (fs) {
42603 - fs->users = 1;
42604 + atomic_set(&fs->users, 1);
42605 fs->in_exec = 0;
42606 spin_lock_init(&fs->lock);
42607 seqcount_init(&fs->seq);
42608 @@ -132,6 +136,9 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
42609 spin_lock(&old->lock);
42610 fs->root = old->root;
42611 path_get_longterm(&fs->root);
42612 + /* instead of calling gr_set_chroot_entries here,
42613 + we call it from every caller of this function
42614 + */
42615 fs->pwd = old->pwd;
42616 path_get_longterm(&fs->pwd);
42617 spin_unlock(&old->lock);
42618 @@ -150,8 +157,9 @@ int unshare_fs_struct(void)
42619
42620 task_lock(current);
42621 spin_lock(&fs->lock);
42622 - kill = !--fs->users;
42623 + kill = !atomic_dec_return(&fs->users);
42624 current->fs = new_fs;
42625 + gr_set_chroot_entries(current, &new_fs->root);
42626 spin_unlock(&fs->lock);
42627 task_unlock(current);
42628
42629 @@ -164,13 +172,13 @@ EXPORT_SYMBOL_GPL(unshare_fs_struct);
42630
42631 int current_umask(void)
42632 {
42633 - return current->fs->umask;
42634 + return current->fs->umask | gr_acl_umask();
42635 }
42636 EXPORT_SYMBOL(current_umask);
42637
42638 /* to be mentioned only in INIT_TASK */
42639 struct fs_struct init_fs = {
42640 - .users = 1,
42641 + .users = ATOMIC_INIT(1),
42642 .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
42643 .seq = SEQCNT_ZERO,
42644 .umask = 0022,
42645 @@ -186,12 +194,13 @@ void daemonize_fs_struct(void)
42646 task_lock(current);
42647
42648 spin_lock(&init_fs.lock);
42649 - init_fs.users++;
42650 + atomic_inc(&init_fs.users);
42651 spin_unlock(&init_fs.lock);
42652
42653 spin_lock(&fs->lock);
42654 current->fs = &init_fs;
42655 - kill = !--fs->users;
42656 + gr_set_chroot_entries(current, &current->fs->root);
42657 + kill = !atomic_dec_return(&fs->users);
42658 spin_unlock(&fs->lock);
42659
42660 task_unlock(current);
42661 diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
42662 index 9905350..02eaec4 100644
42663 --- a/fs/fscache/cookie.c
42664 +++ b/fs/fscache/cookie.c
42665 @@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire_cookie(
42666 parent ? (char *) parent->def->name : "<no-parent>",
42667 def->name, netfs_data);
42668
42669 - fscache_stat(&fscache_n_acquires);
42670 + fscache_stat_unchecked(&fscache_n_acquires);
42671
42672 /* if there's no parent cookie, then we don't create one here either */
42673 if (!parent) {
42674 - fscache_stat(&fscache_n_acquires_null);
42675 + fscache_stat_unchecked(&fscache_n_acquires_null);
42676 _leave(" [no parent]");
42677 return NULL;
42678 }
42679 @@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
42680 /* allocate and initialise a cookie */
42681 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
42682 if (!cookie) {
42683 - fscache_stat(&fscache_n_acquires_oom);
42684 + fscache_stat_unchecked(&fscache_n_acquires_oom);
42685 _leave(" [ENOMEM]");
42686 return NULL;
42687 }
42688 @@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
42689
42690 switch (cookie->def->type) {
42691 case FSCACHE_COOKIE_TYPE_INDEX:
42692 - fscache_stat(&fscache_n_cookie_index);
42693 + fscache_stat_unchecked(&fscache_n_cookie_index);
42694 break;
42695 case FSCACHE_COOKIE_TYPE_DATAFILE:
42696 - fscache_stat(&fscache_n_cookie_data);
42697 + fscache_stat_unchecked(&fscache_n_cookie_data);
42698 break;
42699 default:
42700 - fscache_stat(&fscache_n_cookie_special);
42701 + fscache_stat_unchecked(&fscache_n_cookie_special);
42702 break;
42703 }
42704
42705 @@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
42706 if (fscache_acquire_non_index_cookie(cookie) < 0) {
42707 atomic_dec(&parent->n_children);
42708 __fscache_cookie_put(cookie);
42709 - fscache_stat(&fscache_n_acquires_nobufs);
42710 + fscache_stat_unchecked(&fscache_n_acquires_nobufs);
42711 _leave(" = NULL");
42712 return NULL;
42713 }
42714 }
42715
42716 - fscache_stat(&fscache_n_acquires_ok);
42717 + fscache_stat_unchecked(&fscache_n_acquires_ok);
42718 _leave(" = %p", cookie);
42719 return cookie;
42720 }
42721 @@ -168,7 +168,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
42722 cache = fscache_select_cache_for_object(cookie->parent);
42723 if (!cache) {
42724 up_read(&fscache_addremove_sem);
42725 - fscache_stat(&fscache_n_acquires_no_cache);
42726 + fscache_stat_unchecked(&fscache_n_acquires_no_cache);
42727 _leave(" = -ENOMEDIUM [no cache]");
42728 return -ENOMEDIUM;
42729 }
42730 @@ -256,12 +256,12 @@ static int fscache_alloc_object(struct fscache_cache *cache,
42731 object = cache->ops->alloc_object(cache, cookie);
42732 fscache_stat_d(&fscache_n_cop_alloc_object);
42733 if (IS_ERR(object)) {
42734 - fscache_stat(&fscache_n_object_no_alloc);
42735 + fscache_stat_unchecked(&fscache_n_object_no_alloc);
42736 ret = PTR_ERR(object);
42737 goto error;
42738 }
42739
42740 - fscache_stat(&fscache_n_object_alloc);
42741 + fscache_stat_unchecked(&fscache_n_object_alloc);
42742
42743 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
42744
42745 @@ -377,10 +377,10 @@ void __fscache_update_cookie(struct fscache_cookie *cookie)
42746 struct fscache_object *object;
42747 struct hlist_node *_p;
42748
42749 - fscache_stat(&fscache_n_updates);
42750 + fscache_stat_unchecked(&fscache_n_updates);
42751
42752 if (!cookie) {
42753 - fscache_stat(&fscache_n_updates_null);
42754 + fscache_stat_unchecked(&fscache_n_updates_null);
42755 _leave(" [no cookie]");
42756 return;
42757 }
42758 @@ -414,12 +414,12 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
42759 struct fscache_object *object;
42760 unsigned long event;
42761
42762 - fscache_stat(&fscache_n_relinquishes);
42763 + fscache_stat_unchecked(&fscache_n_relinquishes);
42764 if (retire)
42765 - fscache_stat(&fscache_n_relinquishes_retire);
42766 + fscache_stat_unchecked(&fscache_n_relinquishes_retire);
42767
42768 if (!cookie) {
42769 - fscache_stat(&fscache_n_relinquishes_null);
42770 + fscache_stat_unchecked(&fscache_n_relinquishes_null);
42771 _leave(" [no cookie]");
42772 return;
42773 }
42774 @@ -435,7 +435,7 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
42775
42776 /* wait for the cookie to finish being instantiated (or to fail) */
42777 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
42778 - fscache_stat(&fscache_n_relinquishes_waitcrt);
42779 + fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
42780 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
42781 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
42782 }
42783 diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
42784 index f6aad48..88dcf26 100644
42785 --- a/fs/fscache/internal.h
42786 +++ b/fs/fscache/internal.h
42787 @@ -144,94 +144,94 @@ extern void fscache_proc_cleanup(void);
42788 extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
42789 extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
42790
42791 -extern atomic_t fscache_n_op_pend;
42792 -extern atomic_t fscache_n_op_run;
42793 -extern atomic_t fscache_n_op_enqueue;
42794 -extern atomic_t fscache_n_op_deferred_release;
42795 -extern atomic_t fscache_n_op_release;
42796 -extern atomic_t fscache_n_op_gc;
42797 -extern atomic_t fscache_n_op_cancelled;
42798 -extern atomic_t fscache_n_op_rejected;
42799 +extern atomic_unchecked_t fscache_n_op_pend;
42800 +extern atomic_unchecked_t fscache_n_op_run;
42801 +extern atomic_unchecked_t fscache_n_op_enqueue;
42802 +extern atomic_unchecked_t fscache_n_op_deferred_release;
42803 +extern atomic_unchecked_t fscache_n_op_release;
42804 +extern atomic_unchecked_t fscache_n_op_gc;
42805 +extern atomic_unchecked_t fscache_n_op_cancelled;
42806 +extern atomic_unchecked_t fscache_n_op_rejected;
42807
42808 -extern atomic_t fscache_n_attr_changed;
42809 -extern atomic_t fscache_n_attr_changed_ok;
42810 -extern atomic_t fscache_n_attr_changed_nobufs;
42811 -extern atomic_t fscache_n_attr_changed_nomem;
42812 -extern atomic_t fscache_n_attr_changed_calls;
42813 +extern atomic_unchecked_t fscache_n_attr_changed;
42814 +extern atomic_unchecked_t fscache_n_attr_changed_ok;
42815 +extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
42816 +extern atomic_unchecked_t fscache_n_attr_changed_nomem;
42817 +extern atomic_unchecked_t fscache_n_attr_changed_calls;
42818
42819 -extern atomic_t fscache_n_allocs;
42820 -extern atomic_t fscache_n_allocs_ok;
42821 -extern atomic_t fscache_n_allocs_wait;
42822 -extern atomic_t fscache_n_allocs_nobufs;
42823 -extern atomic_t fscache_n_allocs_intr;
42824 -extern atomic_t fscache_n_allocs_object_dead;
42825 -extern atomic_t fscache_n_alloc_ops;
42826 -extern atomic_t fscache_n_alloc_op_waits;
42827 +extern atomic_unchecked_t fscache_n_allocs;
42828 +extern atomic_unchecked_t fscache_n_allocs_ok;
42829 +extern atomic_unchecked_t fscache_n_allocs_wait;
42830 +extern atomic_unchecked_t fscache_n_allocs_nobufs;
42831 +extern atomic_unchecked_t fscache_n_allocs_intr;
42832 +extern atomic_unchecked_t fscache_n_allocs_object_dead;
42833 +extern atomic_unchecked_t fscache_n_alloc_ops;
42834 +extern atomic_unchecked_t fscache_n_alloc_op_waits;
42835
42836 -extern atomic_t fscache_n_retrievals;
42837 -extern atomic_t fscache_n_retrievals_ok;
42838 -extern atomic_t fscache_n_retrievals_wait;
42839 -extern atomic_t fscache_n_retrievals_nodata;
42840 -extern atomic_t fscache_n_retrievals_nobufs;
42841 -extern atomic_t fscache_n_retrievals_intr;
42842 -extern atomic_t fscache_n_retrievals_nomem;
42843 -extern atomic_t fscache_n_retrievals_object_dead;
42844 -extern atomic_t fscache_n_retrieval_ops;
42845 -extern atomic_t fscache_n_retrieval_op_waits;
42846 +extern atomic_unchecked_t fscache_n_retrievals;
42847 +extern atomic_unchecked_t fscache_n_retrievals_ok;
42848 +extern atomic_unchecked_t fscache_n_retrievals_wait;
42849 +extern atomic_unchecked_t fscache_n_retrievals_nodata;
42850 +extern atomic_unchecked_t fscache_n_retrievals_nobufs;
42851 +extern atomic_unchecked_t fscache_n_retrievals_intr;
42852 +extern atomic_unchecked_t fscache_n_retrievals_nomem;
42853 +extern atomic_unchecked_t fscache_n_retrievals_object_dead;
42854 +extern atomic_unchecked_t fscache_n_retrieval_ops;
42855 +extern atomic_unchecked_t fscache_n_retrieval_op_waits;
42856
42857 -extern atomic_t fscache_n_stores;
42858 -extern atomic_t fscache_n_stores_ok;
42859 -extern atomic_t fscache_n_stores_again;
42860 -extern atomic_t fscache_n_stores_nobufs;
42861 -extern atomic_t fscache_n_stores_oom;
42862 -extern atomic_t fscache_n_store_ops;
42863 -extern atomic_t fscache_n_store_calls;
42864 -extern atomic_t fscache_n_store_pages;
42865 -extern atomic_t fscache_n_store_radix_deletes;
42866 -extern atomic_t fscache_n_store_pages_over_limit;
42867 +extern atomic_unchecked_t fscache_n_stores;
42868 +extern atomic_unchecked_t fscache_n_stores_ok;
42869 +extern atomic_unchecked_t fscache_n_stores_again;
42870 +extern atomic_unchecked_t fscache_n_stores_nobufs;
42871 +extern atomic_unchecked_t fscache_n_stores_oom;
42872 +extern atomic_unchecked_t fscache_n_store_ops;
42873 +extern atomic_unchecked_t fscache_n_store_calls;
42874 +extern atomic_unchecked_t fscache_n_store_pages;
42875 +extern atomic_unchecked_t fscache_n_store_radix_deletes;
42876 +extern atomic_unchecked_t fscache_n_store_pages_over_limit;
42877
42878 -extern atomic_t fscache_n_store_vmscan_not_storing;
42879 -extern atomic_t fscache_n_store_vmscan_gone;
42880 -extern atomic_t fscache_n_store_vmscan_busy;
42881 -extern atomic_t fscache_n_store_vmscan_cancelled;
42882 +extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
42883 +extern atomic_unchecked_t fscache_n_store_vmscan_gone;
42884 +extern atomic_unchecked_t fscache_n_store_vmscan_busy;
42885 +extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
42886
42887 -extern atomic_t fscache_n_marks;
42888 -extern atomic_t fscache_n_uncaches;
42889 +extern atomic_unchecked_t fscache_n_marks;
42890 +extern atomic_unchecked_t fscache_n_uncaches;
42891
42892 -extern atomic_t fscache_n_acquires;
42893 -extern atomic_t fscache_n_acquires_null;
42894 -extern atomic_t fscache_n_acquires_no_cache;
42895 -extern atomic_t fscache_n_acquires_ok;
42896 -extern atomic_t fscache_n_acquires_nobufs;
42897 -extern atomic_t fscache_n_acquires_oom;
42898 +extern atomic_unchecked_t fscache_n_acquires;
42899 +extern atomic_unchecked_t fscache_n_acquires_null;
42900 +extern atomic_unchecked_t fscache_n_acquires_no_cache;
42901 +extern atomic_unchecked_t fscache_n_acquires_ok;
42902 +extern atomic_unchecked_t fscache_n_acquires_nobufs;
42903 +extern atomic_unchecked_t fscache_n_acquires_oom;
42904
42905 -extern atomic_t fscache_n_updates;
42906 -extern atomic_t fscache_n_updates_null;
42907 -extern atomic_t fscache_n_updates_run;
42908 +extern atomic_unchecked_t fscache_n_updates;
42909 +extern atomic_unchecked_t fscache_n_updates_null;
42910 +extern atomic_unchecked_t fscache_n_updates_run;
42911
42912 -extern atomic_t fscache_n_relinquishes;
42913 -extern atomic_t fscache_n_relinquishes_null;
42914 -extern atomic_t fscache_n_relinquishes_waitcrt;
42915 -extern atomic_t fscache_n_relinquishes_retire;
42916 +extern atomic_unchecked_t fscache_n_relinquishes;
42917 +extern atomic_unchecked_t fscache_n_relinquishes_null;
42918 +extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
42919 +extern atomic_unchecked_t fscache_n_relinquishes_retire;
42920
42921 -extern atomic_t fscache_n_cookie_index;
42922 -extern atomic_t fscache_n_cookie_data;
42923 -extern atomic_t fscache_n_cookie_special;
42924 +extern atomic_unchecked_t fscache_n_cookie_index;
42925 +extern atomic_unchecked_t fscache_n_cookie_data;
42926 +extern atomic_unchecked_t fscache_n_cookie_special;
42927
42928 -extern atomic_t fscache_n_object_alloc;
42929 -extern atomic_t fscache_n_object_no_alloc;
42930 -extern atomic_t fscache_n_object_lookups;
42931 -extern atomic_t fscache_n_object_lookups_negative;
42932 -extern atomic_t fscache_n_object_lookups_positive;
42933 -extern atomic_t fscache_n_object_lookups_timed_out;
42934 -extern atomic_t fscache_n_object_created;
42935 -extern atomic_t fscache_n_object_avail;
42936 -extern atomic_t fscache_n_object_dead;
42937 +extern atomic_unchecked_t fscache_n_object_alloc;
42938 +extern atomic_unchecked_t fscache_n_object_no_alloc;
42939 +extern atomic_unchecked_t fscache_n_object_lookups;
42940 +extern atomic_unchecked_t fscache_n_object_lookups_negative;
42941 +extern atomic_unchecked_t fscache_n_object_lookups_positive;
42942 +extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
42943 +extern atomic_unchecked_t fscache_n_object_created;
42944 +extern atomic_unchecked_t fscache_n_object_avail;
42945 +extern atomic_unchecked_t fscache_n_object_dead;
42946
42947 -extern atomic_t fscache_n_checkaux_none;
42948 -extern atomic_t fscache_n_checkaux_okay;
42949 -extern atomic_t fscache_n_checkaux_update;
42950 -extern atomic_t fscache_n_checkaux_obsolete;
42951 +extern atomic_unchecked_t fscache_n_checkaux_none;
42952 +extern atomic_unchecked_t fscache_n_checkaux_okay;
42953 +extern atomic_unchecked_t fscache_n_checkaux_update;
42954 +extern atomic_unchecked_t fscache_n_checkaux_obsolete;
42955
42956 extern atomic_t fscache_n_cop_alloc_object;
42957 extern atomic_t fscache_n_cop_lookup_object;
42958 @@ -255,6 +255,11 @@ static inline void fscache_stat(atomic_t *stat)
42959 atomic_inc(stat);
42960 }
42961
42962 +static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
42963 +{
42964 + atomic_inc_unchecked(stat);
42965 +}
42966 +
42967 static inline void fscache_stat_d(atomic_t *stat)
42968 {
42969 atomic_dec(stat);
42970 @@ -267,6 +272,7 @@ extern const struct file_operations fscache_stats_fops;
42971
42972 #define __fscache_stat(stat) (NULL)
42973 #define fscache_stat(stat) do {} while (0)
42974 +#define fscache_stat_unchecked(stat) do {} while (0)
42975 #define fscache_stat_d(stat) do {} while (0)
42976 #endif
42977
42978 diff --git a/fs/fscache/object.c b/fs/fscache/object.c
42979 index b6b897c..0ffff9c 100644
42980 --- a/fs/fscache/object.c
42981 +++ b/fs/fscache/object.c
42982 @@ -128,7 +128,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
42983 /* update the object metadata on disk */
42984 case FSCACHE_OBJECT_UPDATING:
42985 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
42986 - fscache_stat(&fscache_n_updates_run);
42987 + fscache_stat_unchecked(&fscache_n_updates_run);
42988 fscache_stat(&fscache_n_cop_update_object);
42989 object->cache->ops->update_object(object);
42990 fscache_stat_d(&fscache_n_cop_update_object);
42991 @@ -217,7 +217,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
42992 spin_lock(&object->lock);
42993 object->state = FSCACHE_OBJECT_DEAD;
42994 spin_unlock(&object->lock);
42995 - fscache_stat(&fscache_n_object_dead);
42996 + fscache_stat_unchecked(&fscache_n_object_dead);
42997 goto terminal_transit;
42998
42999 /* handle the parent cache of this object being withdrawn from
43000 @@ -232,7 +232,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
43001 spin_lock(&object->lock);
43002 object->state = FSCACHE_OBJECT_DEAD;
43003 spin_unlock(&object->lock);
43004 - fscache_stat(&fscache_n_object_dead);
43005 + fscache_stat_unchecked(&fscache_n_object_dead);
43006 goto terminal_transit;
43007
43008 /* complain about the object being woken up once it is
43009 @@ -461,7 +461,7 @@ static void fscache_lookup_object(struct fscache_object *object)
43010 parent->cookie->def->name, cookie->def->name,
43011 object->cache->tag->name);
43012
43013 - fscache_stat(&fscache_n_object_lookups);
43014 + fscache_stat_unchecked(&fscache_n_object_lookups);
43015 fscache_stat(&fscache_n_cop_lookup_object);
43016 ret = object->cache->ops->lookup_object(object);
43017 fscache_stat_d(&fscache_n_cop_lookup_object);
43018 @@ -472,7 +472,7 @@ static void fscache_lookup_object(struct fscache_object *object)
43019 if (ret == -ETIMEDOUT) {
43020 /* probably stuck behind another object, so move this one to
43021 * the back of the queue */
43022 - fscache_stat(&fscache_n_object_lookups_timed_out);
43023 + fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
43024 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
43025 }
43026
43027 @@ -495,7 +495,7 @@ void fscache_object_lookup_negative(struct fscache_object *object)
43028
43029 spin_lock(&object->lock);
43030 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
43031 - fscache_stat(&fscache_n_object_lookups_negative);
43032 + fscache_stat_unchecked(&fscache_n_object_lookups_negative);
43033
43034 /* transit here to allow write requests to begin stacking up
43035 * and read requests to begin returning ENODATA */
43036 @@ -541,7 +541,7 @@ void fscache_obtained_object(struct fscache_object *object)
43037 * result, in which case there may be data available */
43038 spin_lock(&object->lock);
43039 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
43040 - fscache_stat(&fscache_n_object_lookups_positive);
43041 + fscache_stat_unchecked(&fscache_n_object_lookups_positive);
43042
43043 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
43044
43045 @@ -555,7 +555,7 @@ void fscache_obtained_object(struct fscache_object *object)
43046 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
43047 } else {
43048 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
43049 - fscache_stat(&fscache_n_object_created);
43050 + fscache_stat_unchecked(&fscache_n_object_created);
43051
43052 object->state = FSCACHE_OBJECT_AVAILABLE;
43053 spin_unlock(&object->lock);
43054 @@ -602,7 +602,7 @@ static void fscache_object_available(struct fscache_object *object)
43055 fscache_enqueue_dependents(object);
43056
43057 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
43058 - fscache_stat(&fscache_n_object_avail);
43059 + fscache_stat_unchecked(&fscache_n_object_avail);
43060
43061 _leave("");
43062 }
43063 @@ -861,7 +861,7 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
43064 enum fscache_checkaux result;
43065
43066 if (!object->cookie->def->check_aux) {
43067 - fscache_stat(&fscache_n_checkaux_none);
43068 + fscache_stat_unchecked(&fscache_n_checkaux_none);
43069 return FSCACHE_CHECKAUX_OKAY;
43070 }
43071
43072 @@ -870,17 +870,17 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
43073 switch (result) {
43074 /* entry okay as is */
43075 case FSCACHE_CHECKAUX_OKAY:
43076 - fscache_stat(&fscache_n_checkaux_okay);
43077 + fscache_stat_unchecked(&fscache_n_checkaux_okay);
43078 break;
43079
43080 /* entry requires update */
43081 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
43082 - fscache_stat(&fscache_n_checkaux_update);
43083 + fscache_stat_unchecked(&fscache_n_checkaux_update);
43084 break;
43085
43086 /* entry requires deletion */
43087 case FSCACHE_CHECKAUX_OBSOLETE:
43088 - fscache_stat(&fscache_n_checkaux_obsolete);
43089 + fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
43090 break;
43091
43092 default:
43093 diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
43094 index 30afdfa..2256596 100644
43095 --- a/fs/fscache/operation.c
43096 +++ b/fs/fscache/operation.c
43097 @@ -17,7 +17,7 @@
43098 #include <linux/slab.h>
43099 #include "internal.h"
43100
43101 -atomic_t fscache_op_debug_id;
43102 +atomic_unchecked_t fscache_op_debug_id;
43103 EXPORT_SYMBOL(fscache_op_debug_id);
43104
43105 /**
43106 @@ -38,7 +38,7 @@ void fscache_enqueue_operation(struct fscache_operation *op)
43107 ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
43108 ASSERTCMP(atomic_read(&op->usage), >, 0);
43109
43110 - fscache_stat(&fscache_n_op_enqueue);
43111 + fscache_stat_unchecked(&fscache_n_op_enqueue);
43112 switch (op->flags & FSCACHE_OP_TYPE) {
43113 case FSCACHE_OP_ASYNC:
43114 _debug("queue async");
43115 @@ -69,7 +69,7 @@ static void fscache_run_op(struct fscache_object *object,
43116 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
43117 if (op->processor)
43118 fscache_enqueue_operation(op);
43119 - fscache_stat(&fscache_n_op_run);
43120 + fscache_stat_unchecked(&fscache_n_op_run);
43121 }
43122
43123 /*
43124 @@ -98,11 +98,11 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
43125 if (object->n_ops > 1) {
43126 atomic_inc(&op->usage);
43127 list_add_tail(&op->pend_link, &object->pending_ops);
43128 - fscache_stat(&fscache_n_op_pend);
43129 + fscache_stat_unchecked(&fscache_n_op_pend);
43130 } else if (!list_empty(&object->pending_ops)) {
43131 atomic_inc(&op->usage);
43132 list_add_tail(&op->pend_link, &object->pending_ops);
43133 - fscache_stat(&fscache_n_op_pend);
43134 + fscache_stat_unchecked(&fscache_n_op_pend);
43135 fscache_start_operations(object);
43136 } else {
43137 ASSERTCMP(object->n_in_progress, ==, 0);
43138 @@ -118,7 +118,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
43139 object->n_exclusive++; /* reads and writes must wait */
43140 atomic_inc(&op->usage);
43141 list_add_tail(&op->pend_link, &object->pending_ops);
43142 - fscache_stat(&fscache_n_op_pend);
43143 + fscache_stat_unchecked(&fscache_n_op_pend);
43144 ret = 0;
43145 } else {
43146 /* not allowed to submit ops in any other state */
43147 @@ -203,11 +203,11 @@ int fscache_submit_op(struct fscache_object *object,
43148 if (object->n_exclusive > 0) {
43149 atomic_inc(&op->usage);
43150 list_add_tail(&op->pend_link, &object->pending_ops);
43151 - fscache_stat(&fscache_n_op_pend);
43152 + fscache_stat_unchecked(&fscache_n_op_pend);
43153 } else if (!list_empty(&object->pending_ops)) {
43154 atomic_inc(&op->usage);
43155 list_add_tail(&op->pend_link, &object->pending_ops);
43156 - fscache_stat(&fscache_n_op_pend);
43157 + fscache_stat_unchecked(&fscache_n_op_pend);
43158 fscache_start_operations(object);
43159 } else {
43160 ASSERTCMP(object->n_exclusive, ==, 0);
43161 @@ -219,12 +219,12 @@ int fscache_submit_op(struct fscache_object *object,
43162 object->n_ops++;
43163 atomic_inc(&op->usage);
43164 list_add_tail(&op->pend_link, &object->pending_ops);
43165 - fscache_stat(&fscache_n_op_pend);
43166 + fscache_stat_unchecked(&fscache_n_op_pend);
43167 ret = 0;
43168 } else if (object->state == FSCACHE_OBJECT_DYING ||
43169 object->state == FSCACHE_OBJECT_LC_DYING ||
43170 object->state == FSCACHE_OBJECT_WITHDRAWING) {
43171 - fscache_stat(&fscache_n_op_rejected);
43172 + fscache_stat_unchecked(&fscache_n_op_rejected);
43173 ret = -ENOBUFS;
43174 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
43175 fscache_report_unexpected_submission(object, op, ostate);
43176 @@ -294,7 +294,7 @@ int fscache_cancel_op(struct fscache_operation *op)
43177
43178 ret = -EBUSY;
43179 if (!list_empty(&op->pend_link)) {
43180 - fscache_stat(&fscache_n_op_cancelled);
43181 + fscache_stat_unchecked(&fscache_n_op_cancelled);
43182 list_del_init(&op->pend_link);
43183 object->n_ops--;
43184 if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
43185 @@ -331,7 +331,7 @@ void fscache_put_operation(struct fscache_operation *op)
43186 if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
43187 BUG();
43188
43189 - fscache_stat(&fscache_n_op_release);
43190 + fscache_stat_unchecked(&fscache_n_op_release);
43191
43192 if (op->release) {
43193 op->release(op);
43194 @@ -348,7 +348,7 @@ void fscache_put_operation(struct fscache_operation *op)
43195 * lock, and defer it otherwise */
43196 if (!spin_trylock(&object->lock)) {
43197 _debug("defer put");
43198 - fscache_stat(&fscache_n_op_deferred_release);
43199 + fscache_stat_unchecked(&fscache_n_op_deferred_release);
43200
43201 cache = object->cache;
43202 spin_lock(&cache->op_gc_list_lock);
43203 @@ -410,7 +410,7 @@ void fscache_operation_gc(struct work_struct *work)
43204
43205 _debug("GC DEFERRED REL OBJ%x OP%x",
43206 object->debug_id, op->debug_id);
43207 - fscache_stat(&fscache_n_op_gc);
43208 + fscache_stat_unchecked(&fscache_n_op_gc);
43209
43210 ASSERTCMP(atomic_read(&op->usage), ==, 0);
43211
43212 diff --git a/fs/fscache/page.c b/fs/fscache/page.c
43213 index 3f7a59b..cf196cc 100644
43214 --- a/fs/fscache/page.c
43215 +++ b/fs/fscache/page.c
43216 @@ -60,7 +60,7 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
43217 val = radix_tree_lookup(&cookie->stores, page->index);
43218 if (!val) {
43219 rcu_read_unlock();
43220 - fscache_stat(&fscache_n_store_vmscan_not_storing);
43221 + fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
43222 __fscache_uncache_page(cookie, page);
43223 return true;
43224 }
43225 @@ -90,11 +90,11 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
43226 spin_unlock(&cookie->stores_lock);
43227
43228 if (xpage) {
43229 - fscache_stat(&fscache_n_store_vmscan_cancelled);
43230 - fscache_stat(&fscache_n_store_radix_deletes);
43231 + fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
43232 + fscache_stat_unchecked(&fscache_n_store_radix_deletes);
43233 ASSERTCMP(xpage, ==, page);
43234 } else {
43235 - fscache_stat(&fscache_n_store_vmscan_gone);
43236 + fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
43237 }
43238
43239 wake_up_bit(&cookie->flags, 0);
43240 @@ -107,7 +107,7 @@ page_busy:
43241 /* we might want to wait here, but that could deadlock the allocator as
43242 * the work threads writing to the cache may all end up sleeping
43243 * on memory allocation */
43244 - fscache_stat(&fscache_n_store_vmscan_busy);
43245 + fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
43246 return false;
43247 }
43248 EXPORT_SYMBOL(__fscache_maybe_release_page);
43249 @@ -131,7 +131,7 @@ static void fscache_end_page_write(struct fscache_object *object,
43250 FSCACHE_COOKIE_STORING_TAG);
43251 if (!radix_tree_tag_get(&cookie->stores, page->index,
43252 FSCACHE_COOKIE_PENDING_TAG)) {
43253 - fscache_stat(&fscache_n_store_radix_deletes);
43254 + fscache_stat_unchecked(&fscache_n_store_radix_deletes);
43255 xpage = radix_tree_delete(&cookie->stores, page->index);
43256 }
43257 spin_unlock(&cookie->stores_lock);
43258 @@ -152,7 +152,7 @@ static void fscache_attr_changed_op(struct fscache_operation *op)
43259
43260 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
43261
43262 - fscache_stat(&fscache_n_attr_changed_calls);
43263 + fscache_stat_unchecked(&fscache_n_attr_changed_calls);
43264
43265 if (fscache_object_is_active(object)) {
43266 fscache_stat(&fscache_n_cop_attr_changed);
43267 @@ -177,11 +177,11 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
43268
43269 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
43270
43271 - fscache_stat(&fscache_n_attr_changed);
43272 + fscache_stat_unchecked(&fscache_n_attr_changed);
43273
43274 op = kzalloc(sizeof(*op), GFP_KERNEL);
43275 if (!op) {
43276 - fscache_stat(&fscache_n_attr_changed_nomem);
43277 + fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
43278 _leave(" = -ENOMEM");
43279 return -ENOMEM;
43280 }
43281 @@ -199,7 +199,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
43282 if (fscache_submit_exclusive_op(object, op) < 0)
43283 goto nobufs;
43284 spin_unlock(&cookie->lock);
43285 - fscache_stat(&fscache_n_attr_changed_ok);
43286 + fscache_stat_unchecked(&fscache_n_attr_changed_ok);
43287 fscache_put_operation(op);
43288 _leave(" = 0");
43289 return 0;
43290 @@ -207,7 +207,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
43291 nobufs:
43292 spin_unlock(&cookie->lock);
43293 kfree(op);
43294 - fscache_stat(&fscache_n_attr_changed_nobufs);
43295 + fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
43296 _leave(" = %d", -ENOBUFS);
43297 return -ENOBUFS;
43298 }
43299 @@ -243,7 +243,7 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
43300 /* allocate a retrieval operation and attempt to submit it */
43301 op = kzalloc(sizeof(*op), GFP_NOIO);
43302 if (!op) {
43303 - fscache_stat(&fscache_n_retrievals_nomem);
43304 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
43305 return NULL;
43306 }
43307
43308 @@ -271,13 +271,13 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
43309 return 0;
43310 }
43311
43312 - fscache_stat(&fscache_n_retrievals_wait);
43313 + fscache_stat_unchecked(&fscache_n_retrievals_wait);
43314
43315 jif = jiffies;
43316 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
43317 fscache_wait_bit_interruptible,
43318 TASK_INTERRUPTIBLE) != 0) {
43319 - fscache_stat(&fscache_n_retrievals_intr);
43320 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
43321 _leave(" = -ERESTARTSYS");
43322 return -ERESTARTSYS;
43323 }
43324 @@ -295,8 +295,8 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
43325 */
43326 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
43327 struct fscache_retrieval *op,
43328 - atomic_t *stat_op_waits,
43329 - atomic_t *stat_object_dead)
43330 + atomic_unchecked_t *stat_op_waits,
43331 + atomic_unchecked_t *stat_object_dead)
43332 {
43333 int ret;
43334
43335 @@ -304,7 +304,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
43336 goto check_if_dead;
43337
43338 _debug(">>> WT");
43339 - fscache_stat(stat_op_waits);
43340 + fscache_stat_unchecked(stat_op_waits);
43341 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
43342 fscache_wait_bit_interruptible,
43343 TASK_INTERRUPTIBLE) < 0) {
43344 @@ -321,7 +321,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
43345
43346 check_if_dead:
43347 if (unlikely(fscache_object_is_dead(object))) {
43348 - fscache_stat(stat_object_dead);
43349 + fscache_stat_unchecked(stat_object_dead);
43350 return -ENOBUFS;
43351 }
43352 return 0;
43353 @@ -348,7 +348,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
43354
43355 _enter("%p,%p,,,", cookie, page);
43356
43357 - fscache_stat(&fscache_n_retrievals);
43358 + fscache_stat_unchecked(&fscache_n_retrievals);
43359
43360 if (hlist_empty(&cookie->backing_objects))
43361 goto nobufs;
43362 @@ -381,7 +381,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
43363 goto nobufs_unlock;
43364 spin_unlock(&cookie->lock);
43365
43366 - fscache_stat(&fscache_n_retrieval_ops);
43367 + fscache_stat_unchecked(&fscache_n_retrieval_ops);
43368
43369 /* pin the netfs read context in case we need to do the actual netfs
43370 * read because we've encountered a cache read failure */
43371 @@ -411,15 +411,15 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
43372
43373 error:
43374 if (ret == -ENOMEM)
43375 - fscache_stat(&fscache_n_retrievals_nomem);
43376 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
43377 else if (ret == -ERESTARTSYS)
43378 - fscache_stat(&fscache_n_retrievals_intr);
43379 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
43380 else if (ret == -ENODATA)
43381 - fscache_stat(&fscache_n_retrievals_nodata);
43382 + fscache_stat_unchecked(&fscache_n_retrievals_nodata);
43383 else if (ret < 0)
43384 - fscache_stat(&fscache_n_retrievals_nobufs);
43385 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
43386 else
43387 - fscache_stat(&fscache_n_retrievals_ok);
43388 + fscache_stat_unchecked(&fscache_n_retrievals_ok);
43389
43390 fscache_put_retrieval(op);
43391 _leave(" = %d", ret);
43392 @@ -429,7 +429,7 @@ nobufs_unlock:
43393 spin_unlock(&cookie->lock);
43394 kfree(op);
43395 nobufs:
43396 - fscache_stat(&fscache_n_retrievals_nobufs);
43397 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
43398 _leave(" = -ENOBUFS");
43399 return -ENOBUFS;
43400 }
43401 @@ -467,7 +467,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
43402
43403 _enter("%p,,%d,,,", cookie, *nr_pages);
43404
43405 - fscache_stat(&fscache_n_retrievals);
43406 + fscache_stat_unchecked(&fscache_n_retrievals);
43407
43408 if (hlist_empty(&cookie->backing_objects))
43409 goto nobufs;
43410 @@ -497,7 +497,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
43411 goto nobufs_unlock;
43412 spin_unlock(&cookie->lock);
43413
43414 - fscache_stat(&fscache_n_retrieval_ops);
43415 + fscache_stat_unchecked(&fscache_n_retrieval_ops);
43416
43417 /* pin the netfs read context in case we need to do the actual netfs
43418 * read because we've encountered a cache read failure */
43419 @@ -527,15 +527,15 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
43420
43421 error:
43422 if (ret == -ENOMEM)
43423 - fscache_stat(&fscache_n_retrievals_nomem);
43424 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
43425 else if (ret == -ERESTARTSYS)
43426 - fscache_stat(&fscache_n_retrievals_intr);
43427 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
43428 else if (ret == -ENODATA)
43429 - fscache_stat(&fscache_n_retrievals_nodata);
43430 + fscache_stat_unchecked(&fscache_n_retrievals_nodata);
43431 else if (ret < 0)
43432 - fscache_stat(&fscache_n_retrievals_nobufs);
43433 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
43434 else
43435 - fscache_stat(&fscache_n_retrievals_ok);
43436 + fscache_stat_unchecked(&fscache_n_retrievals_ok);
43437
43438 fscache_put_retrieval(op);
43439 _leave(" = %d", ret);
43440 @@ -545,7 +545,7 @@ nobufs_unlock:
43441 spin_unlock(&cookie->lock);
43442 kfree(op);
43443 nobufs:
43444 - fscache_stat(&fscache_n_retrievals_nobufs);
43445 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
43446 _leave(" = -ENOBUFS");
43447 return -ENOBUFS;
43448 }
43449 @@ -569,7 +569,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
43450
43451 _enter("%p,%p,,,", cookie, page);
43452
43453 - fscache_stat(&fscache_n_allocs);
43454 + fscache_stat_unchecked(&fscache_n_allocs);
43455
43456 if (hlist_empty(&cookie->backing_objects))
43457 goto nobufs;
43458 @@ -595,7 +595,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
43459 goto nobufs_unlock;
43460 spin_unlock(&cookie->lock);
43461
43462 - fscache_stat(&fscache_n_alloc_ops);
43463 + fscache_stat_unchecked(&fscache_n_alloc_ops);
43464
43465 ret = fscache_wait_for_retrieval_activation(
43466 object, op,
43467 @@ -611,11 +611,11 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
43468
43469 error:
43470 if (ret == -ERESTARTSYS)
43471 - fscache_stat(&fscache_n_allocs_intr);
43472 + fscache_stat_unchecked(&fscache_n_allocs_intr);
43473 else if (ret < 0)
43474 - fscache_stat(&fscache_n_allocs_nobufs);
43475 + fscache_stat_unchecked(&fscache_n_allocs_nobufs);
43476 else
43477 - fscache_stat(&fscache_n_allocs_ok);
43478 + fscache_stat_unchecked(&fscache_n_allocs_ok);
43479
43480 fscache_put_retrieval(op);
43481 _leave(" = %d", ret);
43482 @@ -625,7 +625,7 @@ nobufs_unlock:
43483 spin_unlock(&cookie->lock);
43484 kfree(op);
43485 nobufs:
43486 - fscache_stat(&fscache_n_allocs_nobufs);
43487 + fscache_stat_unchecked(&fscache_n_allocs_nobufs);
43488 _leave(" = -ENOBUFS");
43489 return -ENOBUFS;
43490 }
43491 @@ -666,7 +666,7 @@ static void fscache_write_op(struct fscache_operation *_op)
43492
43493 spin_lock(&cookie->stores_lock);
43494
43495 - fscache_stat(&fscache_n_store_calls);
43496 + fscache_stat_unchecked(&fscache_n_store_calls);
43497
43498 /* find a page to store */
43499 page = NULL;
43500 @@ -677,7 +677,7 @@ static void fscache_write_op(struct fscache_operation *_op)
43501 page = results[0];
43502 _debug("gang %d [%lx]", n, page->index);
43503 if (page->index > op->store_limit) {
43504 - fscache_stat(&fscache_n_store_pages_over_limit);
43505 + fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
43506 goto superseded;
43507 }
43508
43509 @@ -689,7 +689,7 @@ static void fscache_write_op(struct fscache_operation *_op)
43510 spin_unlock(&cookie->stores_lock);
43511 spin_unlock(&object->lock);
43512
43513 - fscache_stat(&fscache_n_store_pages);
43514 + fscache_stat_unchecked(&fscache_n_store_pages);
43515 fscache_stat(&fscache_n_cop_write_page);
43516 ret = object->cache->ops->write_page(op, page);
43517 fscache_stat_d(&fscache_n_cop_write_page);
43518 @@ -757,7 +757,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
43519 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
43520 ASSERT(PageFsCache(page));
43521
43522 - fscache_stat(&fscache_n_stores);
43523 + fscache_stat_unchecked(&fscache_n_stores);
43524
43525 op = kzalloc(sizeof(*op), GFP_NOIO);
43526 if (!op)
43527 @@ -808,7 +808,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
43528 spin_unlock(&cookie->stores_lock);
43529 spin_unlock(&object->lock);
43530
43531 - op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
43532 + op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
43533 op->store_limit = object->store_limit;
43534
43535 if (fscache_submit_op(object, &op->op) < 0)
43536 @@ -816,8 +816,8 @@ int __fscache_write_page(struct fscache_cookie *cookie,
43537
43538 spin_unlock(&cookie->lock);
43539 radix_tree_preload_end();
43540 - fscache_stat(&fscache_n_store_ops);
43541 - fscache_stat(&fscache_n_stores_ok);
43542 + fscache_stat_unchecked(&fscache_n_store_ops);
43543 + fscache_stat_unchecked(&fscache_n_stores_ok);
43544
43545 /* the work queue now carries its own ref on the object */
43546 fscache_put_operation(&op->op);
43547 @@ -825,14 +825,14 @@ int __fscache_write_page(struct fscache_cookie *cookie,
43548 return 0;
43549
43550 already_queued:
43551 - fscache_stat(&fscache_n_stores_again);
43552 + fscache_stat_unchecked(&fscache_n_stores_again);
43553 already_pending:
43554 spin_unlock(&cookie->stores_lock);
43555 spin_unlock(&object->lock);
43556 spin_unlock(&cookie->lock);
43557 radix_tree_preload_end();
43558 kfree(op);
43559 - fscache_stat(&fscache_n_stores_ok);
43560 + fscache_stat_unchecked(&fscache_n_stores_ok);
43561 _leave(" = 0");
43562 return 0;
43563
43564 @@ -851,14 +851,14 @@ nobufs:
43565 spin_unlock(&cookie->lock);
43566 radix_tree_preload_end();
43567 kfree(op);
43568 - fscache_stat(&fscache_n_stores_nobufs);
43569 + fscache_stat_unchecked(&fscache_n_stores_nobufs);
43570 _leave(" = -ENOBUFS");
43571 return -ENOBUFS;
43572
43573 nomem_free:
43574 kfree(op);
43575 nomem:
43576 - fscache_stat(&fscache_n_stores_oom);
43577 + fscache_stat_unchecked(&fscache_n_stores_oom);
43578 _leave(" = -ENOMEM");
43579 return -ENOMEM;
43580 }
43581 @@ -876,7 +876,7 @@ void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
43582 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
43583 ASSERTCMP(page, !=, NULL);
43584
43585 - fscache_stat(&fscache_n_uncaches);
43586 + fscache_stat_unchecked(&fscache_n_uncaches);
43587
43588 /* cache withdrawal may beat us to it */
43589 if (!PageFsCache(page))
43590 @@ -929,7 +929,7 @@ void fscache_mark_pages_cached(struct fscache_retrieval *op,
43591 unsigned long loop;
43592
43593 #ifdef CONFIG_FSCACHE_STATS
43594 - atomic_add(pagevec->nr, &fscache_n_marks);
43595 + atomic_add_unchecked(pagevec->nr, &fscache_n_marks);
43596 #endif
43597
43598 for (loop = 0; loop < pagevec->nr; loop++) {
43599 diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
43600 index 4765190..2a067f2 100644
43601 --- a/fs/fscache/stats.c
43602 +++ b/fs/fscache/stats.c
43603 @@ -18,95 +18,95 @@
43604 /*
43605 * operation counters
43606 */
43607 -atomic_t fscache_n_op_pend;
43608 -atomic_t fscache_n_op_run;
43609 -atomic_t fscache_n_op_enqueue;
43610 -atomic_t fscache_n_op_requeue;
43611 -atomic_t fscache_n_op_deferred_release;
43612 -atomic_t fscache_n_op_release;
43613 -atomic_t fscache_n_op_gc;
43614 -atomic_t fscache_n_op_cancelled;
43615 -atomic_t fscache_n_op_rejected;
43616 +atomic_unchecked_t fscache_n_op_pend;
43617 +atomic_unchecked_t fscache_n_op_run;
43618 +atomic_unchecked_t fscache_n_op_enqueue;
43619 +atomic_unchecked_t fscache_n_op_requeue;
43620 +atomic_unchecked_t fscache_n_op_deferred_release;
43621 +atomic_unchecked_t fscache_n_op_release;
43622 +atomic_unchecked_t fscache_n_op_gc;
43623 +atomic_unchecked_t fscache_n_op_cancelled;
43624 +atomic_unchecked_t fscache_n_op_rejected;
43625
43626 -atomic_t fscache_n_attr_changed;
43627 -atomic_t fscache_n_attr_changed_ok;
43628 -atomic_t fscache_n_attr_changed_nobufs;
43629 -atomic_t fscache_n_attr_changed_nomem;
43630 -atomic_t fscache_n_attr_changed_calls;
43631 +atomic_unchecked_t fscache_n_attr_changed;
43632 +atomic_unchecked_t fscache_n_attr_changed_ok;
43633 +atomic_unchecked_t fscache_n_attr_changed_nobufs;
43634 +atomic_unchecked_t fscache_n_attr_changed_nomem;
43635 +atomic_unchecked_t fscache_n_attr_changed_calls;
43636
43637 -atomic_t fscache_n_allocs;
43638 -atomic_t fscache_n_allocs_ok;
43639 -atomic_t fscache_n_allocs_wait;
43640 -atomic_t fscache_n_allocs_nobufs;
43641 -atomic_t fscache_n_allocs_intr;
43642 -atomic_t fscache_n_allocs_object_dead;
43643 -atomic_t fscache_n_alloc_ops;
43644 -atomic_t fscache_n_alloc_op_waits;
43645 +atomic_unchecked_t fscache_n_allocs;
43646 +atomic_unchecked_t fscache_n_allocs_ok;
43647 +atomic_unchecked_t fscache_n_allocs_wait;
43648 +atomic_unchecked_t fscache_n_allocs_nobufs;
43649 +atomic_unchecked_t fscache_n_allocs_intr;
43650 +atomic_unchecked_t fscache_n_allocs_object_dead;
43651 +atomic_unchecked_t fscache_n_alloc_ops;
43652 +atomic_unchecked_t fscache_n_alloc_op_waits;
43653
43654 -atomic_t fscache_n_retrievals;
43655 -atomic_t fscache_n_retrievals_ok;
43656 -atomic_t fscache_n_retrievals_wait;
43657 -atomic_t fscache_n_retrievals_nodata;
43658 -atomic_t fscache_n_retrievals_nobufs;
43659 -atomic_t fscache_n_retrievals_intr;
43660 -atomic_t fscache_n_retrievals_nomem;
43661 -atomic_t fscache_n_retrievals_object_dead;
43662 -atomic_t fscache_n_retrieval_ops;
43663 -atomic_t fscache_n_retrieval_op_waits;
43664 +atomic_unchecked_t fscache_n_retrievals;
43665 +atomic_unchecked_t fscache_n_retrievals_ok;
43666 +atomic_unchecked_t fscache_n_retrievals_wait;
43667 +atomic_unchecked_t fscache_n_retrievals_nodata;
43668 +atomic_unchecked_t fscache_n_retrievals_nobufs;
43669 +atomic_unchecked_t fscache_n_retrievals_intr;
43670 +atomic_unchecked_t fscache_n_retrievals_nomem;
43671 +atomic_unchecked_t fscache_n_retrievals_object_dead;
43672 +atomic_unchecked_t fscache_n_retrieval_ops;
43673 +atomic_unchecked_t fscache_n_retrieval_op_waits;
43674
43675 -atomic_t fscache_n_stores;
43676 -atomic_t fscache_n_stores_ok;
43677 -atomic_t fscache_n_stores_again;
43678 -atomic_t fscache_n_stores_nobufs;
43679 -atomic_t fscache_n_stores_oom;
43680 -atomic_t fscache_n_store_ops;
43681 -atomic_t fscache_n_store_calls;
43682 -atomic_t fscache_n_store_pages;
43683 -atomic_t fscache_n_store_radix_deletes;
43684 -atomic_t fscache_n_store_pages_over_limit;
43685 +atomic_unchecked_t fscache_n_stores;
43686 +atomic_unchecked_t fscache_n_stores_ok;
43687 +atomic_unchecked_t fscache_n_stores_again;
43688 +atomic_unchecked_t fscache_n_stores_nobufs;
43689 +atomic_unchecked_t fscache_n_stores_oom;
43690 +atomic_unchecked_t fscache_n_store_ops;
43691 +atomic_unchecked_t fscache_n_store_calls;
43692 +atomic_unchecked_t fscache_n_store_pages;
43693 +atomic_unchecked_t fscache_n_store_radix_deletes;
43694 +atomic_unchecked_t fscache_n_store_pages_over_limit;
43695
43696 -atomic_t fscache_n_store_vmscan_not_storing;
43697 -atomic_t fscache_n_store_vmscan_gone;
43698 -atomic_t fscache_n_store_vmscan_busy;
43699 -atomic_t fscache_n_store_vmscan_cancelled;
43700 +atomic_unchecked_t fscache_n_store_vmscan_not_storing;
43701 +atomic_unchecked_t fscache_n_store_vmscan_gone;
43702 +atomic_unchecked_t fscache_n_store_vmscan_busy;
43703 +atomic_unchecked_t fscache_n_store_vmscan_cancelled;
43704
43705 -atomic_t fscache_n_marks;
43706 -atomic_t fscache_n_uncaches;
43707 +atomic_unchecked_t fscache_n_marks;
43708 +atomic_unchecked_t fscache_n_uncaches;
43709
43710 -atomic_t fscache_n_acquires;
43711 -atomic_t fscache_n_acquires_null;
43712 -atomic_t fscache_n_acquires_no_cache;
43713 -atomic_t fscache_n_acquires_ok;
43714 -atomic_t fscache_n_acquires_nobufs;
43715 -atomic_t fscache_n_acquires_oom;
43716 +atomic_unchecked_t fscache_n_acquires;
43717 +atomic_unchecked_t fscache_n_acquires_null;
43718 +atomic_unchecked_t fscache_n_acquires_no_cache;
43719 +atomic_unchecked_t fscache_n_acquires_ok;
43720 +atomic_unchecked_t fscache_n_acquires_nobufs;
43721 +atomic_unchecked_t fscache_n_acquires_oom;
43722
43723 -atomic_t fscache_n_updates;
43724 -atomic_t fscache_n_updates_null;
43725 -atomic_t fscache_n_updates_run;
43726 +atomic_unchecked_t fscache_n_updates;
43727 +atomic_unchecked_t fscache_n_updates_null;
43728 +atomic_unchecked_t fscache_n_updates_run;
43729
43730 -atomic_t fscache_n_relinquishes;
43731 -atomic_t fscache_n_relinquishes_null;
43732 -atomic_t fscache_n_relinquishes_waitcrt;
43733 -atomic_t fscache_n_relinquishes_retire;
43734 +atomic_unchecked_t fscache_n_relinquishes;
43735 +atomic_unchecked_t fscache_n_relinquishes_null;
43736 +atomic_unchecked_t fscache_n_relinquishes_waitcrt;
43737 +atomic_unchecked_t fscache_n_relinquishes_retire;
43738
43739 -atomic_t fscache_n_cookie_index;
43740 -atomic_t fscache_n_cookie_data;
43741 -atomic_t fscache_n_cookie_special;
43742 +atomic_unchecked_t fscache_n_cookie_index;
43743 +atomic_unchecked_t fscache_n_cookie_data;
43744 +atomic_unchecked_t fscache_n_cookie_special;
43745
43746 -atomic_t fscache_n_object_alloc;
43747 -atomic_t fscache_n_object_no_alloc;
43748 -atomic_t fscache_n_object_lookups;
43749 -atomic_t fscache_n_object_lookups_negative;
43750 -atomic_t fscache_n_object_lookups_positive;
43751 -atomic_t fscache_n_object_lookups_timed_out;
43752 -atomic_t fscache_n_object_created;
43753 -atomic_t fscache_n_object_avail;
43754 -atomic_t fscache_n_object_dead;
43755 +atomic_unchecked_t fscache_n_object_alloc;
43756 +atomic_unchecked_t fscache_n_object_no_alloc;
43757 +atomic_unchecked_t fscache_n_object_lookups;
43758 +atomic_unchecked_t fscache_n_object_lookups_negative;
43759 +atomic_unchecked_t fscache_n_object_lookups_positive;
43760 +atomic_unchecked_t fscache_n_object_lookups_timed_out;
43761 +atomic_unchecked_t fscache_n_object_created;
43762 +atomic_unchecked_t fscache_n_object_avail;
43763 +atomic_unchecked_t fscache_n_object_dead;
43764
43765 -atomic_t fscache_n_checkaux_none;
43766 -atomic_t fscache_n_checkaux_okay;
43767 -atomic_t fscache_n_checkaux_update;
43768 -atomic_t fscache_n_checkaux_obsolete;
43769 +atomic_unchecked_t fscache_n_checkaux_none;
43770 +atomic_unchecked_t fscache_n_checkaux_okay;
43771 +atomic_unchecked_t fscache_n_checkaux_update;
43772 +atomic_unchecked_t fscache_n_checkaux_obsolete;
43773
43774 atomic_t fscache_n_cop_alloc_object;
43775 atomic_t fscache_n_cop_lookup_object;
43776 @@ -133,113 +133,113 @@ static int fscache_stats_show(struct seq_file *m, void *v)
43777 seq_puts(m, "FS-Cache statistics\n");
43778
43779 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
43780 - atomic_read(&fscache_n_cookie_index),
43781 - atomic_read(&fscache_n_cookie_data),
43782 - atomic_read(&fscache_n_cookie_special));
43783 + atomic_read_unchecked(&fscache_n_cookie_index),
43784 + atomic_read_unchecked(&fscache_n_cookie_data),
43785 + atomic_read_unchecked(&fscache_n_cookie_special));
43786
43787 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
43788 - atomic_read(&fscache_n_object_alloc),
43789 - atomic_read(&fscache_n_object_no_alloc),
43790 - atomic_read(&fscache_n_object_avail),
43791 - atomic_read(&fscache_n_object_dead));
43792 + atomic_read_unchecked(&fscache_n_object_alloc),
43793 + atomic_read_unchecked(&fscache_n_object_no_alloc),
43794 + atomic_read_unchecked(&fscache_n_object_avail),
43795 + atomic_read_unchecked(&fscache_n_object_dead));
43796 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
43797 - atomic_read(&fscache_n_checkaux_none),
43798 - atomic_read(&fscache_n_checkaux_okay),
43799 - atomic_read(&fscache_n_checkaux_update),
43800 - atomic_read(&fscache_n_checkaux_obsolete));
43801 + atomic_read_unchecked(&fscache_n_checkaux_none),
43802 + atomic_read_unchecked(&fscache_n_checkaux_okay),
43803 + atomic_read_unchecked(&fscache_n_checkaux_update),
43804 + atomic_read_unchecked(&fscache_n_checkaux_obsolete));
43805
43806 seq_printf(m, "Pages : mrk=%u unc=%u\n",
43807 - atomic_read(&fscache_n_marks),
43808 - atomic_read(&fscache_n_uncaches));
43809 + atomic_read_unchecked(&fscache_n_marks),
43810 + atomic_read_unchecked(&fscache_n_uncaches));
43811
43812 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
43813 " oom=%u\n",
43814 - atomic_read(&fscache_n_acquires),
43815 - atomic_read(&fscache_n_acquires_null),
43816 - atomic_read(&fscache_n_acquires_no_cache),
43817 - atomic_read(&fscache_n_acquires_ok),
43818 - atomic_read(&fscache_n_acquires_nobufs),
43819 - atomic_read(&fscache_n_acquires_oom));
43820 + atomic_read_unchecked(&fscache_n_acquires),
43821 + atomic_read_unchecked(&fscache_n_acquires_null),
43822 + atomic_read_unchecked(&fscache_n_acquires_no_cache),
43823 + atomic_read_unchecked(&fscache_n_acquires_ok),
43824 + atomic_read_unchecked(&fscache_n_acquires_nobufs),
43825 + atomic_read_unchecked(&fscache_n_acquires_oom));
43826
43827 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
43828 - atomic_read(&fscache_n_object_lookups),
43829 - atomic_read(&fscache_n_object_lookups_negative),
43830 - atomic_read(&fscache_n_object_lookups_positive),
43831 - atomic_read(&fscache_n_object_created),
43832 - atomic_read(&fscache_n_object_lookups_timed_out));
43833 + atomic_read_unchecked(&fscache_n_object_lookups),
43834 + atomic_read_unchecked(&fscache_n_object_lookups_negative),
43835 + atomic_read_unchecked(&fscache_n_object_lookups_positive),
43836 + atomic_read_unchecked(&fscache_n_object_created),
43837 + atomic_read_unchecked(&fscache_n_object_lookups_timed_out));
43838
43839 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
43840 - atomic_read(&fscache_n_updates),
43841 - atomic_read(&fscache_n_updates_null),
43842 - atomic_read(&fscache_n_updates_run));
43843 + atomic_read_unchecked(&fscache_n_updates),
43844 + atomic_read_unchecked(&fscache_n_updates_null),
43845 + atomic_read_unchecked(&fscache_n_updates_run));
43846
43847 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
43848 - atomic_read(&fscache_n_relinquishes),
43849 - atomic_read(&fscache_n_relinquishes_null),
43850 - atomic_read(&fscache_n_relinquishes_waitcrt),
43851 - atomic_read(&fscache_n_relinquishes_retire));
43852 + atomic_read_unchecked(&fscache_n_relinquishes),
43853 + atomic_read_unchecked(&fscache_n_relinquishes_null),
43854 + atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
43855 + atomic_read_unchecked(&fscache_n_relinquishes_retire));
43856
43857 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
43858 - atomic_read(&fscache_n_attr_changed),
43859 - atomic_read(&fscache_n_attr_changed_ok),
43860 - atomic_read(&fscache_n_attr_changed_nobufs),
43861 - atomic_read(&fscache_n_attr_changed_nomem),
43862 - atomic_read(&fscache_n_attr_changed_calls));
43863 + atomic_read_unchecked(&fscache_n_attr_changed),
43864 + atomic_read_unchecked(&fscache_n_attr_changed_ok),
43865 + atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
43866 + atomic_read_unchecked(&fscache_n_attr_changed_nomem),
43867 + atomic_read_unchecked(&fscache_n_attr_changed_calls));
43868
43869 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
43870 - atomic_read(&fscache_n_allocs),
43871 - atomic_read(&fscache_n_allocs_ok),
43872 - atomic_read(&fscache_n_allocs_wait),
43873 - atomic_read(&fscache_n_allocs_nobufs),
43874 - atomic_read(&fscache_n_allocs_intr));
43875 + atomic_read_unchecked(&fscache_n_allocs),
43876 + atomic_read_unchecked(&fscache_n_allocs_ok),
43877 + atomic_read_unchecked(&fscache_n_allocs_wait),
43878 + atomic_read_unchecked(&fscache_n_allocs_nobufs),
43879 + atomic_read_unchecked(&fscache_n_allocs_intr));
43880 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
43881 - atomic_read(&fscache_n_alloc_ops),
43882 - atomic_read(&fscache_n_alloc_op_waits),
43883 - atomic_read(&fscache_n_allocs_object_dead));
43884 + atomic_read_unchecked(&fscache_n_alloc_ops),
43885 + atomic_read_unchecked(&fscache_n_alloc_op_waits),
43886 + atomic_read_unchecked(&fscache_n_allocs_object_dead));
43887
43888 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
43889 " int=%u oom=%u\n",
43890 - atomic_read(&fscache_n_retrievals),
43891 - atomic_read(&fscache_n_retrievals_ok),
43892 - atomic_read(&fscache_n_retrievals_wait),
43893 - atomic_read(&fscache_n_retrievals_nodata),
43894 - atomic_read(&fscache_n_retrievals_nobufs),
43895 - atomic_read(&fscache_n_retrievals_intr),
43896 - atomic_read(&fscache_n_retrievals_nomem));
43897 + atomic_read_unchecked(&fscache_n_retrievals),
43898 + atomic_read_unchecked(&fscache_n_retrievals_ok),
43899 + atomic_read_unchecked(&fscache_n_retrievals_wait),
43900 + atomic_read_unchecked(&fscache_n_retrievals_nodata),
43901 + atomic_read_unchecked(&fscache_n_retrievals_nobufs),
43902 + atomic_read_unchecked(&fscache_n_retrievals_intr),
43903 + atomic_read_unchecked(&fscache_n_retrievals_nomem));
43904 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
43905 - atomic_read(&fscache_n_retrieval_ops),
43906 - atomic_read(&fscache_n_retrieval_op_waits),
43907 - atomic_read(&fscache_n_retrievals_object_dead));
43908 + atomic_read_unchecked(&fscache_n_retrieval_ops),
43909 + atomic_read_unchecked(&fscache_n_retrieval_op_waits),
43910 + atomic_read_unchecked(&fscache_n_retrievals_object_dead));
43911
43912 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
43913 - atomic_read(&fscache_n_stores),
43914 - atomic_read(&fscache_n_stores_ok),
43915 - atomic_read(&fscache_n_stores_again),
43916 - atomic_read(&fscache_n_stores_nobufs),
43917 - atomic_read(&fscache_n_stores_oom));
43918 + atomic_read_unchecked(&fscache_n_stores),
43919 + atomic_read_unchecked(&fscache_n_stores_ok),
43920 + atomic_read_unchecked(&fscache_n_stores_again),
43921 + atomic_read_unchecked(&fscache_n_stores_nobufs),
43922 + atomic_read_unchecked(&fscache_n_stores_oom));
43923 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
43924 - atomic_read(&fscache_n_store_ops),
43925 - atomic_read(&fscache_n_store_calls),
43926 - atomic_read(&fscache_n_store_pages),
43927 - atomic_read(&fscache_n_store_radix_deletes),
43928 - atomic_read(&fscache_n_store_pages_over_limit));
43929 + atomic_read_unchecked(&fscache_n_store_ops),
43930 + atomic_read_unchecked(&fscache_n_store_calls),
43931 + atomic_read_unchecked(&fscache_n_store_pages),
43932 + atomic_read_unchecked(&fscache_n_store_radix_deletes),
43933 + atomic_read_unchecked(&fscache_n_store_pages_over_limit));
43934
43935 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
43936 - atomic_read(&fscache_n_store_vmscan_not_storing),
43937 - atomic_read(&fscache_n_store_vmscan_gone),
43938 - atomic_read(&fscache_n_store_vmscan_busy),
43939 - atomic_read(&fscache_n_store_vmscan_cancelled));
43940 + atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
43941 + atomic_read_unchecked(&fscache_n_store_vmscan_gone),
43942 + atomic_read_unchecked(&fscache_n_store_vmscan_busy),
43943 + atomic_read_unchecked(&fscache_n_store_vmscan_cancelled));
43944
43945 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
43946 - atomic_read(&fscache_n_op_pend),
43947 - atomic_read(&fscache_n_op_run),
43948 - atomic_read(&fscache_n_op_enqueue),
43949 - atomic_read(&fscache_n_op_cancelled),
43950 - atomic_read(&fscache_n_op_rejected));
43951 + atomic_read_unchecked(&fscache_n_op_pend),
43952 + atomic_read_unchecked(&fscache_n_op_run),
43953 + atomic_read_unchecked(&fscache_n_op_enqueue),
43954 + atomic_read_unchecked(&fscache_n_op_cancelled),
43955 + atomic_read_unchecked(&fscache_n_op_rejected));
43956 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
43957 - atomic_read(&fscache_n_op_deferred_release),
43958 - atomic_read(&fscache_n_op_release),
43959 - atomic_read(&fscache_n_op_gc));
43960 + atomic_read_unchecked(&fscache_n_op_deferred_release),
43961 + atomic_read_unchecked(&fscache_n_op_release),
43962 + atomic_read_unchecked(&fscache_n_op_gc));
43963
43964 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
43965 atomic_read(&fscache_n_cop_alloc_object),
43966 diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
43967 index 3426521..3b75162 100644
43968 --- a/fs/fuse/cuse.c
43969 +++ b/fs/fuse/cuse.c
43970 @@ -587,10 +587,12 @@ static int __init cuse_init(void)
43971 INIT_LIST_HEAD(&cuse_conntbl[i]);
43972
43973 /* inherit and extend fuse_dev_operations */
43974 - cuse_channel_fops = fuse_dev_operations;
43975 - cuse_channel_fops.owner = THIS_MODULE;
43976 - cuse_channel_fops.open = cuse_channel_open;
43977 - cuse_channel_fops.release = cuse_channel_release;
43978 + pax_open_kernel();
43979 + memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
43980 + *(void **)&cuse_channel_fops.owner = THIS_MODULE;
43981 + *(void **)&cuse_channel_fops.open = cuse_channel_open;
43982 + *(void **)&cuse_channel_fops.release = cuse_channel_release;
43983 + pax_close_kernel();
43984
43985 cuse_class = class_create(THIS_MODULE, "cuse");
43986 if (IS_ERR(cuse_class))
43987 diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
43988 index 2aaf3ea..8e50863 100644
43989 --- a/fs/fuse/dev.c
43990 +++ b/fs/fuse/dev.c
43991 @@ -1242,7 +1242,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
43992 ret = 0;
43993 pipe_lock(pipe);
43994
43995 - if (!pipe->readers) {
43996 + if (!atomic_read(&pipe->readers)) {
43997 send_sig(SIGPIPE, current, 0);
43998 if (!ret)
43999 ret = -EPIPE;
44000 diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
44001 index 9f63e49..d8a64c0 100644
44002 --- a/fs/fuse/dir.c
44003 +++ b/fs/fuse/dir.c
44004 @@ -1147,7 +1147,7 @@ static char *read_link(struct dentry *dentry)
44005 return link;
44006 }
44007
44008 -static void free_link(char *link)
44009 +static void free_link(const char *link)
44010 {
44011 if (!IS_ERR(link))
44012 free_page((unsigned long) link);
44013 diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
44014 index cfd4959..a780959 100644
44015 --- a/fs/gfs2/inode.c
44016 +++ b/fs/gfs2/inode.c
44017 @@ -1490,7 +1490,7 @@ out:
44018
44019 static void gfs2_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
44020 {
44021 - char *s = nd_get_link(nd);
44022 + const char *s = nd_get_link(nd);
44023 if (!IS_ERR(s))
44024 kfree(s);
44025 }
44026 diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
44027 index 0be5a78..9cfb853 100644
44028 --- a/fs/hugetlbfs/inode.c
44029 +++ b/fs/hugetlbfs/inode.c
44030 @@ -915,7 +915,7 @@ static struct file_system_type hugetlbfs_fs_type = {
44031 .kill_sb = kill_litter_super,
44032 };
44033
44034 -static struct vfsmount *hugetlbfs_vfsmount;
44035 +struct vfsmount *hugetlbfs_vfsmount;
44036
44037 static int can_do_hugetlb_shm(void)
44038 {
44039 diff --git a/fs/inode.c b/fs/inode.c
44040 index ee4e66b..0451521 100644
44041 --- a/fs/inode.c
44042 +++ b/fs/inode.c
44043 @@ -787,8 +787,8 @@ unsigned int get_next_ino(void)
44044
44045 #ifdef CONFIG_SMP
44046 if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
44047 - static atomic_t shared_last_ino;
44048 - int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
44049 + static atomic_unchecked_t shared_last_ino;
44050 + int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino);
44051
44052 res = next - LAST_INO_BATCH;
44053 }
44054 diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
44055 index e513f19..2ab1351 100644
44056 --- a/fs/jffs2/erase.c
44057 +++ b/fs/jffs2/erase.c
44058 @@ -439,7 +439,8 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
44059 struct jffs2_unknown_node marker = {
44060 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
44061 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
44062 - .totlen = cpu_to_je32(c->cleanmarker_size)
44063 + .totlen = cpu_to_je32(c->cleanmarker_size),
44064 + .hdr_crc = cpu_to_je32(0)
44065 };
44066
44067 jffs2_prealloc_raw_node_refs(c, jeb, 1);
44068 diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
44069 index b09e51d..e482afa 100644
44070 --- a/fs/jffs2/wbuf.c
44071 +++ b/fs/jffs2/wbuf.c
44072 @@ -1011,7 +1011,8 @@ static const struct jffs2_unknown_node oob_cleanmarker =
44073 {
44074 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
44075 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
44076 - .totlen = constant_cpu_to_je32(8)
44077 + .totlen = constant_cpu_to_je32(8),
44078 + .hdr_crc = constant_cpu_to_je32(0)
44079 };
44080
44081 /*
44082 diff --git a/fs/jfs/super.c b/fs/jfs/super.c
44083 index a44eff0..462e07d 100644
44084 --- a/fs/jfs/super.c
44085 +++ b/fs/jfs/super.c
44086 @@ -802,7 +802,7 @@ static int __init init_jfs_fs(void)
44087
44088 jfs_inode_cachep =
44089 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
44090 - SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
44091 + SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
44092 init_once);
44093 if (jfs_inode_cachep == NULL)
44094 return -ENOMEM;
44095 diff --git a/fs/libfs.c b/fs/libfs.c
44096 index f6d411e..e82a08d 100644
44097 --- a/fs/libfs.c
44098 +++ b/fs/libfs.c
44099 @@ -165,6 +165,9 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
44100
44101 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
44102 struct dentry *next;
44103 + char d_name[sizeof(next->d_iname)];
44104 + const unsigned char *name;
44105 +
44106 next = list_entry(p, struct dentry, d_u.d_child);
44107 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
44108 if (!simple_positive(next)) {
44109 @@ -174,7 +177,12 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
44110
44111 spin_unlock(&next->d_lock);
44112 spin_unlock(&dentry->d_lock);
44113 - if (filldir(dirent, next->d_name.name,
44114 + name = next->d_name.name;
44115 + if (name == next->d_iname) {
44116 + memcpy(d_name, name, next->d_name.len);
44117 + name = d_name;
44118 + }
44119 + if (filldir(dirent, name,
44120 next->d_name.len, filp->f_pos,
44121 next->d_inode->i_ino,
44122 dt_type(next->d_inode)) < 0)
44123 diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
44124 index 8392cb8..80d6193 100644
44125 --- a/fs/lockd/clntproc.c
44126 +++ b/fs/lockd/clntproc.c
44127 @@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt_cancel_ops;
44128 /*
44129 * Cookie counter for NLM requests
44130 */
44131 -static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
44132 +static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
44133
44134 void nlmclnt_next_cookie(struct nlm_cookie *c)
44135 {
44136 - u32 cookie = atomic_inc_return(&nlm_cookie);
44137 + u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
44138
44139 memcpy(c->data, &cookie, 4);
44140 c->len=4;
44141 diff --git a/fs/locks.c b/fs/locks.c
44142 index 637694b..f84a121 100644
44143 --- a/fs/locks.c
44144 +++ b/fs/locks.c
44145 @@ -2074,16 +2074,16 @@ void locks_remove_flock(struct file *filp)
44146 return;
44147
44148 if (filp->f_op && filp->f_op->flock) {
44149 - struct file_lock fl = {
44150 + struct file_lock flock = {
44151 .fl_pid = current->tgid,
44152 .fl_file = filp,
44153 .fl_flags = FL_FLOCK,
44154 .fl_type = F_UNLCK,
44155 .fl_end = OFFSET_MAX,
44156 };
44157 - filp->f_op->flock(filp, F_SETLKW, &fl);
44158 - if (fl.fl_ops && fl.fl_ops->fl_release_private)
44159 - fl.fl_ops->fl_release_private(&fl);
44160 + filp->f_op->flock(filp, F_SETLKW, &flock);
44161 + if (flock.fl_ops && flock.fl_ops->fl_release_private)
44162 + flock.fl_ops->fl_release_private(&flock);
44163 }
44164
44165 lock_flocks();
44166 diff --git a/fs/namei.c b/fs/namei.c
44167 index 5008f01..90328a7 100644
44168 --- a/fs/namei.c
44169 +++ b/fs/namei.c
44170 @@ -279,16 +279,32 @@ int generic_permission(struct inode *inode, int mask)
44171 if (ret != -EACCES)
44172 return ret;
44173
44174 +#ifdef CONFIG_GRKERNSEC
44175 + /* we'll block if we have to log due to a denied capability use */
44176 + if (mask & MAY_NOT_BLOCK)
44177 + return -ECHILD;
44178 +#endif
44179 +
44180 if (S_ISDIR(inode->i_mode)) {
44181 /* DACs are overridable for directories */
44182 - if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
44183 - return 0;
44184 if (!(mask & MAY_WRITE))
44185 - if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
44186 + if (ns_capable_nolog(inode_userns(inode), CAP_DAC_OVERRIDE) ||
44187 + ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
44188 return 0;
44189 + if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
44190 + return 0;
44191 return -EACCES;
44192 }
44193 /*
44194 + * Searching includes executable on directories, else just read.
44195 + */
44196 + mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
44197 + if (mask == MAY_READ)
44198 + if (ns_capable_nolog(inode_userns(inode), CAP_DAC_OVERRIDE) ||
44199 + ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
44200 + return 0;
44201 +
44202 + /*
44203 * Read/write DACs are always overridable.
44204 * Executable DACs are overridable when there is
44205 * at least one exec bit set.
44206 @@ -297,14 +313,6 @@ int generic_permission(struct inode *inode, int mask)
44207 if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
44208 return 0;
44209
44210 - /*
44211 - * Searching includes executable on directories, else just read.
44212 - */
44213 - mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
44214 - if (mask == MAY_READ)
44215 - if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
44216 - return 0;
44217 -
44218 return -EACCES;
44219 }
44220
44221 @@ -653,11 +661,19 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
44222 return error;
44223 }
44224
44225 + if (gr_handle_follow_link(dentry->d_parent->d_inode,
44226 + dentry->d_inode, dentry, nd->path.mnt)) {
44227 + error = -EACCES;
44228 + *p = ERR_PTR(error); /* no ->put_link(), please */
44229 + path_put(&nd->path);
44230 + return error;
44231 + }
44232 +
44233 nd->last_type = LAST_BIND;
44234 *p = dentry->d_inode->i_op->follow_link(dentry, nd);
44235 error = PTR_ERR(*p);
44236 if (!IS_ERR(*p)) {
44237 - char *s = nd_get_link(nd);
44238 + const char *s = nd_get_link(nd);
44239 error = 0;
44240 if (s)
44241 error = __vfs_follow_link(nd, s);
44242 @@ -1622,6 +1638,21 @@ static int path_lookupat(int dfd, const char *name,
44243 if (!err)
44244 err = complete_walk(nd);
44245
44246 + if (!(nd->flags & LOOKUP_PARENT)) {
44247 +#ifdef CONFIG_GRKERNSEC
44248 + if (flags & LOOKUP_RCU) {
44249 + if (!err)
44250 + path_put(&nd->path);
44251 + err = -ECHILD;
44252 + } else
44253 +#endif
44254 + if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
44255 + if (!err)
44256 + path_put(&nd->path);
44257 + err = -ENOENT;
44258 + }
44259 + }
44260 +
44261 if (!err && nd->flags & LOOKUP_DIRECTORY) {
44262 if (!nd->inode->i_op->lookup) {
44263 path_put(&nd->path);
44264 @@ -1649,6 +1680,15 @@ static int do_path_lookup(int dfd, const char *name,
44265 retval = path_lookupat(dfd, name, flags | LOOKUP_REVAL, nd);
44266
44267 if (likely(!retval)) {
44268 + if (*name != '/' && nd->path.dentry && nd->inode) {
44269 +#ifdef CONFIG_GRKERNSEC
44270 + if (flags & LOOKUP_RCU)
44271 + return -ECHILD;
44272 +#endif
44273 + if (!gr_chroot_fchdir(nd->path.dentry, nd->path.mnt))
44274 + return -ENOENT;
44275 + }
44276 +
44277 if (unlikely(!audit_dummy_context())) {
44278 if (nd->path.dentry && nd->inode)
44279 audit_inode(name, nd->path.dentry);
44280 @@ -2046,6 +2086,13 @@ static int may_open(struct path *path, int acc_mode, int flag)
44281 if (flag & O_NOATIME && !inode_owner_or_capable(inode))
44282 return -EPERM;
44283
44284 + if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode))
44285 + return -EPERM;
44286 + if (gr_handle_rawio(inode))
44287 + return -EPERM;
44288 + if (!gr_acl_handle_open(dentry, path->mnt, acc_mode))
44289 + return -EACCES;
44290 +
44291 return 0;
44292 }
44293
44294 @@ -2107,6 +2154,16 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
44295 error = complete_walk(nd);
44296 if (error)
44297 return ERR_PTR(error);
44298 +#ifdef CONFIG_GRKERNSEC
44299 + if (nd->flags & LOOKUP_RCU) {
44300 + error = -ECHILD;
44301 + goto exit;
44302 + }
44303 +#endif
44304 + if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
44305 + error = -ENOENT;
44306 + goto exit;
44307 + }
44308 audit_inode(pathname, nd->path.dentry);
44309 if (open_flag & O_CREAT) {
44310 error = -EISDIR;
44311 @@ -2117,6 +2174,16 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
44312 error = complete_walk(nd);
44313 if (error)
44314 return ERR_PTR(error);
44315 +#ifdef CONFIG_GRKERNSEC
44316 + if (nd->flags & LOOKUP_RCU) {
44317 + error = -ECHILD;
44318 + goto exit;
44319 + }
44320 +#endif
44321 + if (!gr_acl_handle_hidden_file(dir, nd->path.mnt)) {
44322 + error = -ENOENT;
44323 + goto exit;
44324 + }
44325 audit_inode(pathname, dir);
44326 goto ok;
44327 }
44328 @@ -2138,6 +2205,16 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
44329 error = complete_walk(nd);
44330 if (error)
44331 return ERR_PTR(-ECHILD);
44332 +#ifdef CONFIG_GRKERNSEC
44333 + if (nd->flags & LOOKUP_RCU) {
44334 + error = -ECHILD;
44335 + goto exit;
44336 + }
44337 +#endif
44338 + if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
44339 + error = -ENOENT;
44340 + goto exit;
44341 + }
44342
44343 error = -ENOTDIR;
44344 if (nd->flags & LOOKUP_DIRECTORY) {
44345 @@ -2178,6 +2255,12 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
44346 /* Negative dentry, just create the file */
44347 if (!dentry->d_inode) {
44348 int mode = op->mode;
44349 +
44350 + if (!gr_acl_handle_creat(path->dentry, nd->path.dentry, path->mnt, open_flag, acc_mode, mode)) {
44351 + error = -EACCES;
44352 + goto exit_mutex_unlock;
44353 + }
44354 +
44355 if (!IS_POSIXACL(dir->d_inode))
44356 mode &= ~current_umask();
44357 /*
44358 @@ -2201,6 +2284,8 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
44359 error = vfs_create(dir->d_inode, dentry, mode, nd);
44360 if (error)
44361 goto exit_mutex_unlock;
44362 + else
44363 + gr_handle_create(path->dentry, path->mnt);
44364 mutex_unlock(&dir->d_inode->i_mutex);
44365 dput(nd->path.dentry);
44366 nd->path.dentry = dentry;
44367 @@ -2210,6 +2295,19 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
44368 /*
44369 * It already exists.
44370 */
44371 +
44372 + if (!gr_acl_handle_hidden_file(dentry, nd->path.mnt)) {
44373 + error = -ENOENT;
44374 + goto exit_mutex_unlock;
44375 + }
44376 +
44377 + /* only check if O_CREAT is specified, all other checks need to go
44378 + into may_open */
44379 + if (gr_handle_fifo(path->dentry, path->mnt, dir, open_flag, acc_mode)) {
44380 + error = -EACCES;
44381 + goto exit_mutex_unlock;
44382 + }
44383 +
44384 mutex_unlock(&dir->d_inode->i_mutex);
44385 audit_inode(pathname, path->dentry);
44386
44387 @@ -2422,6 +2520,11 @@ struct dentry *kern_path_create(int dfd, const char *pathname, struct path *path
44388 *path = nd.path;
44389 return dentry;
44390 eexist:
44391 + if (!gr_acl_handle_hidden_file(dentry, nd.path.mnt)) {
44392 + dput(dentry);
44393 + dentry = ERR_PTR(-ENOENT);
44394 + goto fail;
44395 + }
44396 dput(dentry);
44397 dentry = ERR_PTR(-EEXIST);
44398 fail:
44399 @@ -2444,6 +2547,20 @@ struct dentry *user_path_create(int dfd, const char __user *pathname, struct pat
44400 }
44401 EXPORT_SYMBOL(user_path_create);
44402
44403 +static struct dentry *user_path_create_with_name(int dfd, const char __user *pathname, struct path *path, char **to, int is_dir)
44404 +{
44405 + char *tmp = getname(pathname);
44406 + struct dentry *res;
44407 + if (IS_ERR(tmp))
44408 + return ERR_CAST(tmp);
44409 + res = kern_path_create(dfd, tmp, path, is_dir);
44410 + if (IS_ERR(res))
44411 + putname(tmp);
44412 + else
44413 + *to = tmp;
44414 + return res;
44415 +}
44416 +
44417 int vfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
44418 {
44419 int error = may_create(dir, dentry);
44420 @@ -2511,6 +2628,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, int, mode,
44421 error = mnt_want_write(path.mnt);
44422 if (error)
44423 goto out_dput;
44424 +
44425 + if (gr_handle_chroot_mknod(dentry, path.mnt, mode)) {
44426 + error = -EPERM;
44427 + goto out_drop_write;
44428 + }
44429 +
44430 + if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
44431 + error = -EACCES;
44432 + goto out_drop_write;
44433 + }
44434 +
44435 error = security_path_mknod(&path, dentry, mode, dev);
44436 if (error)
44437 goto out_drop_write;
44438 @@ -2528,6 +2656,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, int, mode,
44439 }
44440 out_drop_write:
44441 mnt_drop_write(path.mnt);
44442 +
44443 + if (!error)
44444 + gr_handle_create(dentry, path.mnt);
44445 out_dput:
44446 dput(dentry);
44447 mutex_unlock(&path.dentry->d_inode->i_mutex);
44448 @@ -2577,12 +2708,21 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, int, mode)
44449 error = mnt_want_write(path.mnt);
44450 if (error)
44451 goto out_dput;
44452 +
44453 + if (!gr_acl_handle_mkdir(dentry, path.dentry, path.mnt)) {
44454 + error = -EACCES;
44455 + goto out_drop_write;
44456 + }
44457 +
44458 error = security_path_mkdir(&path, dentry, mode);
44459 if (error)
44460 goto out_drop_write;
44461 error = vfs_mkdir(path.dentry->d_inode, dentry, mode);
44462 out_drop_write:
44463 mnt_drop_write(path.mnt);
44464 +
44465 + if (!error)
44466 + gr_handle_create(dentry, path.mnt);
44467 out_dput:
44468 dput(dentry);
44469 mutex_unlock(&path.dentry->d_inode->i_mutex);
44470 @@ -2662,6 +2802,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
44471 char * name;
44472 struct dentry *dentry;
44473 struct nameidata nd;
44474 + ino_t saved_ino = 0;
44475 + dev_t saved_dev = 0;
44476
44477 error = user_path_parent(dfd, pathname, &nd, &name);
44478 if (error)
44479 @@ -2690,6 +2832,15 @@ static long do_rmdir(int dfd, const char __user *pathname)
44480 error = -ENOENT;
44481 goto exit3;
44482 }
44483 +
44484 + saved_ino = dentry->d_inode->i_ino;
44485 + saved_dev = gr_get_dev_from_dentry(dentry);
44486 +
44487 + if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
44488 + error = -EACCES;
44489 + goto exit3;
44490 + }
44491 +
44492 error = mnt_want_write(nd.path.mnt);
44493 if (error)
44494 goto exit3;
44495 @@ -2697,6 +2848,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
44496 if (error)
44497 goto exit4;
44498 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
44499 + if (!error && (saved_dev || saved_ino))
44500 + gr_handle_delete(saved_ino, saved_dev);
44501 exit4:
44502 mnt_drop_write(nd.path.mnt);
44503 exit3:
44504 @@ -2759,6 +2912,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
44505 struct dentry *dentry;
44506 struct nameidata nd;
44507 struct inode *inode = NULL;
44508 + ino_t saved_ino = 0;
44509 + dev_t saved_dev = 0;
44510
44511 error = user_path_parent(dfd, pathname, &nd, &name);
44512 if (error)
44513 @@ -2781,6 +2936,16 @@ static long do_unlinkat(int dfd, const char __user *pathname)
44514 if (!inode)
44515 goto slashes;
44516 ihold(inode);
44517 +
44518 + if (inode->i_nlink <= 1) {
44519 + saved_ino = inode->i_ino;
44520 + saved_dev = gr_get_dev_from_dentry(dentry);
44521 + }
44522 + if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
44523 + error = -EACCES;
44524 + goto exit2;
44525 + }
44526 +
44527 error = mnt_want_write(nd.path.mnt);
44528 if (error)
44529 goto exit2;
44530 @@ -2788,6 +2953,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
44531 if (error)
44532 goto exit3;
44533 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
44534 + if (!error && (saved_ino || saved_dev))
44535 + gr_handle_delete(saved_ino, saved_dev);
44536 exit3:
44537 mnt_drop_write(nd.path.mnt);
44538 exit2:
44539 @@ -2863,10 +3030,18 @@ SYSCALL_DEFINE3(symlinkat, const char __user *, oldname,
44540 error = mnt_want_write(path.mnt);
44541 if (error)
44542 goto out_dput;
44543 +
44544 + if (!gr_acl_handle_symlink(dentry, path.dentry, path.mnt, from)) {
44545 + error = -EACCES;
44546 + goto out_drop_write;
44547 + }
44548 +
44549 error = security_path_symlink(&path, dentry, from);
44550 if (error)
44551 goto out_drop_write;
44552 error = vfs_symlink(path.dentry->d_inode, dentry, from);
44553 + if (!error)
44554 + gr_handle_create(dentry, path.mnt);
44555 out_drop_write:
44556 mnt_drop_write(path.mnt);
44557 out_dput:
44558 @@ -2938,6 +3113,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
44559 {
44560 struct dentry *new_dentry;
44561 struct path old_path, new_path;
44562 + char *to = NULL;
44563 int how = 0;
44564 int error;
44565
44566 @@ -2961,7 +3137,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
44567 if (error)
44568 return error;
44569
44570 - new_dentry = user_path_create(newdfd, newname, &new_path, 0);
44571 + new_dentry = user_path_create_with_name(newdfd, newname, &new_path, &to, 0);
44572 error = PTR_ERR(new_dentry);
44573 if (IS_ERR(new_dentry))
44574 goto out;
44575 @@ -2972,13 +3148,30 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
44576 error = mnt_want_write(new_path.mnt);
44577 if (error)
44578 goto out_dput;
44579 +
44580 + if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
44581 + old_path.dentry->d_inode,
44582 + old_path.dentry->d_inode->i_mode, to)) {
44583 + error = -EACCES;
44584 + goto out_drop_write;
44585 + }
44586 +
44587 + if (!gr_acl_handle_link(new_dentry, new_path.dentry, new_path.mnt,
44588 + old_path.dentry, old_path.mnt, to)) {
44589 + error = -EACCES;
44590 + goto out_drop_write;
44591 + }
44592 +
44593 error = security_path_link(old_path.dentry, &new_path, new_dentry);
44594 if (error)
44595 goto out_drop_write;
44596 error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry);
44597 + if (!error)
44598 + gr_handle_create(new_dentry, new_path.mnt);
44599 out_drop_write:
44600 mnt_drop_write(new_path.mnt);
44601 out_dput:
44602 + putname(to);
44603 dput(new_dentry);
44604 mutex_unlock(&new_path.dentry->d_inode->i_mutex);
44605 path_put(&new_path);
44606 @@ -3206,6 +3399,12 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
44607 if (new_dentry == trap)
44608 goto exit5;
44609
44610 + error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
44611 + old_dentry, old_dir->d_inode, oldnd.path.mnt,
44612 + to);
44613 + if (error)
44614 + goto exit5;
44615 +
44616 error = mnt_want_write(oldnd.path.mnt);
44617 if (error)
44618 goto exit5;
44619 @@ -3215,6 +3414,9 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
44620 goto exit6;
44621 error = vfs_rename(old_dir->d_inode, old_dentry,
44622 new_dir->d_inode, new_dentry);
44623 + if (!error)
44624 + gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
44625 + new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
44626 exit6:
44627 mnt_drop_write(oldnd.path.mnt);
44628 exit5:
44629 @@ -3240,6 +3442,8 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna
44630
44631 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
44632 {
44633 + char tmpbuf[64];
44634 + const char *newlink;
44635 int len;
44636
44637 len = PTR_ERR(link);
44638 @@ -3249,7 +3453,14 @@ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const c
44639 len = strlen(link);
44640 if (len > (unsigned) buflen)
44641 len = buflen;
44642 - if (copy_to_user(buffer, link, len))
44643 +
44644 + if (len < sizeof(tmpbuf)) {
44645 + memcpy(tmpbuf, link, len);
44646 + newlink = tmpbuf;
44647 + } else
44648 + newlink = link;
44649 +
44650 + if (copy_to_user(buffer, newlink, len))
44651 len = -EFAULT;
44652 out:
44653 return len;
44654 diff --git a/fs/namespace.c b/fs/namespace.c
44655 index cfc6d44..b4632a5 100644
44656 --- a/fs/namespace.c
44657 +++ b/fs/namespace.c
44658 @@ -1326,6 +1326,9 @@ static int do_umount(struct vfsmount *mnt, int flags)
44659 if (!(sb->s_flags & MS_RDONLY))
44660 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
44661 up_write(&sb->s_umount);
44662 +
44663 + gr_log_remount(mnt->mnt_devname, retval);
44664 +
44665 return retval;
44666 }
44667
44668 @@ -1345,6 +1348,9 @@ static int do_umount(struct vfsmount *mnt, int flags)
44669 br_write_unlock(vfsmount_lock);
44670 up_write(&namespace_sem);
44671 release_mounts(&umount_list);
44672 +
44673 + gr_log_unmount(mnt->mnt_devname, retval);
44674 +
44675 return retval;
44676 }
44677
44678 @@ -2336,6 +2342,16 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
44679 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
44680 MS_STRICTATIME);
44681
44682 + if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
44683 + retval = -EPERM;
44684 + goto dput_out;
44685 + }
44686 +
44687 + if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
44688 + retval = -EPERM;
44689 + goto dput_out;
44690 + }
44691 +
44692 if (flags & MS_REMOUNT)
44693 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
44694 data_page);
44695 @@ -2350,6 +2366,9 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
44696 dev_name, data_page);
44697 dput_out:
44698 path_put(&path);
44699 +
44700 + gr_log_mount(dev_name, dir_name, retval);
44701 +
44702 return retval;
44703 }
44704
44705 @@ -2605,6 +2624,11 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
44706 if (error)
44707 goto out2;
44708
44709 + if (gr_handle_chroot_pivot()) {
44710 + error = -EPERM;
44711 + goto out2;
44712 + }
44713 +
44714 get_fs_root(current->fs, &root);
44715 error = lock_mount(&old);
44716 if (error)
44717 diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c
44718 index 3db6b82..a57597e 100644
44719 --- a/fs/nfs/blocklayout/blocklayout.c
44720 +++ b/fs/nfs/blocklayout/blocklayout.c
44721 @@ -90,7 +90,7 @@ static int is_writable(struct pnfs_block_extent *be, sector_t isect)
44722 */
44723 struct parallel_io {
44724 struct kref refcnt;
44725 - struct rpc_call_ops call_ops;
44726 + rpc_call_ops_no_const call_ops;
44727 void (*pnfs_callback) (void *data);
44728 void *data;
44729 };
44730 diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
44731 index 50a15fa..ca113f9 100644
44732 --- a/fs/nfs/inode.c
44733 +++ b/fs/nfs/inode.c
44734 @@ -150,7 +150,7 @@ static void nfs_zap_caches_locked(struct inode *inode)
44735 nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
44736 nfsi->attrtimeo_timestamp = jiffies;
44737
44738 - memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_COOKIEVERF(inode)));
44739 + memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_I(inode)->cookieverf));
44740 if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))
44741 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE;
44742 else
44743 @@ -1002,16 +1002,16 @@ static int nfs_size_need_update(const struct inode *inode, const struct nfs_fatt
44744 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
44745 }
44746
44747 -static atomic_long_t nfs_attr_generation_counter;
44748 +static atomic_long_unchecked_t nfs_attr_generation_counter;
44749
44750 static unsigned long nfs_read_attr_generation_counter(void)
44751 {
44752 - return atomic_long_read(&nfs_attr_generation_counter);
44753 + return atomic_long_read_unchecked(&nfs_attr_generation_counter);
44754 }
44755
44756 unsigned long nfs_inc_attr_generation_counter(void)
44757 {
44758 - return atomic_long_inc_return(&nfs_attr_generation_counter);
44759 + return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
44760 }
44761
44762 void nfs_fattr_init(struct nfs_fattr *fattr)
44763 diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
44764 index 7a2e442..8e544cc 100644
44765 --- a/fs/nfsd/vfs.c
44766 +++ b/fs/nfsd/vfs.c
44767 @@ -914,7 +914,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
44768 } else {
44769 oldfs = get_fs();
44770 set_fs(KERNEL_DS);
44771 - host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
44772 + host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset);
44773 set_fs(oldfs);
44774 }
44775
44776 @@ -1018,7 +1018,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
44777
44778 /* Write the data. */
44779 oldfs = get_fs(); set_fs(KERNEL_DS);
44780 - host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
44781 + host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &offset);
44782 set_fs(oldfs);
44783 if (host_err < 0)
44784 goto out_nfserr;
44785 @@ -1553,7 +1553,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
44786 */
44787
44788 oldfs = get_fs(); set_fs(KERNEL_DS);
44789 - host_err = inode->i_op->readlink(dentry, buf, *lenp);
44790 + host_err = inode->i_op->readlink(dentry, (char __force_user *)buf, *lenp);
44791 set_fs(oldfs);
44792
44793 if (host_err < 0)
44794 diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
44795 index 9fde1c0..14e8827 100644
44796 --- a/fs/notify/fanotify/fanotify_user.c
44797 +++ b/fs/notify/fanotify/fanotify_user.c
44798 @@ -276,7 +276,8 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
44799 goto out_close_fd;
44800
44801 ret = -EFAULT;
44802 - if (copy_to_user(buf, &fanotify_event_metadata,
44803 + if (fanotify_event_metadata.event_len > sizeof fanotify_event_metadata ||
44804 + copy_to_user(buf, &fanotify_event_metadata,
44805 fanotify_event_metadata.event_len))
44806 goto out_kill_access_response;
44807
44808 diff --git a/fs/notify/notification.c b/fs/notify/notification.c
44809 index ee18815..7aa5d01 100644
44810 --- a/fs/notify/notification.c
44811 +++ b/fs/notify/notification.c
44812 @@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event_holder_cachep;
44813 * get set to 0 so it will never get 'freed'
44814 */
44815 static struct fsnotify_event *q_overflow_event;
44816 -static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
44817 +static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
44818
44819 /**
44820 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
44821 @@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
44822 */
44823 u32 fsnotify_get_cookie(void)
44824 {
44825 - return atomic_inc_return(&fsnotify_sync_cookie);
44826 + return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
44827 }
44828 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
44829
44830 diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
44831 index 99e3610..02c1068 100644
44832 --- a/fs/ntfs/dir.c
44833 +++ b/fs/ntfs/dir.c
44834 @@ -1329,7 +1329,7 @@ find_next_index_buffer:
44835 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
44836 ~(s64)(ndir->itype.index.block_size - 1)));
44837 /* Bounds checks. */
44838 - if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
44839 + if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
44840 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
44841 "inode 0x%lx or driver bug.", vdir->i_ino);
44842 goto err_out;
44843 diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
44844 index c587e2d..3641eaa 100644
44845 --- a/fs/ntfs/file.c
44846 +++ b/fs/ntfs/file.c
44847 @@ -2229,6 +2229,6 @@ const struct inode_operations ntfs_file_inode_ops = {
44848 #endif /* NTFS_RW */
44849 };
44850
44851 -const struct file_operations ntfs_empty_file_ops = {};
44852 +const struct file_operations ntfs_empty_file_ops __read_only;
44853
44854 -const struct inode_operations ntfs_empty_inode_ops = {};
44855 +const struct inode_operations ntfs_empty_inode_ops __read_only;
44856 diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
44857 index 210c352..a174f83 100644
44858 --- a/fs/ocfs2/localalloc.c
44859 +++ b/fs/ocfs2/localalloc.c
44860 @@ -1283,7 +1283,7 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
44861 goto bail;
44862 }
44863
44864 - atomic_inc(&osb->alloc_stats.moves);
44865 + atomic_inc_unchecked(&osb->alloc_stats.moves);
44866
44867 bail:
44868 if (handle)
44869 diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
44870 index d355e6e..578d905 100644
44871 --- a/fs/ocfs2/ocfs2.h
44872 +++ b/fs/ocfs2/ocfs2.h
44873 @@ -235,11 +235,11 @@ enum ocfs2_vol_state
44874
44875 struct ocfs2_alloc_stats
44876 {
44877 - atomic_t moves;
44878 - atomic_t local_data;
44879 - atomic_t bitmap_data;
44880 - atomic_t bg_allocs;
44881 - atomic_t bg_extends;
44882 + atomic_unchecked_t moves;
44883 + atomic_unchecked_t local_data;
44884 + atomic_unchecked_t bitmap_data;
44885 + atomic_unchecked_t bg_allocs;
44886 + atomic_unchecked_t bg_extends;
44887 };
44888
44889 enum ocfs2_local_alloc_state
44890 diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
44891 index ba5d97e..c77db25 100644
44892 --- a/fs/ocfs2/suballoc.c
44893 +++ b/fs/ocfs2/suballoc.c
44894 @@ -872,7 +872,7 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
44895 mlog_errno(status);
44896 goto bail;
44897 }
44898 - atomic_inc(&osb->alloc_stats.bg_extends);
44899 + atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
44900
44901 /* You should never ask for this much metadata */
44902 BUG_ON(bits_wanted >
44903 @@ -2008,7 +2008,7 @@ int ocfs2_claim_metadata(handle_t *handle,
44904 mlog_errno(status);
44905 goto bail;
44906 }
44907 - atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
44908 + atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
44909
44910 *suballoc_loc = res.sr_bg_blkno;
44911 *suballoc_bit_start = res.sr_bit_offset;
44912 @@ -2172,7 +2172,7 @@ int ocfs2_claim_new_inode_at_loc(handle_t *handle,
44913 trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
44914 res->sr_bits);
44915
44916 - atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
44917 + atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
44918
44919 BUG_ON(res->sr_bits != 1);
44920
44921 @@ -2214,7 +2214,7 @@ int ocfs2_claim_new_inode(handle_t *handle,
44922 mlog_errno(status);
44923 goto bail;
44924 }
44925 - atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
44926 + atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
44927
44928 BUG_ON(res.sr_bits != 1);
44929
44930 @@ -2318,7 +2318,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
44931 cluster_start,
44932 num_clusters);
44933 if (!status)
44934 - atomic_inc(&osb->alloc_stats.local_data);
44935 + atomic_inc_unchecked(&osb->alloc_stats.local_data);
44936 } else {
44937 if (min_clusters > (osb->bitmap_cpg - 1)) {
44938 /* The only paths asking for contiguousness
44939 @@ -2344,7 +2344,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
44940 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
44941 res.sr_bg_blkno,
44942 res.sr_bit_offset);
44943 - atomic_inc(&osb->alloc_stats.bitmap_data);
44944 + atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
44945 *num_clusters = res.sr_bits;
44946 }
44947 }
44948 diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
44949 index 4994f8b..eaab8eb 100644
44950 --- a/fs/ocfs2/super.c
44951 +++ b/fs/ocfs2/super.c
44952 @@ -301,11 +301,11 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
44953 "%10s => GlobalAllocs: %d LocalAllocs: %d "
44954 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
44955 "Stats",
44956 - atomic_read(&osb->alloc_stats.bitmap_data),
44957 - atomic_read(&osb->alloc_stats.local_data),
44958 - atomic_read(&osb->alloc_stats.bg_allocs),
44959 - atomic_read(&osb->alloc_stats.moves),
44960 - atomic_read(&osb->alloc_stats.bg_extends));
44961 + atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
44962 + atomic_read_unchecked(&osb->alloc_stats.local_data),
44963 + atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
44964 + atomic_read_unchecked(&osb->alloc_stats.moves),
44965 + atomic_read_unchecked(&osb->alloc_stats.bg_extends));
44966
44967 out += snprintf(buf + out, len - out,
44968 "%10s => State: %u Descriptor: %llu Size: %u bits "
44969 @@ -2119,11 +2119,11 @@ static int ocfs2_initialize_super(struct super_block *sb,
44970 spin_lock_init(&osb->osb_xattr_lock);
44971 ocfs2_init_steal_slots(osb);
44972
44973 - atomic_set(&osb->alloc_stats.moves, 0);
44974 - atomic_set(&osb->alloc_stats.local_data, 0);
44975 - atomic_set(&osb->alloc_stats.bitmap_data, 0);
44976 - atomic_set(&osb->alloc_stats.bg_allocs, 0);
44977 - atomic_set(&osb->alloc_stats.bg_extends, 0);
44978 + atomic_set_unchecked(&osb->alloc_stats.moves, 0);
44979 + atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
44980 + atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
44981 + atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
44982 + atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
44983
44984 /* Copy the blockcheck stats from the superblock probe */
44985 osb->osb_ecc_stats = *stats;
44986 diff --git a/fs/ocfs2/symlink.c b/fs/ocfs2/symlink.c
44987 index 5d22872..523db20 100644
44988 --- a/fs/ocfs2/symlink.c
44989 +++ b/fs/ocfs2/symlink.c
44990 @@ -142,7 +142,7 @@ bail:
44991
44992 static void ocfs2_fast_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
44993 {
44994 - char *link = nd_get_link(nd);
44995 + const char *link = nd_get_link(nd);
44996 if (!IS_ERR(link))
44997 kfree(link);
44998 }
44999 diff --git a/fs/open.c b/fs/open.c
45000 index 22c41b5..78894cf 100644
45001 --- a/fs/open.c
45002 +++ b/fs/open.c
45003 @@ -112,6 +112,10 @@ static long do_sys_truncate(const char __user *pathname, loff_t length)
45004 error = locks_verify_truncate(inode, NULL, length);
45005 if (!error)
45006 error = security_path_truncate(&path);
45007 +
45008 + if (!error && !gr_acl_handle_truncate(path.dentry, path.mnt))
45009 + error = -EACCES;
45010 +
45011 if (!error)
45012 error = do_truncate(path.dentry, length, 0, NULL);
45013
45014 @@ -358,6 +362,9 @@ SYSCALL_DEFINE3(faccessat, int, dfd, const char __user *, filename, int, mode)
45015 if (__mnt_is_readonly(path.mnt))
45016 res = -EROFS;
45017
45018 + if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
45019 + res = -EACCES;
45020 +
45021 out_path_release:
45022 path_put(&path);
45023 out:
45024 @@ -384,6 +391,8 @@ SYSCALL_DEFINE1(chdir, const char __user *, filename)
45025 if (error)
45026 goto dput_and_out;
45027
45028 + gr_log_chdir(path.dentry, path.mnt);
45029 +
45030 set_fs_pwd(current->fs, &path);
45031
45032 dput_and_out:
45033 @@ -410,6 +419,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
45034 goto out_putf;
45035
45036 error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
45037 +
45038 + if (!error && !gr_chroot_fchdir(file->f_path.dentry, file->f_path.mnt))
45039 + error = -EPERM;
45040 +
45041 + if (!error)
45042 + gr_log_chdir(file->f_path.dentry, file->f_path.mnt);
45043 +
45044 if (!error)
45045 set_fs_pwd(current->fs, &file->f_path);
45046 out_putf:
45047 @@ -438,7 +454,13 @@ SYSCALL_DEFINE1(chroot, const char __user *, filename)
45048 if (error)
45049 goto dput_and_out;
45050
45051 + if (gr_handle_chroot_chroot(path.dentry, path.mnt))
45052 + goto dput_and_out;
45053 +
45054 set_fs_root(current->fs, &path);
45055 +
45056 + gr_handle_chroot_chdir(&path);
45057 +
45058 error = 0;
45059 dput_and_out:
45060 path_put(&path);
45061 @@ -456,6 +478,16 @@ static int chmod_common(struct path *path, umode_t mode)
45062 if (error)
45063 return error;
45064 mutex_lock(&inode->i_mutex);
45065 +
45066 + if (!gr_acl_handle_chmod(path->dentry, path->mnt, &mode)) {
45067 + error = -EACCES;
45068 + goto out_unlock;
45069 + }
45070 + if (gr_handle_chroot_chmod(path->dentry, path->mnt, mode)) {
45071 + error = -EACCES;
45072 + goto out_unlock;
45073 + }
45074 +
45075 error = security_path_chmod(path->dentry, path->mnt, mode);
45076 if (error)
45077 goto out_unlock;
45078 @@ -506,6 +538,9 @@ static int chown_common(struct path *path, uid_t user, gid_t group)
45079 int error;
45080 struct iattr newattrs;
45081
45082 + if (!gr_acl_handle_chown(path->dentry, path->mnt))
45083 + return -EACCES;
45084 +
45085 newattrs.ia_valid = ATTR_CTIME;
45086 if (user != (uid_t) -1) {
45087 newattrs.ia_valid |= ATTR_UID;
45088 diff --git a/fs/partitions/efi.c b/fs/partitions/efi.c
45089 index 6296b40..417c00f 100644
45090 --- a/fs/partitions/efi.c
45091 +++ b/fs/partitions/efi.c
45092 @@ -234,14 +234,14 @@ static gpt_entry *alloc_read_gpt_entries(struct parsed_partitions *state,
45093 if (!gpt)
45094 return NULL;
45095
45096 + if (!le32_to_cpu(gpt->num_partition_entries))
45097 + return NULL;
45098 + pte = kcalloc(le32_to_cpu(gpt->num_partition_entries), le32_to_cpu(gpt->sizeof_partition_entry), GFP_KERNEL);
45099 + if (!pte)
45100 + return NULL;
45101 +
45102 count = le32_to_cpu(gpt->num_partition_entries) *
45103 le32_to_cpu(gpt->sizeof_partition_entry);
45104 - if (!count)
45105 - return NULL;
45106 - pte = kzalloc(count, GFP_KERNEL);
45107 - if (!pte)
45108 - return NULL;
45109 -
45110 if (read_lba(state, le64_to_cpu(gpt->partition_entry_lba),
45111 (u8 *) pte,
45112 count) < count) {
45113 diff --git a/fs/partitions/ldm.c b/fs/partitions/ldm.c
45114 index bd8ae78..539d250 100644
45115 --- a/fs/partitions/ldm.c
45116 +++ b/fs/partitions/ldm.c
45117 @@ -1324,7 +1324,7 @@ static bool ldm_frag_add (const u8 *data, int size, struct list_head *frags)
45118 goto found;
45119 }
45120
45121 - f = kmalloc (sizeof (*f) + size*num, GFP_KERNEL);
45122 + f = kmalloc (size*num + sizeof (*f), GFP_KERNEL);
45123 if (!f) {
45124 ldm_crit ("Out of memory.");
45125 return false;
45126 diff --git a/fs/pipe.c b/fs/pipe.c
45127 index 4065f07..68c0706 100644
45128 --- a/fs/pipe.c
45129 +++ b/fs/pipe.c
45130 @@ -420,9 +420,9 @@ redo:
45131 }
45132 if (bufs) /* More to do? */
45133 continue;
45134 - if (!pipe->writers)
45135 + if (!atomic_read(&pipe->writers))
45136 break;
45137 - if (!pipe->waiting_writers) {
45138 + if (!atomic_read(&pipe->waiting_writers)) {
45139 /* syscall merging: Usually we must not sleep
45140 * if O_NONBLOCK is set, or if we got some data.
45141 * But if a writer sleeps in kernel space, then
45142 @@ -481,7 +481,7 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov,
45143 mutex_lock(&inode->i_mutex);
45144 pipe = inode->i_pipe;
45145
45146 - if (!pipe->readers) {
45147 + if (!atomic_read(&pipe->readers)) {
45148 send_sig(SIGPIPE, current, 0);
45149 ret = -EPIPE;
45150 goto out;
45151 @@ -530,7 +530,7 @@ redo1:
45152 for (;;) {
45153 int bufs;
45154
45155 - if (!pipe->readers) {
45156 + if (!atomic_read(&pipe->readers)) {
45157 send_sig(SIGPIPE, current, 0);
45158 if (!ret)
45159 ret = -EPIPE;
45160 @@ -616,9 +616,9 @@ redo2:
45161 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
45162 do_wakeup = 0;
45163 }
45164 - pipe->waiting_writers++;
45165 + atomic_inc(&pipe->waiting_writers);
45166 pipe_wait(pipe);
45167 - pipe->waiting_writers--;
45168 + atomic_dec(&pipe->waiting_writers);
45169 }
45170 out:
45171 mutex_unlock(&inode->i_mutex);
45172 @@ -685,7 +685,7 @@ pipe_poll(struct file *filp, poll_table *wait)
45173 mask = 0;
45174 if (filp->f_mode & FMODE_READ) {
45175 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
45176 - if (!pipe->writers && filp->f_version != pipe->w_counter)
45177 + if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
45178 mask |= POLLHUP;
45179 }
45180
45181 @@ -695,7 +695,7 @@ pipe_poll(struct file *filp, poll_table *wait)
45182 * Most Unices do not set POLLERR for FIFOs but on Linux they
45183 * behave exactly like pipes for poll().
45184 */
45185 - if (!pipe->readers)
45186 + if (!atomic_read(&pipe->readers))
45187 mask |= POLLERR;
45188 }
45189
45190 @@ -709,10 +709,10 @@ pipe_release(struct inode *inode, int decr, int decw)
45191
45192 mutex_lock(&inode->i_mutex);
45193 pipe = inode->i_pipe;
45194 - pipe->readers -= decr;
45195 - pipe->writers -= decw;
45196 + atomic_sub(decr, &pipe->readers);
45197 + atomic_sub(decw, &pipe->writers);
45198
45199 - if (!pipe->readers && !pipe->writers) {
45200 + if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
45201 free_pipe_info(inode);
45202 } else {
45203 wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
45204 @@ -802,7 +802,7 @@ pipe_read_open(struct inode *inode, struct file *filp)
45205
45206 if (inode->i_pipe) {
45207 ret = 0;
45208 - inode->i_pipe->readers++;
45209 + atomic_inc(&inode->i_pipe->readers);
45210 }
45211
45212 mutex_unlock(&inode->i_mutex);
45213 @@ -819,7 +819,7 @@ pipe_write_open(struct inode *inode, struct file *filp)
45214
45215 if (inode->i_pipe) {
45216 ret = 0;
45217 - inode->i_pipe->writers++;
45218 + atomic_inc(&inode->i_pipe->writers);
45219 }
45220
45221 mutex_unlock(&inode->i_mutex);
45222 @@ -837,9 +837,9 @@ pipe_rdwr_open(struct inode *inode, struct file *filp)
45223 if (inode->i_pipe) {
45224 ret = 0;
45225 if (filp->f_mode & FMODE_READ)
45226 - inode->i_pipe->readers++;
45227 + atomic_inc(&inode->i_pipe->readers);
45228 if (filp->f_mode & FMODE_WRITE)
45229 - inode->i_pipe->writers++;
45230 + atomic_inc(&inode->i_pipe->writers);
45231 }
45232
45233 mutex_unlock(&inode->i_mutex);
45234 @@ -931,7 +931,7 @@ void free_pipe_info(struct inode *inode)
45235 inode->i_pipe = NULL;
45236 }
45237
45238 -static struct vfsmount *pipe_mnt __read_mostly;
45239 +struct vfsmount *pipe_mnt __read_mostly;
45240
45241 /*
45242 * pipefs_dname() is called from d_path().
45243 @@ -961,7 +961,8 @@ static struct inode * get_pipe_inode(void)
45244 goto fail_iput;
45245 inode->i_pipe = pipe;
45246
45247 - pipe->readers = pipe->writers = 1;
45248 + atomic_set(&pipe->readers, 1);
45249 + atomic_set(&pipe->writers, 1);
45250 inode->i_fop = &rdwr_pipefifo_fops;
45251
45252 /*
45253 diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
45254 index 15af622..0e9f4467 100644
45255 --- a/fs/proc/Kconfig
45256 +++ b/fs/proc/Kconfig
45257 @@ -30,12 +30,12 @@ config PROC_FS
45258
45259 config PROC_KCORE
45260 bool "/proc/kcore support" if !ARM
45261 - depends on PROC_FS && MMU
45262 + depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
45263
45264 config PROC_VMCORE
45265 bool "/proc/vmcore support"
45266 - depends on PROC_FS && CRASH_DUMP
45267 - default y
45268 + depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
45269 + default n
45270 help
45271 Exports the dump image of crashed kernel in ELF format.
45272
45273 @@ -59,8 +59,8 @@ config PROC_SYSCTL
45274 limited in memory.
45275
45276 config PROC_PAGE_MONITOR
45277 - default y
45278 - depends on PROC_FS && MMU
45279 + default n
45280 + depends on PROC_FS && MMU && !GRKERNSEC
45281 bool "Enable /proc page monitoring" if EXPERT
45282 help
45283 Various /proc files exist to monitor process memory utilization:
45284 diff --git a/fs/proc/array.c b/fs/proc/array.c
45285 index 3a1dafd..1456746 100644
45286 --- a/fs/proc/array.c
45287 +++ b/fs/proc/array.c
45288 @@ -60,6 +60,7 @@
45289 #include <linux/tty.h>
45290 #include <linux/string.h>
45291 #include <linux/mman.h>
45292 +#include <linux/grsecurity.h>
45293 #include <linux/proc_fs.h>
45294 #include <linux/ioport.h>
45295 #include <linux/uaccess.h>
45296 @@ -337,6 +338,21 @@ static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
45297 seq_putc(m, '\n');
45298 }
45299
45300 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
45301 +static inline void task_pax(struct seq_file *m, struct task_struct *p)
45302 +{
45303 + if (p->mm)
45304 + seq_printf(m, "PaX:\t%c%c%c%c%c\n",
45305 + p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
45306 + p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
45307 + p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
45308 + p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
45309 + p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
45310 + else
45311 + seq_printf(m, "PaX:\t-----\n");
45312 +}
45313 +#endif
45314 +
45315 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
45316 struct pid *pid, struct task_struct *task)
45317 {
45318 @@ -354,9 +370,24 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
45319 task_cpus_allowed(m, task);
45320 cpuset_task_status_allowed(m, task);
45321 task_context_switch_counts(m, task);
45322 +
45323 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
45324 + task_pax(m, task);
45325 +#endif
45326 +
45327 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
45328 + task_grsec_rbac(m, task);
45329 +#endif
45330 +
45331 return 0;
45332 }
45333
45334 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45335 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
45336 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
45337 + _mm->pax_flags & MF_PAX_SEGMEXEC))
45338 +#endif
45339 +
45340 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
45341 struct pid *pid, struct task_struct *task, int whole)
45342 {
45343 @@ -378,6 +409,13 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
45344 char tcomm[sizeof(task->comm)];
45345 unsigned long flags;
45346
45347 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45348 + if (current->exec_id != m->exec_id) {
45349 + gr_log_badprocpid("stat");
45350 + return 0;
45351 + }
45352 +#endif
45353 +
45354 state = *get_task_state(task);
45355 vsize = eip = esp = 0;
45356 permitted = ptrace_may_access(task, PTRACE_MODE_READ);
45357 @@ -449,6 +487,19 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
45358 gtime = task->gtime;
45359 }
45360
45361 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45362 + if (PAX_RAND_FLAGS(mm)) {
45363 + eip = 0;
45364 + esp = 0;
45365 + wchan = 0;
45366 + }
45367 +#endif
45368 +#ifdef CONFIG_GRKERNSEC_HIDESYM
45369 + wchan = 0;
45370 + eip =0;
45371 + esp =0;
45372 +#endif
45373 +
45374 /* scale priority and nice values from timeslices to -20..20 */
45375 /* to make it look like a "normal" Unix priority/nice value */
45376 priority = task_prio(task);
45377 @@ -489,9 +540,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
45378 vsize,
45379 mm ? get_mm_rss(mm) : 0,
45380 rsslim,
45381 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45382 + PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0),
45383 + PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0),
45384 + PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0),
45385 +#else
45386 mm ? (permitted ? mm->start_code : 1) : 0,
45387 mm ? (permitted ? mm->end_code : 1) : 0,
45388 (permitted && mm) ? mm->start_stack : 0,
45389 +#endif
45390 esp,
45391 eip,
45392 /* The signal information here is obsolete.
45393 @@ -535,6 +592,13 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
45394 unsigned long size = 0, resident = 0, shared = 0, text = 0, data = 0;
45395 struct mm_struct *mm = get_task_mm(task);
45396
45397 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45398 + if (current->exec_id != m->exec_id) {
45399 + gr_log_badprocpid("statm");
45400 + return 0;
45401 + }
45402 +#endif
45403 +
45404 if (mm) {
45405 size = task_statm(mm, &shared, &text, &data, &resident);
45406 mmput(mm);
45407 @@ -544,3 +608,18 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
45408
45409 return 0;
45410 }
45411 +
45412 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
45413 +int proc_pid_ipaddr(struct task_struct *task, char *buffer)
45414 +{
45415 + u32 curr_ip = 0;
45416 + unsigned long flags;
45417 +
45418 + if (lock_task_sighand(task, &flags)) {
45419 + curr_ip = task->signal->curr_ip;
45420 + unlock_task_sighand(task, &flags);
45421 + }
45422 +
45423 + return sprintf(buffer, "%pI4\n", &curr_ip);
45424 +}
45425 +#endif
45426 diff --git a/fs/proc/base.c b/fs/proc/base.c
45427 index 1ace83d..357b933 100644
45428 --- a/fs/proc/base.c
45429 +++ b/fs/proc/base.c
45430 @@ -107,6 +107,22 @@ struct pid_entry {
45431 union proc_op op;
45432 };
45433
45434 +struct getdents_callback {
45435 + struct linux_dirent __user * current_dir;
45436 + struct linux_dirent __user * previous;
45437 + struct file * file;
45438 + int count;
45439 + int error;
45440 +};
45441 +
45442 +static int gr_fake_filldir(void * __buf, const char *name, int namlen,
45443 + loff_t offset, u64 ino, unsigned int d_type)
45444 +{
45445 + struct getdents_callback * buf = (struct getdents_callback *) __buf;
45446 + buf->error = -EINVAL;
45447 + return 0;
45448 +}
45449 +
45450 #define NOD(NAME, MODE, IOP, FOP, OP) { \
45451 .name = (NAME), \
45452 .len = sizeof(NAME) - 1, \
45453 @@ -194,26 +210,6 @@ static int proc_root_link(struct inode *inode, struct path *path)
45454 return result;
45455 }
45456
45457 -static struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
45458 -{
45459 - struct mm_struct *mm;
45460 - int err;
45461 -
45462 - err = mutex_lock_killable(&task->signal->cred_guard_mutex);
45463 - if (err)
45464 - return ERR_PTR(err);
45465 -
45466 - mm = get_task_mm(task);
45467 - if (mm && mm != current->mm &&
45468 - !ptrace_may_access(task, mode)) {
45469 - mmput(mm);
45470 - mm = ERR_PTR(-EACCES);
45471 - }
45472 - mutex_unlock(&task->signal->cred_guard_mutex);
45473 -
45474 - return mm;
45475 -}
45476 -
45477 struct mm_struct *mm_for_maps(struct task_struct *task)
45478 {
45479 return mm_access(task, PTRACE_MODE_READ);
45480 @@ -229,6 +225,9 @@ static int proc_pid_cmdline(struct task_struct *task, char * buffer)
45481 if (!mm->arg_end)
45482 goto out_mm; /* Shh! No looking before we're done */
45483
45484 + if (gr_acl_handle_procpidmem(task))
45485 + goto out_mm;
45486 +
45487 len = mm->arg_end - mm->arg_start;
45488
45489 if (len > PAGE_SIZE)
45490 @@ -256,12 +255,28 @@ out:
45491 return res;
45492 }
45493
45494 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45495 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
45496 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
45497 + _mm->pax_flags & MF_PAX_SEGMEXEC))
45498 +#endif
45499 +
45500 static int proc_pid_auxv(struct task_struct *task, char *buffer)
45501 {
45502 struct mm_struct *mm = mm_for_maps(task);
45503 int res = PTR_ERR(mm);
45504 if (mm && !IS_ERR(mm)) {
45505 unsigned int nwords = 0;
45506 +
45507 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45508 + /* allow if we're currently ptracing this task */
45509 + if (PAX_RAND_FLAGS(mm) &&
45510 + (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
45511 + mmput(mm);
45512 + return 0;
45513 + }
45514 +#endif
45515 +
45516 do {
45517 nwords += 2;
45518 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
45519 @@ -275,7 +290,7 @@ static int proc_pid_auxv(struct task_struct *task, char *buffer)
45520 }
45521
45522
45523 -#ifdef CONFIG_KALLSYMS
45524 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45525 /*
45526 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
45527 * Returns the resolved symbol. If that fails, simply return the address.
45528 @@ -314,7 +329,7 @@ static void unlock_trace(struct task_struct *task)
45529 mutex_unlock(&task->signal->cred_guard_mutex);
45530 }
45531
45532 -#ifdef CONFIG_STACKTRACE
45533 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45534
45535 #define MAX_STACK_TRACE_DEPTH 64
45536
45537 @@ -505,7 +520,7 @@ static int proc_pid_limits(struct task_struct *task, char *buffer)
45538 return count;
45539 }
45540
45541 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
45542 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
45543 static int proc_pid_syscall(struct task_struct *task, char *buffer)
45544 {
45545 long nr;
45546 @@ -534,7 +549,7 @@ static int proc_pid_syscall(struct task_struct *task, char *buffer)
45547 /************************************************************************/
45548
45549 /* permission checks */
45550 -static int proc_fd_access_allowed(struct inode *inode)
45551 +static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
45552 {
45553 struct task_struct *task;
45554 int allowed = 0;
45555 @@ -544,7 +559,10 @@ static int proc_fd_access_allowed(struct inode *inode)
45556 */
45557 task = get_proc_task(inode);
45558 if (task) {
45559 - allowed = ptrace_may_access(task, PTRACE_MODE_READ);
45560 + if (log)
45561 + allowed = ptrace_may_access_log(task, PTRACE_MODE_READ);
45562 + else
45563 + allowed = ptrace_may_access(task, PTRACE_MODE_READ);
45564 put_task_struct(task);
45565 }
45566 return allowed;
45567 @@ -797,6 +815,11 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
45568 ssize_t copied;
45569 char *page;
45570
45571 +#ifdef CONFIG_GRKERNSEC
45572 + if (write)
45573 + return -EPERM;
45574 +#endif
45575 +
45576 if (!mm)
45577 return 0;
45578
45579 @@ -897,6 +920,9 @@ static ssize_t environ_read(struct file *file, char __user *buf,
45580 if (!task)
45581 goto out_no_task;
45582
45583 + if (gr_acl_handle_procpidmem(task))
45584 + goto out;
45585 +
45586 ret = -ENOMEM;
45587 page = (char *)__get_free_page(GFP_TEMPORARY);
45588 if (!page)
45589 @@ -1519,7 +1545,7 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
45590 path_put(&nd->path);
45591
45592 /* Are we allowed to snoop on the tasks file descriptors? */
45593 - if (!proc_fd_access_allowed(inode))
45594 + if (!proc_fd_access_allowed(inode,0))
45595 goto out;
45596
45597 error = PROC_I(inode)->op.proc_get_link(inode, &nd->path);
45598 @@ -1558,8 +1584,18 @@ static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int b
45599 struct path path;
45600
45601 /* Are we allowed to snoop on the tasks file descriptors? */
45602 - if (!proc_fd_access_allowed(inode))
45603 - goto out;
45604 + /* logging this is needed for learning on chromium to work properly,
45605 + but we don't want to flood the logs from 'ps' which does a readlink
45606 + on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
45607 + CAP_SYS_PTRACE as it's not necessary for its basic functionality
45608 + */
45609 + if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
45610 + if (!proc_fd_access_allowed(inode,0))
45611 + goto out;
45612 + } else {
45613 + if (!proc_fd_access_allowed(inode,1))
45614 + goto out;
45615 + }
45616
45617 error = PROC_I(inode)->op.proc_get_link(inode, &path);
45618 if (error)
45619 @@ -1624,7 +1660,11 @@ struct inode *proc_pid_make_inode(struct super_block * sb, struct task_struct *t
45620 rcu_read_lock();
45621 cred = __task_cred(task);
45622 inode->i_uid = cred->euid;
45623 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45624 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
45625 +#else
45626 inode->i_gid = cred->egid;
45627 +#endif
45628 rcu_read_unlock();
45629 }
45630 security_task_to_inode(task, inode);
45631 @@ -1642,6 +1682,9 @@ int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
45632 struct inode *inode = dentry->d_inode;
45633 struct task_struct *task;
45634 const struct cred *cred;
45635 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45636 + const struct cred *tmpcred = current_cred();
45637 +#endif
45638
45639 generic_fillattr(inode, stat);
45640
45641 @@ -1649,13 +1692,41 @@ int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
45642 stat->uid = 0;
45643 stat->gid = 0;
45644 task = pid_task(proc_pid(inode), PIDTYPE_PID);
45645 +
45646 + if (task && (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))) {
45647 + rcu_read_unlock();
45648 + return -ENOENT;
45649 + }
45650 +
45651 if (task) {
45652 + cred = __task_cred(task);
45653 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45654 + if (!tmpcred->uid || (tmpcred->uid == cred->uid)
45655 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45656 + || in_group_p(CONFIG_GRKERNSEC_PROC_GID)
45657 +#endif
45658 + ) {
45659 +#endif
45660 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
45661 +#ifdef CONFIG_GRKERNSEC_PROC_USER
45662 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
45663 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45664 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
45665 +#endif
45666 task_dumpable(task)) {
45667 - cred = __task_cred(task);
45668 stat->uid = cred->euid;
45669 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45670 + stat->gid = CONFIG_GRKERNSEC_PROC_GID;
45671 +#else
45672 stat->gid = cred->egid;
45673 +#endif
45674 }
45675 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45676 + } else {
45677 + rcu_read_unlock();
45678 + return -ENOENT;
45679 + }
45680 +#endif
45681 }
45682 rcu_read_unlock();
45683 return 0;
45684 @@ -1692,11 +1763,20 @@ int pid_revalidate(struct dentry *dentry, struct nameidata *nd)
45685
45686 if (task) {
45687 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
45688 +#ifdef CONFIG_GRKERNSEC_PROC_USER
45689 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
45690 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45691 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
45692 +#endif
45693 task_dumpable(task)) {
45694 rcu_read_lock();
45695 cred = __task_cred(task);
45696 inode->i_uid = cred->euid;
45697 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45698 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
45699 +#else
45700 inode->i_gid = cred->egid;
45701 +#endif
45702 rcu_read_unlock();
45703 } else {
45704 inode->i_uid = 0;
45705 @@ -1814,7 +1894,8 @@ static int proc_fd_info(struct inode *inode, struct path *path, char *info)
45706 int fd = proc_fd(inode);
45707
45708 if (task) {
45709 - files = get_files_struct(task);
45710 + if (!gr_acl_handle_procpidmem(task))
45711 + files = get_files_struct(task);
45712 put_task_struct(task);
45713 }
45714 if (files) {
45715 @@ -2082,11 +2163,21 @@ static const struct file_operations proc_fd_operations = {
45716 */
45717 static int proc_fd_permission(struct inode *inode, int mask)
45718 {
45719 + struct task_struct *task;
45720 int rv = generic_permission(inode, mask);
45721 - if (rv == 0)
45722 - return 0;
45723 +
45724 if (task_pid(current) == proc_pid(inode))
45725 rv = 0;
45726 +
45727 + task = get_proc_task(inode);
45728 + if (task == NULL)
45729 + return rv;
45730 +
45731 + if (gr_acl_handle_procpidmem(task))
45732 + rv = -EACCES;
45733 +
45734 + put_task_struct(task);
45735 +
45736 return rv;
45737 }
45738
45739 @@ -2196,6 +2287,9 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
45740 if (!task)
45741 goto out_no_task;
45742
45743 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
45744 + goto out;
45745 +
45746 /*
45747 * Yes, it does not scale. And it should not. Don't add
45748 * new entries into /proc/<tgid>/ without very good reasons.
45749 @@ -2240,6 +2334,9 @@ static int proc_pident_readdir(struct file *filp,
45750 if (!task)
45751 goto out_no_task;
45752
45753 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
45754 + goto out;
45755 +
45756 ret = 0;
45757 i = filp->f_pos;
45758 switch (i) {
45759 @@ -2510,7 +2607,7 @@ static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd)
45760 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
45761 void *cookie)
45762 {
45763 - char *s = nd_get_link(nd);
45764 + const char *s = nd_get_link(nd);
45765 if (!IS_ERR(s))
45766 __putname(s);
45767 }
45768 @@ -2708,7 +2805,7 @@ static const struct pid_entry tgid_base_stuff[] = {
45769 REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
45770 #endif
45771 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
45772 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
45773 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
45774 INF("syscall", S_IRUGO, proc_pid_syscall),
45775 #endif
45776 INF("cmdline", S_IRUGO, proc_pid_cmdline),
45777 @@ -2733,10 +2830,10 @@ static const struct pid_entry tgid_base_stuff[] = {
45778 #ifdef CONFIG_SECURITY
45779 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
45780 #endif
45781 -#ifdef CONFIG_KALLSYMS
45782 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45783 INF("wchan", S_IRUGO, proc_pid_wchan),
45784 #endif
45785 -#ifdef CONFIG_STACKTRACE
45786 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45787 ONE("stack", S_IRUGO, proc_pid_stack),
45788 #endif
45789 #ifdef CONFIG_SCHEDSTATS
45790 @@ -2770,6 +2867,9 @@ static const struct pid_entry tgid_base_stuff[] = {
45791 #ifdef CONFIG_HARDWALL
45792 INF("hardwall", S_IRUGO, proc_pid_hardwall),
45793 #endif
45794 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
45795 + INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
45796 +#endif
45797 };
45798
45799 static int proc_tgid_base_readdir(struct file * filp,
45800 @@ -2895,7 +2995,14 @@ static struct dentry *proc_pid_instantiate(struct inode *dir,
45801 if (!inode)
45802 goto out;
45803
45804 +#ifdef CONFIG_GRKERNSEC_PROC_USER
45805 + inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
45806 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45807 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
45808 + inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
45809 +#else
45810 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
45811 +#endif
45812 inode->i_op = &proc_tgid_base_inode_operations;
45813 inode->i_fop = &proc_tgid_base_operations;
45814 inode->i_flags|=S_IMMUTABLE;
45815 @@ -2937,7 +3044,11 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, struct
45816 if (!task)
45817 goto out;
45818
45819 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
45820 + goto out_put_task;
45821 +
45822 result = proc_pid_instantiate(dir, dentry, task, NULL);
45823 +out_put_task:
45824 put_task_struct(task);
45825 out:
45826 return result;
45827 @@ -3002,6 +3113,11 @@ int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir)
45828 {
45829 unsigned int nr;
45830 struct task_struct *reaper;
45831 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45832 + const struct cred *tmpcred = current_cred();
45833 + const struct cred *itercred;
45834 +#endif
45835 + filldir_t __filldir = filldir;
45836 struct tgid_iter iter;
45837 struct pid_namespace *ns;
45838
45839 @@ -3025,8 +3141,27 @@ int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir)
45840 for (iter = next_tgid(ns, iter);
45841 iter.task;
45842 iter.tgid += 1, iter = next_tgid(ns, iter)) {
45843 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45844 + rcu_read_lock();
45845 + itercred = __task_cred(iter.task);
45846 +#endif
45847 + if (gr_pid_is_chrooted(iter.task) || gr_check_hidden_task(iter.task)
45848 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45849 + || (tmpcred->uid && (itercred->uid != tmpcred->uid)
45850 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45851 + && !in_group_p(CONFIG_GRKERNSEC_PROC_GID)
45852 +#endif
45853 + )
45854 +#endif
45855 + )
45856 + __filldir = &gr_fake_filldir;
45857 + else
45858 + __filldir = filldir;
45859 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45860 + rcu_read_unlock();
45861 +#endif
45862 filp->f_pos = iter.tgid + TGID_OFFSET;
45863 - if (proc_pid_fill_cache(filp, dirent, filldir, iter) < 0) {
45864 + if (proc_pid_fill_cache(filp, dirent, __filldir, iter) < 0) {
45865 put_task_struct(iter.task);
45866 goto out;
45867 }
45868 @@ -3054,7 +3189,7 @@ static const struct pid_entry tid_base_stuff[] = {
45869 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
45870 #endif
45871 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
45872 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
45873 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
45874 INF("syscall", S_IRUGO, proc_pid_syscall),
45875 #endif
45876 INF("cmdline", S_IRUGO, proc_pid_cmdline),
45877 @@ -3078,10 +3213,10 @@ static const struct pid_entry tid_base_stuff[] = {
45878 #ifdef CONFIG_SECURITY
45879 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
45880 #endif
45881 -#ifdef CONFIG_KALLSYMS
45882 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45883 INF("wchan", S_IRUGO, proc_pid_wchan),
45884 #endif
45885 -#ifdef CONFIG_STACKTRACE
45886 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45887 ONE("stack", S_IRUGO, proc_pid_stack),
45888 #endif
45889 #ifdef CONFIG_SCHEDSTATS
45890 diff --git a/fs/proc/cmdline.c b/fs/proc/cmdline.c
45891 index 82676e3..5f8518a 100644
45892 --- a/fs/proc/cmdline.c
45893 +++ b/fs/proc/cmdline.c
45894 @@ -23,7 +23,11 @@ static const struct file_operations cmdline_proc_fops = {
45895
45896 static int __init proc_cmdline_init(void)
45897 {
45898 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
45899 + proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
45900 +#else
45901 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
45902 +#endif
45903 return 0;
45904 }
45905 module_init(proc_cmdline_init);
45906 diff --git a/fs/proc/devices.c b/fs/proc/devices.c
45907 index b143471..bb105e5 100644
45908 --- a/fs/proc/devices.c
45909 +++ b/fs/proc/devices.c
45910 @@ -64,7 +64,11 @@ static const struct file_operations proc_devinfo_operations = {
45911
45912 static int __init proc_devices_init(void)
45913 {
45914 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
45915 + proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
45916 +#else
45917 proc_create("devices", 0, NULL, &proc_devinfo_operations);
45918 +#endif
45919 return 0;
45920 }
45921 module_init(proc_devices_init);
45922 diff --git a/fs/proc/inode.c b/fs/proc/inode.c
45923 index 7737c54..7172574 100644
45924 --- a/fs/proc/inode.c
45925 +++ b/fs/proc/inode.c
45926 @@ -18,12 +18,18 @@
45927 #include <linux/module.h>
45928 #include <linux/sysctl.h>
45929 #include <linux/slab.h>
45930 +#include <linux/grsecurity.h>
45931
45932 #include <asm/system.h>
45933 #include <asm/uaccess.h>
45934
45935 #include "internal.h"
45936
45937 +#ifdef CONFIG_PROC_SYSCTL
45938 +extern const struct inode_operations proc_sys_inode_operations;
45939 +extern const struct inode_operations proc_sys_dir_operations;
45940 +#endif
45941 +
45942 static void proc_evict_inode(struct inode *inode)
45943 {
45944 struct proc_dir_entry *de;
45945 @@ -49,6 +55,13 @@ static void proc_evict_inode(struct inode *inode)
45946 ns_ops = PROC_I(inode)->ns_ops;
45947 if (ns_ops && ns_ops->put)
45948 ns_ops->put(PROC_I(inode)->ns);
45949 +
45950 +#ifdef CONFIG_PROC_SYSCTL
45951 + if (inode->i_op == &proc_sys_inode_operations ||
45952 + inode->i_op == &proc_sys_dir_operations)
45953 + gr_handle_delete(inode->i_ino, inode->i_sb->s_dev);
45954 +#endif
45955 +
45956 }
45957
45958 static struct kmem_cache * proc_inode_cachep;
45959 @@ -440,7 +453,11 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
45960 if (de->mode) {
45961 inode->i_mode = de->mode;
45962 inode->i_uid = de->uid;
45963 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45964 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
45965 +#else
45966 inode->i_gid = de->gid;
45967 +#endif
45968 }
45969 if (de->size)
45970 inode->i_size = de->size;
45971 diff --git a/fs/proc/internal.h b/fs/proc/internal.h
45972 index 7838e5c..ff92cbc 100644
45973 --- a/fs/proc/internal.h
45974 +++ b/fs/proc/internal.h
45975 @@ -51,6 +51,9 @@ extern int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
45976 struct pid *pid, struct task_struct *task);
45977 extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
45978 struct pid *pid, struct task_struct *task);
45979 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
45980 +extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
45981 +#endif
45982 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
45983
45984 extern const struct file_operations proc_maps_operations;
45985 diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
45986 index d245cb2..f4e8498 100644
45987 --- a/fs/proc/kcore.c
45988 +++ b/fs/proc/kcore.c
45989 @@ -478,9 +478,10 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
45990 * the addresses in the elf_phdr on our list.
45991 */
45992 start = kc_offset_to_vaddr(*fpos - elf_buflen);
45993 - if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
45994 + tsz = PAGE_SIZE - (start & ~PAGE_MASK);
45995 + if (tsz > buflen)
45996 tsz = buflen;
45997 -
45998 +
45999 while (buflen) {
46000 struct kcore_list *m;
46001
46002 @@ -509,20 +510,23 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
46003 kfree(elf_buf);
46004 } else {
46005 if (kern_addr_valid(start)) {
46006 - unsigned long n;
46007 + char *elf_buf;
46008 + mm_segment_t oldfs;
46009
46010 - n = copy_to_user(buffer, (char *)start, tsz);
46011 - /*
46012 - * We cannot distingush between fault on source
46013 - * and fault on destination. When this happens
46014 - * we clear too and hope it will trigger the
46015 - * EFAULT again.
46016 - */
46017 - if (n) {
46018 - if (clear_user(buffer + tsz - n,
46019 - n))
46020 + elf_buf = kmalloc(tsz, GFP_KERNEL);
46021 + if (!elf_buf)
46022 + return -ENOMEM;
46023 + oldfs = get_fs();
46024 + set_fs(KERNEL_DS);
46025 + if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
46026 + set_fs(oldfs);
46027 + if (copy_to_user(buffer, elf_buf, tsz)) {
46028 + kfree(elf_buf);
46029 return -EFAULT;
46030 + }
46031 }
46032 + set_fs(oldfs);
46033 + kfree(elf_buf);
46034 } else {
46035 if (clear_user(buffer, tsz))
46036 return -EFAULT;
46037 @@ -542,6 +546,9 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
46038
46039 static int open_kcore(struct inode *inode, struct file *filp)
46040 {
46041 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
46042 + return -EPERM;
46043 +#endif
46044 if (!capable(CAP_SYS_RAWIO))
46045 return -EPERM;
46046 if (kcore_need_update)
46047 diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
46048 index 80e4645..53e5fcf 100644
46049 --- a/fs/proc/meminfo.c
46050 +++ b/fs/proc/meminfo.c
46051 @@ -158,7 +158,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
46052 vmi.used >> 10,
46053 vmi.largest_chunk >> 10
46054 #ifdef CONFIG_MEMORY_FAILURE
46055 - ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
46056 + ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
46057 #endif
46058 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
46059 ,K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
46060 diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
46061 index b1822dd..df622cb 100644
46062 --- a/fs/proc/nommu.c
46063 +++ b/fs/proc/nommu.c
46064 @@ -66,7 +66,7 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region)
46065 if (len < 1)
46066 len = 1;
46067 seq_printf(m, "%*c", len, ' ');
46068 - seq_path(m, &file->f_path, "");
46069 + seq_path(m, &file->f_path, "\n\\");
46070 }
46071
46072 seq_putc(m, '\n');
46073 diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
46074 index f738024..876984a 100644
46075 --- a/fs/proc/proc_net.c
46076 +++ b/fs/proc/proc_net.c
46077 @@ -105,6 +105,17 @@ static struct net *get_proc_task_net(struct inode *dir)
46078 struct task_struct *task;
46079 struct nsproxy *ns;
46080 struct net *net = NULL;
46081 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
46082 + const struct cred *cred = current_cred();
46083 +#endif
46084 +
46085 +#ifdef CONFIG_GRKERNSEC_PROC_USER
46086 + if (cred->fsuid)
46087 + return net;
46088 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
46089 + if (cred->fsuid && !in_group_p(CONFIG_GRKERNSEC_PROC_GID))
46090 + return net;
46091 +#endif
46092
46093 rcu_read_lock();
46094 task = pid_task(proc_pid(dir), PIDTYPE_PID);
46095 diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
46096 index a6b6217..1e0579d 100644
46097 --- a/fs/proc/proc_sysctl.c
46098 +++ b/fs/proc/proc_sysctl.c
46099 @@ -9,11 +9,13 @@
46100 #include <linux/namei.h>
46101 #include "internal.h"
46102
46103 +extern __u32 gr_handle_sysctl(const struct ctl_table *table, const int op);
46104 +
46105 static const struct dentry_operations proc_sys_dentry_operations;
46106 static const struct file_operations proc_sys_file_operations;
46107 -static const struct inode_operations proc_sys_inode_operations;
46108 +const struct inode_operations proc_sys_inode_operations;
46109 static const struct file_operations proc_sys_dir_file_operations;
46110 -static const struct inode_operations proc_sys_dir_operations;
46111 +const struct inode_operations proc_sys_dir_operations;
46112
46113 void proc_sys_poll_notify(struct ctl_table_poll *poll)
46114 {
46115 @@ -131,8 +133,14 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
46116
46117 err = NULL;
46118 d_set_d_op(dentry, &proc_sys_dentry_operations);
46119 +
46120 + gr_handle_proc_create(dentry, inode);
46121 +
46122 d_add(dentry, inode);
46123
46124 + if (gr_handle_sysctl(p, MAY_EXEC))
46125 + err = ERR_PTR(-ENOENT);
46126 +
46127 out:
46128 sysctl_head_finish(head);
46129 return err;
46130 @@ -163,6 +171,12 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
46131 if (!table->proc_handler)
46132 goto out;
46133
46134 +#ifdef CONFIG_GRKERNSEC
46135 + error = -EPERM;
46136 + if (write && !capable(CAP_SYS_ADMIN))
46137 + goto out;
46138 +#endif
46139 +
46140 /* careful: calling conventions are nasty here */
46141 res = count;
46142 error = table->proc_handler(table, write, buf, &res, ppos);
46143 @@ -245,6 +259,9 @@ static int proc_sys_fill_cache(struct file *filp, void *dirent,
46144 return -ENOMEM;
46145 } else {
46146 d_set_d_op(child, &proc_sys_dentry_operations);
46147 +
46148 + gr_handle_proc_create(child, inode);
46149 +
46150 d_add(child, inode);
46151 }
46152 } else {
46153 @@ -273,6 +290,9 @@ static int scan(struct ctl_table_header *head, ctl_table *table,
46154 if (*pos < file->f_pos)
46155 continue;
46156
46157 + if (gr_handle_sysctl(table, 0))
46158 + continue;
46159 +
46160 res = proc_sys_fill_cache(file, dirent, filldir, head, table);
46161 if (res)
46162 return res;
46163 @@ -398,6 +418,9 @@ static int proc_sys_getattr(struct vfsmount *mnt, struct dentry *dentry, struct
46164 if (IS_ERR(head))
46165 return PTR_ERR(head);
46166
46167 + if (table && gr_handle_sysctl(table, MAY_EXEC))
46168 + return -ENOENT;
46169 +
46170 generic_fillattr(inode, stat);
46171 if (table)
46172 stat->mode = (stat->mode & S_IFMT) | table->mode;
46173 @@ -420,13 +443,13 @@ static const struct file_operations proc_sys_dir_file_operations = {
46174 .llseek = generic_file_llseek,
46175 };
46176
46177 -static const struct inode_operations proc_sys_inode_operations = {
46178 +const struct inode_operations proc_sys_inode_operations = {
46179 .permission = proc_sys_permission,
46180 .setattr = proc_sys_setattr,
46181 .getattr = proc_sys_getattr,
46182 };
46183
46184 -static const struct inode_operations proc_sys_dir_operations = {
46185 +const struct inode_operations proc_sys_dir_operations = {
46186 .lookup = proc_sys_lookup,
46187 .permission = proc_sys_permission,
46188 .setattr = proc_sys_setattr,
46189 diff --git a/fs/proc/root.c b/fs/proc/root.c
46190 index 03102d9..4ae347e 100644
46191 --- a/fs/proc/root.c
46192 +++ b/fs/proc/root.c
46193 @@ -121,7 +121,15 @@ void __init proc_root_init(void)
46194 #ifdef CONFIG_PROC_DEVICETREE
46195 proc_device_tree_init();
46196 #endif
46197 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
46198 +#ifdef CONFIG_GRKERNSEC_PROC_USER
46199 + proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
46200 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
46201 + proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
46202 +#endif
46203 +#else
46204 proc_mkdir("bus", NULL);
46205 +#endif
46206 proc_sys_init();
46207 }
46208
46209 diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
46210 index 7dcd2a2..b2f410e 100644
46211 --- a/fs/proc/task_mmu.c
46212 +++ b/fs/proc/task_mmu.c
46213 @@ -11,6 +11,7 @@
46214 #include <linux/rmap.h>
46215 #include <linux/swap.h>
46216 #include <linux/swapops.h>
46217 +#include <linux/grsecurity.h>
46218
46219 #include <asm/elf.h>
46220 #include <asm/uaccess.h>
46221 @@ -52,8 +53,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
46222 "VmExe:\t%8lu kB\n"
46223 "VmLib:\t%8lu kB\n"
46224 "VmPTE:\t%8lu kB\n"
46225 - "VmSwap:\t%8lu kB\n",
46226 - hiwater_vm << (PAGE_SHIFT-10),
46227 + "VmSwap:\t%8lu kB\n"
46228 +
46229 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
46230 + "CsBase:\t%8lx\nCsLim:\t%8lx\n"
46231 +#endif
46232 +
46233 + ,hiwater_vm << (PAGE_SHIFT-10),
46234 (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
46235 mm->locked_vm << (PAGE_SHIFT-10),
46236 mm->pinned_vm << (PAGE_SHIFT-10),
46237 @@ -62,7 +68,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
46238 data << (PAGE_SHIFT-10),
46239 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
46240 (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
46241 - swap << (PAGE_SHIFT-10));
46242 + swap << (PAGE_SHIFT-10)
46243 +
46244 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
46245 + , mm->context.user_cs_base, mm->context.user_cs_limit
46246 +#endif
46247 +
46248 + );
46249 }
46250
46251 unsigned long task_vsize(struct mm_struct *mm)
46252 @@ -209,6 +221,12 @@ static int do_maps_open(struct inode *inode, struct file *file,
46253 return ret;
46254 }
46255
46256 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46257 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
46258 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
46259 + _mm->pax_flags & MF_PAX_SEGMEXEC))
46260 +#endif
46261 +
46262 static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
46263 {
46264 struct mm_struct *mm = vma->vm_mm;
46265 @@ -227,13 +245,13 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
46266 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
46267 }
46268
46269 - /* We don't show the stack guard page in /proc/maps */
46270 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46271 + start = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start;
46272 + end = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end;
46273 +#else
46274 start = vma->vm_start;
46275 - if (stack_guard_page_start(vma, start))
46276 - start += PAGE_SIZE;
46277 end = vma->vm_end;
46278 - if (stack_guard_page_end(vma, end))
46279 - end -= PAGE_SIZE;
46280 +#endif
46281
46282 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
46283 start,
46284 @@ -242,7 +260,11 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
46285 flags & VM_WRITE ? 'w' : '-',
46286 flags & VM_EXEC ? 'x' : '-',
46287 flags & VM_MAYSHARE ? 's' : 'p',
46288 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46289 + PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
46290 +#else
46291 pgoff,
46292 +#endif
46293 MAJOR(dev), MINOR(dev), ino, &len);
46294
46295 /*
46296 @@ -251,7 +273,7 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
46297 */
46298 if (file) {
46299 pad_len_spaces(m, len);
46300 - seq_path(m, &file->f_path, "\n");
46301 + seq_path(m, &file->f_path, "\n\\");
46302 } else {
46303 const char *name = arch_vma_name(vma);
46304 if (!name) {
46305 @@ -259,8 +281,9 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
46306 if (vma->vm_start <= mm->brk &&
46307 vma->vm_end >= mm->start_brk) {
46308 name = "[heap]";
46309 - } else if (vma->vm_start <= mm->start_stack &&
46310 - vma->vm_end >= mm->start_stack) {
46311 + } else if ((vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
46312 + (vma->vm_start <= mm->start_stack &&
46313 + vma->vm_end >= mm->start_stack)) {
46314 name = "[stack]";
46315 }
46316 } else {
46317 @@ -281,6 +304,13 @@ static int show_map(struct seq_file *m, void *v)
46318 struct proc_maps_private *priv = m->private;
46319 struct task_struct *task = priv->task;
46320
46321 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46322 + if (current->exec_id != m->exec_id) {
46323 + gr_log_badprocpid("maps");
46324 + return 0;
46325 + }
46326 +#endif
46327 +
46328 show_map_vma(m, vma);
46329
46330 if (m->count < m->size) /* vma is copied successfully */
46331 @@ -434,12 +464,23 @@ static int show_smap(struct seq_file *m, void *v)
46332 .private = &mss,
46333 };
46334
46335 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46336 + if (current->exec_id != m->exec_id) {
46337 + gr_log_badprocpid("smaps");
46338 + return 0;
46339 + }
46340 +#endif
46341 memset(&mss, 0, sizeof mss);
46342 - mss.vma = vma;
46343 - /* mmap_sem is held in m_start */
46344 - if (vma->vm_mm && !is_vm_hugetlb_page(vma))
46345 - walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
46346 -
46347 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46348 + if (!PAX_RAND_FLAGS(vma->vm_mm)) {
46349 +#endif
46350 + mss.vma = vma;
46351 + /* mmap_sem is held in m_start */
46352 + if (vma->vm_mm && !is_vm_hugetlb_page(vma))
46353 + walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
46354 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46355 + }
46356 +#endif
46357 show_map_vma(m, vma);
46358
46359 seq_printf(m,
46360 @@ -457,7 +498,11 @@ static int show_smap(struct seq_file *m, void *v)
46361 "KernelPageSize: %8lu kB\n"
46362 "MMUPageSize: %8lu kB\n"
46363 "Locked: %8lu kB\n",
46364 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46365 + PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
46366 +#else
46367 (vma->vm_end - vma->vm_start) >> 10,
46368 +#endif
46369 mss.resident >> 10,
46370 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
46371 mss.shared_clean >> 10,
46372 @@ -1015,6 +1060,13 @@ static int show_numa_map(struct seq_file *m, void *v)
46373 int n;
46374 char buffer[50];
46375
46376 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46377 + if (current->exec_id != m->exec_id) {
46378 + gr_log_badprocpid("numa_maps");
46379 + return 0;
46380 + }
46381 +#endif
46382 +
46383 if (!mm)
46384 return 0;
46385
46386 @@ -1032,11 +1084,15 @@ static int show_numa_map(struct seq_file *m, void *v)
46387 mpol_to_str(buffer, sizeof(buffer), pol, 0);
46388 mpol_cond_put(pol);
46389
46390 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46391 + seq_printf(m, "%08lx %s", PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : vma->vm_start, buffer);
46392 +#else
46393 seq_printf(m, "%08lx %s", vma->vm_start, buffer);
46394 +#endif
46395
46396 if (file) {
46397 seq_printf(m, " file=");
46398 - seq_path(m, &file->f_path, "\n\t= ");
46399 + seq_path(m, &file->f_path, "\n\t\\= ");
46400 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
46401 seq_printf(m, " heap");
46402 } else if (vma->vm_start <= mm->start_stack &&
46403 diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
46404 index 980de54..2a4db5f 100644
46405 --- a/fs/proc/task_nommu.c
46406 +++ b/fs/proc/task_nommu.c
46407 @@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
46408 else
46409 bytes += kobjsize(mm);
46410
46411 - if (current->fs && current->fs->users > 1)
46412 + if (current->fs && atomic_read(&current->fs->users) > 1)
46413 sbytes += kobjsize(current->fs);
46414 else
46415 bytes += kobjsize(current->fs);
46416 @@ -166,7 +166,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma)
46417
46418 if (file) {
46419 pad_len_spaces(m, len);
46420 - seq_path(m, &file->f_path, "");
46421 + seq_path(m, &file->f_path, "\n\\");
46422 } else if (mm) {
46423 if (vma->vm_start <= mm->start_stack &&
46424 vma->vm_end >= mm->start_stack) {
46425 diff --git a/fs/quota/netlink.c b/fs/quota/netlink.c
46426 index d67908b..d13f6a6 100644
46427 --- a/fs/quota/netlink.c
46428 +++ b/fs/quota/netlink.c
46429 @@ -33,7 +33,7 @@ static struct genl_family quota_genl_family = {
46430 void quota_send_warning(short type, unsigned int id, dev_t dev,
46431 const char warntype)
46432 {
46433 - static atomic_t seq;
46434 + static atomic_unchecked_t seq;
46435 struct sk_buff *skb;
46436 void *msg_head;
46437 int ret;
46438 @@ -49,7 +49,7 @@ void quota_send_warning(short type, unsigned int id, dev_t dev,
46439 "VFS: Not enough memory to send quota warning.\n");
46440 return;
46441 }
46442 - msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
46443 + msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq),
46444 &quota_genl_family, 0, QUOTA_NL_C_WARNING);
46445 if (!msg_head) {
46446 printk(KERN_ERR
46447 diff --git a/fs/readdir.c b/fs/readdir.c
46448 index 356f715..c918d38 100644
46449 --- a/fs/readdir.c
46450 +++ b/fs/readdir.c
46451 @@ -17,6 +17,7 @@
46452 #include <linux/security.h>
46453 #include <linux/syscalls.h>
46454 #include <linux/unistd.h>
46455 +#include <linux/namei.h>
46456
46457 #include <asm/uaccess.h>
46458
46459 @@ -67,6 +68,7 @@ struct old_linux_dirent {
46460
46461 struct readdir_callback {
46462 struct old_linux_dirent __user * dirent;
46463 + struct file * file;
46464 int result;
46465 };
46466
46467 @@ -84,6 +86,10 @@ static int fillonedir(void * __buf, const char * name, int namlen, loff_t offset
46468 buf->result = -EOVERFLOW;
46469 return -EOVERFLOW;
46470 }
46471 +
46472 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
46473 + return 0;
46474 +
46475 buf->result++;
46476 dirent = buf->dirent;
46477 if (!access_ok(VERIFY_WRITE, dirent,
46478 @@ -116,6 +122,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
46479
46480 buf.result = 0;
46481 buf.dirent = dirent;
46482 + buf.file = file;
46483
46484 error = vfs_readdir(file, fillonedir, &buf);
46485 if (buf.result)
46486 @@ -142,6 +149,7 @@ struct linux_dirent {
46487 struct getdents_callback {
46488 struct linux_dirent __user * current_dir;
46489 struct linux_dirent __user * previous;
46490 + struct file * file;
46491 int count;
46492 int error;
46493 };
46494 @@ -163,6 +171,10 @@ static int filldir(void * __buf, const char * name, int namlen, loff_t offset,
46495 buf->error = -EOVERFLOW;
46496 return -EOVERFLOW;
46497 }
46498 +
46499 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
46500 + return 0;
46501 +
46502 dirent = buf->previous;
46503 if (dirent) {
46504 if (__put_user(offset, &dirent->d_off))
46505 @@ -210,6 +222,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
46506 buf.previous = NULL;
46507 buf.count = count;
46508 buf.error = 0;
46509 + buf.file = file;
46510
46511 error = vfs_readdir(file, filldir, &buf);
46512 if (error >= 0)
46513 @@ -229,6 +242,7 @@ out:
46514 struct getdents_callback64 {
46515 struct linux_dirent64 __user * current_dir;
46516 struct linux_dirent64 __user * previous;
46517 + struct file *file;
46518 int count;
46519 int error;
46520 };
46521 @@ -244,6 +258,10 @@ static int filldir64(void * __buf, const char * name, int namlen, loff_t offset,
46522 buf->error = -EINVAL; /* only used if we fail.. */
46523 if (reclen > buf->count)
46524 return -EINVAL;
46525 +
46526 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
46527 + return 0;
46528 +
46529 dirent = buf->previous;
46530 if (dirent) {
46531 if (__put_user(offset, &dirent->d_off))
46532 @@ -291,6 +309,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
46533
46534 buf.current_dir = dirent;
46535 buf.previous = NULL;
46536 + buf.file = file;
46537 buf.count = count;
46538 buf.error = 0;
46539
46540 @@ -299,7 +318,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
46541 error = buf.error;
46542 lastdirent = buf.previous;
46543 if (lastdirent) {
46544 - typeof(lastdirent->d_off) d_off = file->f_pos;
46545 + typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
46546 if (__put_user(d_off, &lastdirent->d_off))
46547 error = -EFAULT;
46548 else
46549 diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
46550 index 60c0804..d814f98 100644
46551 --- a/fs/reiserfs/do_balan.c
46552 +++ b/fs/reiserfs/do_balan.c
46553 @@ -2051,7 +2051,7 @@ void do_balance(struct tree_balance *tb, /* tree_balance structure */
46554 return;
46555 }
46556
46557 - atomic_inc(&(fs_generation(tb->tb_sb)));
46558 + atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
46559 do_balance_starts(tb);
46560
46561 /* balance leaf returns 0 except if combining L R and S into
46562 diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
46563 index 7a99811..a7c96c4 100644
46564 --- a/fs/reiserfs/procfs.c
46565 +++ b/fs/reiserfs/procfs.c
46566 @@ -113,7 +113,7 @@ static int show_super(struct seq_file *m, struct super_block *sb)
46567 "SMALL_TAILS " : "NO_TAILS ",
46568 replay_only(sb) ? "REPLAY_ONLY " : "",
46569 convert_reiserfs(sb) ? "CONV " : "",
46570 - atomic_read(&r->s_generation_counter),
46571 + atomic_read_unchecked(&r->s_generation_counter),
46572 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
46573 SF(s_do_balance), SF(s_unneeded_left_neighbor),
46574 SF(s_good_search_by_key_reada), SF(s_bmaps),
46575 diff --git a/fs/select.c b/fs/select.c
46576 index d33418f..2a5345e 100644
46577 --- a/fs/select.c
46578 +++ b/fs/select.c
46579 @@ -20,6 +20,7 @@
46580 #include <linux/module.h>
46581 #include <linux/slab.h>
46582 #include <linux/poll.h>
46583 +#include <linux/security.h>
46584 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
46585 #include <linux/file.h>
46586 #include <linux/fdtable.h>
46587 @@ -837,6 +838,7 @@ int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
46588 struct poll_list *walk = head;
46589 unsigned long todo = nfds;
46590
46591 + gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
46592 if (nfds > rlimit(RLIMIT_NOFILE))
46593 return -EINVAL;
46594
46595 diff --git a/fs/seq_file.c b/fs/seq_file.c
46596 index dba43c3..9fb8511 100644
46597 --- a/fs/seq_file.c
46598 +++ b/fs/seq_file.c
46599 @@ -9,6 +9,7 @@
46600 #include <linux/module.h>
46601 #include <linux/seq_file.h>
46602 #include <linux/slab.h>
46603 +#include <linux/sched.h>
46604
46605 #include <asm/uaccess.h>
46606 #include <asm/page.h>
46607 @@ -40,6 +41,9 @@ int seq_open(struct file *file, const struct seq_operations *op)
46608 memset(p, 0, sizeof(*p));
46609 mutex_init(&p->lock);
46610 p->op = op;
46611 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46612 + p->exec_id = current->exec_id;
46613 +#endif
46614
46615 /*
46616 * Wrappers around seq_open(e.g. swaps_open) need to be
46617 @@ -76,7 +80,8 @@ static int traverse(struct seq_file *m, loff_t offset)
46618 return 0;
46619 }
46620 if (!m->buf) {
46621 - m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
46622 + m->size = PAGE_SIZE;
46623 + m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
46624 if (!m->buf)
46625 return -ENOMEM;
46626 }
46627 @@ -116,7 +121,8 @@ static int traverse(struct seq_file *m, loff_t offset)
46628 Eoverflow:
46629 m->op->stop(m, p);
46630 kfree(m->buf);
46631 - m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
46632 + m->size <<= 1;
46633 + m->buf = kmalloc(m->size, GFP_KERNEL);
46634 return !m->buf ? -ENOMEM : -EAGAIN;
46635 }
46636
46637 @@ -169,7 +175,8 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
46638 m->version = file->f_version;
46639 /* grab buffer if we didn't have one */
46640 if (!m->buf) {
46641 - m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
46642 + m->size = PAGE_SIZE;
46643 + m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
46644 if (!m->buf)
46645 goto Enomem;
46646 }
46647 @@ -210,7 +217,8 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
46648 goto Fill;
46649 m->op->stop(m, p);
46650 kfree(m->buf);
46651 - m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
46652 + m->size <<= 1;
46653 + m->buf = kmalloc(m->size, GFP_KERNEL);
46654 if (!m->buf)
46655 goto Enomem;
46656 m->count = 0;
46657 @@ -549,7 +557,7 @@ static void single_stop(struct seq_file *p, void *v)
46658 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
46659 void *data)
46660 {
46661 - struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
46662 + seq_operations_no_const *op = kmalloc(sizeof(*op), GFP_KERNEL);
46663 int res = -ENOMEM;
46664
46665 if (op) {
46666 diff --git a/fs/splice.c b/fs/splice.c
46667 index fa2defa..8601650 100644
46668 --- a/fs/splice.c
46669 +++ b/fs/splice.c
46670 @@ -194,7 +194,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
46671 pipe_lock(pipe);
46672
46673 for (;;) {
46674 - if (!pipe->readers) {
46675 + if (!atomic_read(&pipe->readers)) {
46676 send_sig(SIGPIPE, current, 0);
46677 if (!ret)
46678 ret = -EPIPE;
46679 @@ -248,9 +248,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
46680 do_wakeup = 0;
46681 }
46682
46683 - pipe->waiting_writers++;
46684 + atomic_inc(&pipe->waiting_writers);
46685 pipe_wait(pipe);
46686 - pipe->waiting_writers--;
46687 + atomic_dec(&pipe->waiting_writers);
46688 }
46689
46690 pipe_unlock(pipe);
46691 @@ -560,7 +560,7 @@ static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
46692 old_fs = get_fs();
46693 set_fs(get_ds());
46694 /* The cast to a user pointer is valid due to the set_fs() */
46695 - res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
46696 + res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos);
46697 set_fs(old_fs);
46698
46699 return res;
46700 @@ -575,7 +575,7 @@ static ssize_t kernel_write(struct file *file, const char *buf, size_t count,
46701 old_fs = get_fs();
46702 set_fs(get_ds());
46703 /* The cast to a user pointer is valid due to the set_fs() */
46704 - res = vfs_write(file, (const char __user *)buf, count, &pos);
46705 + res = vfs_write(file, (const char __force_user *)buf, count, &pos);
46706 set_fs(old_fs);
46707
46708 return res;
46709 @@ -626,7 +626,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
46710 goto err;
46711
46712 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
46713 - vec[i].iov_base = (void __user *) page_address(page);
46714 + vec[i].iov_base = (void __force_user *) page_address(page);
46715 vec[i].iov_len = this_len;
46716 spd.pages[i] = page;
46717 spd.nr_pages++;
46718 @@ -846,10 +846,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
46719 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
46720 {
46721 while (!pipe->nrbufs) {
46722 - if (!pipe->writers)
46723 + if (!atomic_read(&pipe->writers))
46724 return 0;
46725
46726 - if (!pipe->waiting_writers && sd->num_spliced)
46727 + if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
46728 return 0;
46729
46730 if (sd->flags & SPLICE_F_NONBLOCK)
46731 @@ -1182,7 +1182,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
46732 * out of the pipe right after the splice_to_pipe(). So set
46733 * PIPE_READERS appropriately.
46734 */
46735 - pipe->readers = 1;
46736 + atomic_set(&pipe->readers, 1);
46737
46738 current->splice_pipe = pipe;
46739 }
46740 @@ -1734,9 +1734,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
46741 ret = -ERESTARTSYS;
46742 break;
46743 }
46744 - if (!pipe->writers)
46745 + if (!atomic_read(&pipe->writers))
46746 break;
46747 - if (!pipe->waiting_writers) {
46748 + if (!atomic_read(&pipe->waiting_writers)) {
46749 if (flags & SPLICE_F_NONBLOCK) {
46750 ret = -EAGAIN;
46751 break;
46752 @@ -1768,7 +1768,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
46753 pipe_lock(pipe);
46754
46755 while (pipe->nrbufs >= pipe->buffers) {
46756 - if (!pipe->readers) {
46757 + if (!atomic_read(&pipe->readers)) {
46758 send_sig(SIGPIPE, current, 0);
46759 ret = -EPIPE;
46760 break;
46761 @@ -1781,9 +1781,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
46762 ret = -ERESTARTSYS;
46763 break;
46764 }
46765 - pipe->waiting_writers++;
46766 + atomic_inc(&pipe->waiting_writers);
46767 pipe_wait(pipe);
46768 - pipe->waiting_writers--;
46769 + atomic_dec(&pipe->waiting_writers);
46770 }
46771
46772 pipe_unlock(pipe);
46773 @@ -1819,14 +1819,14 @@ retry:
46774 pipe_double_lock(ipipe, opipe);
46775
46776 do {
46777 - if (!opipe->readers) {
46778 + if (!atomic_read(&opipe->readers)) {
46779 send_sig(SIGPIPE, current, 0);
46780 if (!ret)
46781 ret = -EPIPE;
46782 break;
46783 }
46784
46785 - if (!ipipe->nrbufs && !ipipe->writers)
46786 + if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
46787 break;
46788
46789 /*
46790 @@ -1923,7 +1923,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
46791 pipe_double_lock(ipipe, opipe);
46792
46793 do {
46794 - if (!opipe->readers) {
46795 + if (!atomic_read(&opipe->readers)) {
46796 send_sig(SIGPIPE, current, 0);
46797 if (!ret)
46798 ret = -EPIPE;
46799 @@ -1968,7 +1968,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
46800 * return EAGAIN if we have the potential of some data in the
46801 * future, otherwise just return 0
46802 */
46803 - if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
46804 + if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
46805 ret = -EAGAIN;
46806
46807 pipe_unlock(ipipe);
46808 diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
46809 index 7fdf6a7..e6cd8ad 100644
46810 --- a/fs/sysfs/dir.c
46811 +++ b/fs/sysfs/dir.c
46812 @@ -642,6 +642,18 @@ static int create_dir(struct kobject *kobj, struct sysfs_dirent *parent_sd,
46813 struct sysfs_dirent *sd;
46814 int rc;
46815
46816 +#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
46817 + const char *parent_name = parent_sd->s_name;
46818 +
46819 + mode = S_IFDIR | S_IRWXU;
46820 +
46821 + if ((!strcmp(parent_name, "") && (!strcmp(name, "devices") || !strcmp(name, "fs"))) ||
46822 + (!strcmp(parent_name, "devices") && !strcmp(name, "system")) ||
46823 + (!strcmp(parent_name, "fs") && (!strcmp(name, "selinux") || !strcmp(name, "fuse"))) ||
46824 + (!strcmp(parent_name, "system") && !strcmp(name, "cpu")))
46825 + mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
46826 +#endif
46827 +
46828 /* allocate */
46829 sd = sysfs_new_dirent(name, mode, SYSFS_DIR);
46830 if (!sd)
46831 diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
46832 index 779789a..f58193c 100644
46833 --- a/fs/sysfs/file.c
46834 +++ b/fs/sysfs/file.c
46835 @@ -37,7 +37,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent_lock);
46836
46837 struct sysfs_open_dirent {
46838 atomic_t refcnt;
46839 - atomic_t event;
46840 + atomic_unchecked_t event;
46841 wait_queue_head_t poll;
46842 struct list_head buffers; /* goes through sysfs_buffer.list */
46843 };
46844 @@ -81,7 +81,7 @@ static int fill_read_buffer(struct dentry * dentry, struct sysfs_buffer * buffer
46845 if (!sysfs_get_active(attr_sd))
46846 return -ENODEV;
46847
46848 - buffer->event = atomic_read(&attr_sd->s_attr.open->event);
46849 + buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
46850 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
46851
46852 sysfs_put_active(attr_sd);
46853 @@ -287,7 +287,7 @@ static int sysfs_get_open_dirent(struct sysfs_dirent *sd,
46854 return -ENOMEM;
46855
46856 atomic_set(&new_od->refcnt, 0);
46857 - atomic_set(&new_od->event, 1);
46858 + atomic_set_unchecked(&new_od->event, 1);
46859 init_waitqueue_head(&new_od->poll);
46860 INIT_LIST_HEAD(&new_od->buffers);
46861 goto retry;
46862 @@ -432,7 +432,7 @@ static unsigned int sysfs_poll(struct file *filp, poll_table *wait)
46863
46864 sysfs_put_active(attr_sd);
46865
46866 - if (buffer->event != atomic_read(&od->event))
46867 + if (buffer->event != atomic_read_unchecked(&od->event))
46868 goto trigger;
46869
46870 return DEFAULT_POLLMASK;
46871 @@ -451,7 +451,7 @@ void sysfs_notify_dirent(struct sysfs_dirent *sd)
46872
46873 od = sd->s_attr.open;
46874 if (od) {
46875 - atomic_inc(&od->event);
46876 + atomic_inc_unchecked(&od->event);
46877 wake_up_interruptible(&od->poll);
46878 }
46879
46880 diff --git a/fs/sysfs/symlink.c b/fs/sysfs/symlink.c
46881 index a7ac78f..02158e1 100644
46882 --- a/fs/sysfs/symlink.c
46883 +++ b/fs/sysfs/symlink.c
46884 @@ -286,7 +286,7 @@ static void *sysfs_follow_link(struct dentry *dentry, struct nameidata *nd)
46885
46886 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
46887 {
46888 - char *page = nd_get_link(nd);
46889 + const char *page = nd_get_link(nd);
46890 if (!IS_ERR(page))
46891 free_page((unsigned long)page);
46892 }
46893 diff --git a/fs/udf/misc.c b/fs/udf/misc.c
46894 index c175b4d..8f36a16 100644
46895 --- a/fs/udf/misc.c
46896 +++ b/fs/udf/misc.c
46897 @@ -289,7 +289,7 @@ void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum,
46898
46899 u8 udf_tag_checksum(const struct tag *t)
46900 {
46901 - u8 *data = (u8 *)t;
46902 + const u8 *data = (const u8 *)t;
46903 u8 checksum = 0;
46904 int i;
46905 for (i = 0; i < sizeof(struct tag); ++i)
46906 diff --git a/fs/utimes.c b/fs/utimes.c
46907 index ba653f3..06ea4b1 100644
46908 --- a/fs/utimes.c
46909 +++ b/fs/utimes.c
46910 @@ -1,6 +1,7 @@
46911 #include <linux/compiler.h>
46912 #include <linux/file.h>
46913 #include <linux/fs.h>
46914 +#include <linux/security.h>
46915 #include <linux/linkage.h>
46916 #include <linux/mount.h>
46917 #include <linux/namei.h>
46918 @@ -101,6 +102,12 @@ static int utimes_common(struct path *path, struct timespec *times)
46919 goto mnt_drop_write_and_out;
46920 }
46921 }
46922 +
46923 + if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
46924 + error = -EACCES;
46925 + goto mnt_drop_write_and_out;
46926 + }
46927 +
46928 mutex_lock(&inode->i_mutex);
46929 error = notify_change(path->dentry, &newattrs);
46930 mutex_unlock(&inode->i_mutex);
46931 diff --git a/fs/xattr.c b/fs/xattr.c
46932 index 67583de..c5aad14 100644
46933 --- a/fs/xattr.c
46934 +++ b/fs/xattr.c
46935 @@ -315,7 +315,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
46936 * Extended attribute SET operations
46937 */
46938 static long
46939 -setxattr(struct dentry *d, const char __user *name, const void __user *value,
46940 +setxattr(struct path *path, const char __user *name, const void __user *value,
46941 size_t size, int flags)
46942 {
46943 int error;
46944 @@ -339,7 +339,13 @@ setxattr(struct dentry *d, const char __user *name, const void __user *value,
46945 return PTR_ERR(kvalue);
46946 }
46947
46948 - error = vfs_setxattr(d, kname, kvalue, size, flags);
46949 + if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
46950 + error = -EACCES;
46951 + goto out;
46952 + }
46953 +
46954 + error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
46955 +out:
46956 kfree(kvalue);
46957 return error;
46958 }
46959 @@ -356,7 +362,7 @@ SYSCALL_DEFINE5(setxattr, const char __user *, pathname,
46960 return error;
46961 error = mnt_want_write(path.mnt);
46962 if (!error) {
46963 - error = setxattr(path.dentry, name, value, size, flags);
46964 + error = setxattr(&path, name, value, size, flags);
46965 mnt_drop_write(path.mnt);
46966 }
46967 path_put(&path);
46968 @@ -375,7 +381,7 @@ SYSCALL_DEFINE5(lsetxattr, const char __user *, pathname,
46969 return error;
46970 error = mnt_want_write(path.mnt);
46971 if (!error) {
46972 - error = setxattr(path.dentry, name, value, size, flags);
46973 + error = setxattr(&path, name, value, size, flags);
46974 mnt_drop_write(path.mnt);
46975 }
46976 path_put(&path);
46977 @@ -386,17 +392,15 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
46978 const void __user *,value, size_t, size, int, flags)
46979 {
46980 struct file *f;
46981 - struct dentry *dentry;
46982 int error = -EBADF;
46983
46984 f = fget(fd);
46985 if (!f)
46986 return error;
46987 - dentry = f->f_path.dentry;
46988 - audit_inode(NULL, dentry);
46989 + audit_inode(NULL, f->f_path.dentry);
46990 error = mnt_want_write_file(f);
46991 if (!error) {
46992 - error = setxattr(dentry, name, value, size, flags);
46993 + error = setxattr(&f->f_path, name, value, size, flags);
46994 mnt_drop_write(f->f_path.mnt);
46995 }
46996 fput(f);
46997 diff --git a/fs/xattr_acl.c b/fs/xattr_acl.c
46998 index 8d5a506..7f62712 100644
46999 --- a/fs/xattr_acl.c
47000 +++ b/fs/xattr_acl.c
47001 @@ -17,8 +17,8 @@
47002 struct posix_acl *
47003 posix_acl_from_xattr(const void *value, size_t size)
47004 {
47005 - posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
47006 - posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
47007 + const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
47008 + const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
47009 int count;
47010 struct posix_acl *acl;
47011 struct posix_acl_entry *acl_e;
47012 diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
47013 index d0ab788..827999b 100644
47014 --- a/fs/xfs/xfs_bmap.c
47015 +++ b/fs/xfs/xfs_bmap.c
47016 @@ -190,7 +190,7 @@ xfs_bmap_validate_ret(
47017 int nmap,
47018 int ret_nmap);
47019 #else
47020 -#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
47021 +#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
47022 #endif /* DEBUG */
47023
47024 STATIC int
47025 diff --git a/fs/xfs/xfs_dir2_sf.c b/fs/xfs/xfs_dir2_sf.c
47026 index 79d05e8..e3e5861 100644
47027 --- a/fs/xfs/xfs_dir2_sf.c
47028 +++ b/fs/xfs/xfs_dir2_sf.c
47029 @@ -852,7 +852,15 @@ xfs_dir2_sf_getdents(
47030 }
47031
47032 ino = xfs_dir2_sfe_get_ino(sfp, sfep);
47033 - if (filldir(dirent, (char *)sfep->name, sfep->namelen,
47034 + if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
47035 + char name[sfep->namelen];
47036 + memcpy(name, sfep->name, sfep->namelen);
47037 + if (filldir(dirent, name, sfep->namelen,
47038 + off & 0x7fffffff, ino, DT_UNKNOWN)) {
47039 + *offset = off & 0x7fffffff;
47040 + return 0;
47041 + }
47042 + } else if (filldir(dirent, (char *)sfep->name, sfep->namelen,
47043 off & 0x7fffffff, ino, DT_UNKNOWN)) {
47044 *offset = off & 0x7fffffff;
47045 return 0;
47046 diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
47047 index d99a905..9f88202 100644
47048 --- a/fs/xfs/xfs_ioctl.c
47049 +++ b/fs/xfs/xfs_ioctl.c
47050 @@ -128,7 +128,7 @@ xfs_find_handle(
47051 }
47052
47053 error = -EFAULT;
47054 - if (copy_to_user(hreq->ohandle, &handle, hsize) ||
47055 + if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
47056 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
47057 goto out_put;
47058
47059 diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
47060 index 23ce927..e274cc1 100644
47061 --- a/fs/xfs/xfs_iops.c
47062 +++ b/fs/xfs/xfs_iops.c
47063 @@ -447,7 +447,7 @@ xfs_vn_put_link(
47064 struct nameidata *nd,
47065 void *p)
47066 {
47067 - char *s = nd_get_link(nd);
47068 + const char *s = nd_get_link(nd);
47069
47070 if (!IS_ERR(s))
47071 kfree(s);
47072 diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
47073 new file mode 100644
47074 index 0000000..41df561
47075 --- /dev/null
47076 +++ b/grsecurity/Kconfig
47077 @@ -0,0 +1,1075 @@
47078 +#
47079 +# grecurity configuration
47080 +#
47081 +
47082 +menu "Grsecurity"
47083 +
47084 +config GRKERNSEC
47085 + bool "Grsecurity"
47086 + select CRYPTO
47087 + select CRYPTO_SHA256
47088 + help
47089 + If you say Y here, you will be able to configure many features
47090 + that will enhance the security of your system. It is highly
47091 + recommended that you say Y here and read through the help
47092 + for each option so that you fully understand the features and
47093 + can evaluate their usefulness for your machine.
47094 +
47095 +choice
47096 + prompt "Security Level"
47097 + depends on GRKERNSEC
47098 + default GRKERNSEC_CUSTOM
47099 +
47100 +config GRKERNSEC_LOW
47101 + bool "Low"
47102 + select GRKERNSEC_LINK
47103 + select GRKERNSEC_FIFO
47104 + select GRKERNSEC_RANDNET
47105 + select GRKERNSEC_DMESG
47106 + select GRKERNSEC_CHROOT
47107 + select GRKERNSEC_CHROOT_CHDIR
47108 +
47109 + help
47110 + If you choose this option, several of the grsecurity options will
47111 + be enabled that will give you greater protection against a number
47112 + of attacks, while assuring that none of your software will have any
47113 + conflicts with the additional security measures. If you run a lot
47114 + of unusual software, or you are having problems with the higher
47115 + security levels, you should say Y here. With this option, the
47116 + following features are enabled:
47117 +
47118 + - Linking restrictions
47119 + - FIFO restrictions
47120 + - Restricted dmesg
47121 + - Enforced chdir("/") on chroot
47122 + - Runtime module disabling
47123 +
47124 +config GRKERNSEC_MEDIUM
47125 + bool "Medium"
47126 + select PAX
47127 + select PAX_EI_PAX
47128 + select PAX_PT_PAX_FLAGS
47129 + select PAX_HAVE_ACL_FLAGS
47130 + select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
47131 + select GRKERNSEC_CHROOT
47132 + select GRKERNSEC_CHROOT_SYSCTL
47133 + select GRKERNSEC_LINK
47134 + select GRKERNSEC_FIFO
47135 + select GRKERNSEC_DMESG
47136 + select GRKERNSEC_RANDNET
47137 + select GRKERNSEC_FORKFAIL
47138 + select GRKERNSEC_TIME
47139 + select GRKERNSEC_SIGNAL
47140 + select GRKERNSEC_CHROOT
47141 + select GRKERNSEC_CHROOT_UNIX
47142 + select GRKERNSEC_CHROOT_MOUNT
47143 + select GRKERNSEC_CHROOT_PIVOT
47144 + select GRKERNSEC_CHROOT_DOUBLE
47145 + select GRKERNSEC_CHROOT_CHDIR
47146 + select GRKERNSEC_CHROOT_MKNOD
47147 + select GRKERNSEC_PROC
47148 + select GRKERNSEC_PROC_USERGROUP
47149 + select PAX_RANDUSTACK
47150 + select PAX_ASLR
47151 + select PAX_RANDMMAP
47152 + select PAX_REFCOUNT if (X86 || SPARC64)
47153 + select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
47154 +
47155 + help
47156 + If you say Y here, several features in addition to those included
47157 + in the low additional security level will be enabled. These
47158 + features provide even more security to your system, though in rare
47159 + cases they may be incompatible with very old or poorly written
47160 + software. If you enable this option, make sure that your auth
47161 + service (identd) is running as gid 1001. With this option,
47162 + the following features (in addition to those provided in the
47163 + low additional security level) will be enabled:
47164 +
47165 + - Failed fork logging
47166 + - Time change logging
47167 + - Signal logging
47168 + - Deny mounts in chroot
47169 + - Deny double chrooting
47170 + - Deny sysctl writes in chroot
47171 + - Deny mknod in chroot
47172 + - Deny access to abstract AF_UNIX sockets out of chroot
47173 + - Deny pivot_root in chroot
47174 + - Denied reads/writes of /dev/kmem, /dev/mem, and /dev/port
47175 + - /proc restrictions with special GID set to 10 (usually wheel)
47176 + - Address Space Layout Randomization (ASLR)
47177 + - Prevent exploitation of most refcount overflows
47178 + - Bounds checking of copying between the kernel and userland
47179 +
47180 +config GRKERNSEC_HIGH
47181 + bool "High"
47182 + select GRKERNSEC_LINK
47183 + select GRKERNSEC_FIFO
47184 + select GRKERNSEC_DMESG
47185 + select GRKERNSEC_FORKFAIL
47186 + select GRKERNSEC_TIME
47187 + select GRKERNSEC_SIGNAL
47188 + select GRKERNSEC_CHROOT
47189 + select GRKERNSEC_CHROOT_SHMAT
47190 + select GRKERNSEC_CHROOT_UNIX
47191 + select GRKERNSEC_CHROOT_MOUNT
47192 + select GRKERNSEC_CHROOT_FCHDIR
47193 + select GRKERNSEC_CHROOT_PIVOT
47194 + select GRKERNSEC_CHROOT_DOUBLE
47195 + select GRKERNSEC_CHROOT_CHDIR
47196 + select GRKERNSEC_CHROOT_MKNOD
47197 + select GRKERNSEC_CHROOT_CAPS
47198 + select GRKERNSEC_CHROOT_SYSCTL
47199 + select GRKERNSEC_CHROOT_FINDTASK
47200 + select GRKERNSEC_SYSFS_RESTRICT
47201 + select GRKERNSEC_PROC
47202 + select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
47203 + select GRKERNSEC_HIDESYM
47204 + select GRKERNSEC_BRUTE
47205 + select GRKERNSEC_PROC_USERGROUP
47206 + select GRKERNSEC_KMEM
47207 + select GRKERNSEC_RESLOG
47208 + select GRKERNSEC_RANDNET
47209 + select GRKERNSEC_PROC_ADD
47210 + select GRKERNSEC_CHROOT_CHMOD
47211 + select GRKERNSEC_CHROOT_NICE
47212 + select GRKERNSEC_SETXID
47213 + select GRKERNSEC_AUDIT_MOUNT
47214 + select GRKERNSEC_MODHARDEN if (MODULES)
47215 + select GRKERNSEC_HARDEN_PTRACE
47216 + select GRKERNSEC_PTRACE_READEXEC
47217 + select GRKERNSEC_VM86 if (X86_32)
47218 + select GRKERNSEC_KERN_LOCKOUT if (X86 || ARM || PPC || SPARC)
47219 + select PAX
47220 + select PAX_RANDUSTACK
47221 + select PAX_ASLR
47222 + select PAX_RANDMMAP
47223 + select PAX_NOEXEC
47224 + select PAX_MPROTECT
47225 + select PAX_EI_PAX
47226 + select PAX_PT_PAX_FLAGS
47227 + select PAX_HAVE_ACL_FLAGS
47228 + select PAX_KERNEXEC if ((PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN)
47229 + select PAX_MEMORY_UDEREF if (X86 && !XEN)
47230 + select PAX_RANDKSTACK if (X86_TSC && X86)
47231 + select PAX_SEGMEXEC if (X86_32)
47232 + select PAX_PAGEEXEC
47233 + select PAX_EMUPLT if (ALPHA || PARISC || SPARC)
47234 + select PAX_EMUTRAMP if (PARISC)
47235 + select PAX_EMUSIGRT if (PARISC)
47236 + select PAX_ETEXECRELOCS if (ALPHA || IA64 || PARISC)
47237 + select PAX_ELFRELOCS if (PAX_ETEXECRELOCS || (IA64 || PPC || X86))
47238 + select PAX_REFCOUNT if (X86 || SPARC64)
47239 + select PAX_USERCOPY if ((X86 || PPC || SPARC || ARM) && (SLAB || SLUB || SLOB))
47240 + help
47241 + If you say Y here, many of the features of grsecurity will be
47242 + enabled, which will protect you against many kinds of attacks
47243 + against your system. The heightened security comes at a cost
47244 + of an increased chance of incompatibilities with rare software
47245 + on your machine. Since this security level enables PaX, you should
47246 + view <http://pax.grsecurity.net> and read about the PaX
47247 + project. While you are there, download chpax and run it on
47248 + binaries that cause problems with PaX. Also remember that
47249 + since the /proc restrictions are enabled, you must run your
47250 + identd as gid 1001. This security level enables the following
47251 + features in addition to those listed in the low and medium
47252 + security levels:
47253 +
47254 + - Additional /proc restrictions
47255 + - Chmod restrictions in chroot
47256 + - No signals, ptrace, or viewing of processes outside of chroot
47257 + - Capability restrictions in chroot
47258 + - Deny fchdir out of chroot
47259 + - Priority restrictions in chroot
47260 + - Segmentation-based implementation of PaX
47261 + - Mprotect restrictions
47262 + - Removal of addresses from /proc/<pid>/[smaps|maps|stat]
47263 + - Kernel stack randomization
47264 + - Mount/unmount/remount logging
47265 + - Kernel symbol hiding
47266 + - Hardening of module auto-loading
47267 + - Ptrace restrictions
47268 + - Restricted vm86 mode
47269 + - Restricted sysfs/debugfs
47270 + - Active kernel exploit response
47271 +
47272 +config GRKERNSEC_CUSTOM
47273 + bool "Custom"
47274 + help
47275 + If you say Y here, you will be able to configure every grsecurity
47276 + option, which allows you to enable many more features that aren't
47277 + covered in the basic security levels. These additional features
47278 + include TPE, socket restrictions, and the sysctl system for
47279 + grsecurity. It is advised that you read through the help for
47280 + each option to determine its usefulness in your situation.
47281 +
47282 +endchoice
47283 +
47284 +menu "Memory Protections"
47285 +depends on GRKERNSEC
47286 +
47287 +config GRKERNSEC_KMEM
47288 + bool "Deny reading/writing to /dev/kmem, /dev/mem, and /dev/port"
47289 + select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
47290 + help
47291 + If you say Y here, /dev/kmem and /dev/mem won't be allowed to
47292 + be written to or read from to modify or leak the contents of the running
47293 + kernel. /dev/port will also not be allowed to be opened. If you have module
47294 + support disabled, enabling this will close up four ways that are
47295 + currently used to insert malicious code into the running kernel.
47296 + Even with all these features enabled, we still highly recommend that
47297 + you use the RBAC system, as it is still possible for an attacker to
47298 + modify the running kernel through privileged I/O granted by ioperm/iopl.
47299 + If you are not using XFree86, you may be able to stop this additional
47300 + case by enabling the 'Disable privileged I/O' option. Though nothing
47301 + legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
47302 + but only to video memory, which is the only writing we allow in this
47303 + case. If /dev/kmem or /dev/mem are mmaped without PROT_WRITE, they will
47304 + not be allowed to mprotect it with PROT_WRITE later.
47305 + It is highly recommended that you say Y here if you meet all the
47306 + conditions above.
47307 +
47308 +config GRKERNSEC_VM86
47309 + bool "Restrict VM86 mode"
47310 + depends on X86_32
47311 +
47312 + help
47313 + If you say Y here, only processes with CAP_SYS_RAWIO will be able to
47314 + make use of a special execution mode on 32bit x86 processors called
47315 + Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
47316 + video cards and will still work with this option enabled. The purpose
47317 + of the option is to prevent exploitation of emulation errors in
47318 + virtualization of vm86 mode like the one discovered in VMWare in 2009.
47319 + Nearly all users should be able to enable this option.
47320 +
47321 +config GRKERNSEC_IO
47322 + bool "Disable privileged I/O"
47323 + depends on X86
47324 + select RTC_CLASS
47325 + select RTC_INTF_DEV
47326 + select RTC_DRV_CMOS
47327 +
47328 + help
47329 + If you say Y here, all ioperm and iopl calls will return an error.
47330 + Ioperm and iopl can be used to modify the running kernel.
47331 + Unfortunately, some programs need this access to operate properly,
47332 + the most notable of which are XFree86 and hwclock. hwclock can be
47333 + remedied by having RTC support in the kernel, so real-time
47334 + clock support is enabled if this option is enabled, to ensure
47335 + that hwclock operates correctly. XFree86 still will not
47336 + operate correctly with this option enabled, so DO NOT CHOOSE Y
47337 + IF YOU USE XFree86. If you use XFree86 and you still want to
47338 + protect your kernel against modification, use the RBAC system.
47339 +
47340 +config GRKERNSEC_PROC_MEMMAP
47341 + bool "Harden ASLR against information leaks and entropy reduction"
47342 + default y if (PAX_NOEXEC || PAX_ASLR)
47343 + depends on PAX_NOEXEC || PAX_ASLR
47344 + help
47345 + If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
47346 + give no information about the addresses of its mappings if
47347 + PaX features that rely on random addresses are enabled on the task.
47348 + In addition to sanitizing this information and disabling other
47349 + dangerous sources of information, this option causes reads of sensitive
47350 + /proc/<pid> entries where the file descriptor was opened in a different
47351 + task than the one performing the read. Such attempts are logged.
47352 + Finally, this option limits argv/env strings for suid/sgid binaries
47353 + to 1MB to prevent a complete exhaustion of the stack entropy provided
47354 + by ASLR.
47355 + If you use PaX it is essential that you say Y here as it closes up
47356 + several holes that make full ASLR useless for suid/sgid binaries.
47357 +
47358 +config GRKERNSEC_BRUTE
47359 + bool "Deter exploit bruteforcing"
47360 + help
47361 + If you say Y here, attempts to bruteforce exploits against forking
47362 + daemons such as apache or sshd, as well as against suid/sgid binaries
47363 + will be deterred. When a child of a forking daemon is killed by PaX
47364 + or crashes due to an illegal instruction or other suspicious signal,
47365 + the parent process will be delayed 30 seconds upon every subsequent
47366 + fork until the administrator is able to assess the situation and
47367 + restart the daemon.
47368 + In the suid/sgid case, the attempt is logged, the user has all their
47369 + processes terminated, and they are prevented from executing any further
47370 + processes for 15 minutes.
47371 + It is recommended that you also enable signal logging in the auditing
47372 + section so that logs are generated when a process triggers a suspicious
47373 + signal.
47374 + If the sysctl option is enabled, a sysctl option with name
47375 + "deter_bruteforce" is created.
47376 +
47377 +
47378 +config GRKERNSEC_MODHARDEN
47379 + bool "Harden module auto-loading"
47380 + depends on MODULES
47381 + help
47382 + If you say Y here, module auto-loading in response to use of some
47383 + feature implemented by an unloaded module will be restricted to
47384 + root users. Enabling this option helps defend against attacks
47385 + by unprivileged users who abuse the auto-loading behavior to
47386 + cause a vulnerable module to load that is then exploited.
47387 +
47388 + If this option prevents a legitimate use of auto-loading for a
47389 + non-root user, the administrator can execute modprobe manually
47390 + with the exact name of the module mentioned in the alert log.
47391 + Alternatively, the administrator can add the module to the list
47392 + of modules loaded at boot by modifying init scripts.
47393 +
47394 + Modification of init scripts will most likely be needed on
47395 + Ubuntu servers with encrypted home directory support enabled,
47396 + as the first non-root user logging in will cause the ecb(aes),
47397 + ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
47398 +
47399 +config GRKERNSEC_HIDESYM
47400 + bool "Hide kernel symbols"
47401 + help
47402 + If you say Y here, getting information on loaded modules, and
47403 + displaying all kernel symbols through a syscall will be restricted
47404 + to users with CAP_SYS_MODULE. For software compatibility reasons,
47405 + /proc/kallsyms will be restricted to the root user. The RBAC
47406 + system can hide that entry even from root.
47407 +
47408 + This option also prevents leaking of kernel addresses through
47409 + several /proc entries.
47410 +
47411 + Note that this option is only effective provided the following
47412 + conditions are met:
47413 + 1) The kernel using grsecurity is not precompiled by some distribution
47414 + 2) You have also enabled GRKERNSEC_DMESG
47415 + 3) You are using the RBAC system and hiding other files such as your
47416 + kernel image and System.map. Alternatively, enabling this option
47417 + causes the permissions on /boot, /lib/modules, and the kernel
47418 + source directory to change at compile time to prevent
47419 + reading by non-root users.
47420 + If the above conditions are met, this option will aid in providing a
47421 + useful protection against local kernel exploitation of overflows
47422 + and arbitrary read/write vulnerabilities.
47423 +
47424 +config GRKERNSEC_KERN_LOCKOUT
47425 + bool "Active kernel exploit response"
47426 + depends on X86 || ARM || PPC || SPARC
47427 + help
47428 + If you say Y here, when a PaX alert is triggered due to suspicious
47429 + activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
47430 + or an OOPs occurs due to bad memory accesses, instead of just
47431 + terminating the offending process (and potentially allowing
47432 + a subsequent exploit from the same user), we will take one of two
47433 + actions:
47434 + If the user was root, we will panic the system
47435 + If the user was non-root, we will log the attempt, terminate
47436 + all processes owned by the user, then prevent them from creating
47437 + any new processes until the system is restarted
47438 + This deters repeated kernel exploitation/bruteforcing attempts
47439 + and is useful for later forensics.
47440 +
47441 +endmenu
47442 +menu "Role Based Access Control Options"
47443 +depends on GRKERNSEC
47444 +
47445 +config GRKERNSEC_RBAC_DEBUG
47446 + bool
47447 +
47448 +config GRKERNSEC_NO_RBAC
47449 + bool "Disable RBAC system"
47450 + help
47451 + If you say Y here, the /dev/grsec device will be removed from the kernel,
47452 + preventing the RBAC system from being enabled. You should only say Y
47453 + here if you have no intention of using the RBAC system, so as to prevent
47454 + an attacker with root access from misusing the RBAC system to hide files
47455 + and processes when loadable module support and /dev/[k]mem have been
47456 + locked down.
47457 +
47458 +config GRKERNSEC_ACL_HIDEKERN
47459 + bool "Hide kernel processes"
47460 + help
47461 + If you say Y here, all kernel threads will be hidden to all
47462 + processes but those whose subject has the "view hidden processes"
47463 + flag.
47464 +
47465 +config GRKERNSEC_ACL_MAXTRIES
47466 + int "Maximum tries before password lockout"
47467 + default 3
47468 + help
47469 + This option enforces the maximum number of times a user can attempt
47470 + to authorize themselves with the grsecurity RBAC system before being
47471 + denied the ability to attempt authorization again for a specified time.
47472 + The lower the number, the harder it will be to brute-force a password.
47473 +
47474 +config GRKERNSEC_ACL_TIMEOUT
47475 + int "Time to wait after max password tries, in seconds"
47476 + default 30
47477 + help
47478 + This option specifies the time the user must wait after attempting to
47479 + authorize to the RBAC system with the maximum number of invalid
47480 + passwords. The higher the number, the harder it will be to brute-force
47481 + a password.
47482 +
47483 +endmenu
47484 +menu "Filesystem Protections"
47485 +depends on GRKERNSEC
47486 +
47487 +config GRKERNSEC_PROC
47488 + bool "Proc restrictions"
47489 + help
47490 + If you say Y here, the permissions of the /proc filesystem
47491 + will be altered to enhance system security and privacy. You MUST
47492 + choose either a user only restriction or a user and group restriction.
47493 + Depending upon the option you choose, you can either restrict users to
47494 + see only the processes they themselves run, or choose a group that can
47495 + view all processes and files normally restricted to root if you choose
47496 + the "restrict to user only" option. NOTE: If you're running identd as
47497 + a non-root user, you will have to run it as the group you specify here.
47498 +
47499 +config GRKERNSEC_PROC_USER
47500 + bool "Restrict /proc to user only"
47501 + depends on GRKERNSEC_PROC
47502 + help
47503 + If you say Y here, non-root users will only be able to view their own
47504 + processes, and restricts them from viewing network-related information,
47505 + and viewing kernel symbol and module information.
47506 +
47507 +config GRKERNSEC_PROC_USERGROUP
47508 + bool "Allow special group"
47509 + depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
47510 + help
47511 + If you say Y here, you will be able to select a group that will be
47512 + able to view all processes and network-related information. If you've
47513 + enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
47514 + remain hidden. This option is useful if you want to run identd as
47515 + a non-root user.
47516 +
47517 +config GRKERNSEC_PROC_GID
47518 + int "GID for special group"
47519 + depends on GRKERNSEC_PROC_USERGROUP
47520 + default 1001
47521 +
47522 +config GRKERNSEC_PROC_ADD
47523 + bool "Additional restrictions"
47524 + depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
47525 + help
47526 + If you say Y here, additional restrictions will be placed on
47527 + /proc that keep normal users from viewing device information and
47528 + slabinfo information that could be useful for exploits.
47529 +
47530 +config GRKERNSEC_LINK
47531 + bool "Linking restrictions"
47532 + help
47533 + If you say Y here, /tmp race exploits will be prevented, since users
47534 + will no longer be able to follow symlinks owned by other users in
47535 + world-writable +t directories (e.g. /tmp), unless the owner of the
47536 + symlink is the owner of the directory. Users will also not be
47537 + able to hardlink to files they do not own. If the sysctl option is
47538 + enabled, a sysctl option with name "linking_restrictions" is created.
47539 +
47540 +config GRKERNSEC_FIFO
47541 + bool "FIFO restrictions"
47542 + help
47543 + If you say Y here, users will not be able to write to FIFOs they don't
47544 + own in world-writable +t directories (e.g. /tmp), unless the owner of
47545 + the FIFO is the same owner of the directory it's held in. If the sysctl
47546 + option is enabled, a sysctl option with name "fifo_restrictions" is
47547 + created.
47548 +
47549 +config GRKERNSEC_SYSFS_RESTRICT
47550 + bool "Sysfs/debugfs restriction"
47551 + depends on SYSFS
47552 + help
47553 + If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
47554 + any filesystem normally mounted under it (e.g. debugfs) will be
47555 + mostly accessible only by root. These filesystems generally provide access
47556 + to hardware and debug information that isn't appropriate for unprivileged
47557 + users of the system. Sysfs and debugfs have also become a large source
47558 + of new vulnerabilities, ranging from infoleaks to local compromise.
47559 + There has been very little oversight with an eye toward security involved
47560 + in adding new exporters of information to these filesystems, so their
47561 + use is discouraged.
47562 + For reasons of compatibility, a few directories have been whitelisted
47563 + for access by non-root users:
47564 + /sys/fs/selinux
47565 + /sys/fs/fuse
47566 + /sys/devices/system/cpu
47567 +
47568 +config GRKERNSEC_ROFS
47569 + bool "Runtime read-only mount protection"
47570 + help
47571 + If you say Y here, a sysctl option with name "romount_protect" will
47572 + be created. By setting this option to 1 at runtime, filesystems
47573 + will be protected in the following ways:
47574 + * No new writable mounts will be allowed
47575 + * Existing read-only mounts won't be able to be remounted read/write
47576 + * Write operations will be denied on all block devices
47577 + This option acts independently of grsec_lock: once it is set to 1,
47578 + it cannot be turned off. Therefore, please be mindful of the resulting
47579 + behavior if this option is enabled in an init script on a read-only
47580 + filesystem. This feature is mainly intended for secure embedded systems.
47581 +
47582 +config GRKERNSEC_CHROOT
47583 + bool "Chroot jail restrictions"
47584 + help
47585 + If you say Y here, you will be able to choose several options that will
47586 + make breaking out of a chrooted jail much more difficult. If you
47587 + encounter no software incompatibilities with the following options, it
47588 + is recommended that you enable each one.
47589 +
47590 +config GRKERNSEC_CHROOT_MOUNT
47591 + bool "Deny mounts"
47592 + depends on GRKERNSEC_CHROOT
47593 + help
47594 + If you say Y here, processes inside a chroot will not be able to
47595 + mount or remount filesystems. If the sysctl option is enabled, a
47596 + sysctl option with name "chroot_deny_mount" is created.
47597 +
47598 +config GRKERNSEC_CHROOT_DOUBLE
47599 + bool "Deny double-chroots"
47600 + depends on GRKERNSEC_CHROOT
47601 + help
47602 + If you say Y here, processes inside a chroot will not be able to chroot
47603 + again outside the chroot. This is a widely used method of breaking
47604 + out of a chroot jail and should not be allowed. If the sysctl
47605 + option is enabled, a sysctl option with name
47606 + "chroot_deny_chroot" is created.
47607 +
47608 +config GRKERNSEC_CHROOT_PIVOT
47609 + bool "Deny pivot_root in chroot"
47610 + depends on GRKERNSEC_CHROOT
47611 + help
47612 + If you say Y here, processes inside a chroot will not be able to use
47613 + a function called pivot_root() that was introduced in Linux 2.3.41. It
47614 + works similar to chroot in that it changes the root filesystem. This
47615 + function could be misused in a chrooted process to attempt to break out
47616 + of the chroot, and therefore should not be allowed. If the sysctl
47617 + option is enabled, a sysctl option with name "chroot_deny_pivot" is
47618 + created.
47619 +
47620 +config GRKERNSEC_CHROOT_CHDIR
47621 + bool "Enforce chdir(\"/\") on all chroots"
47622 + depends on GRKERNSEC_CHROOT
47623 + help
47624 + If you say Y here, the current working directory of all newly-chrooted
47625 + applications will be set to the root directory of the chroot.
47626 + The man page on chroot(2) states:
47627 + Note that this call does not change the current working
47628 + directory, so that `.' can be outside the tree rooted at
47629 + `/'. In particular, the super-user can escape from a
47630 + `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
47631 +
47632 + It is recommended that you say Y here, since it's not known to break
47633 + any software. If the sysctl option is enabled, a sysctl option with
47634 + name "chroot_enforce_chdir" is created.
47635 +
47636 +config GRKERNSEC_CHROOT_CHMOD
47637 + bool "Deny (f)chmod +s"
47638 + depends on GRKERNSEC_CHROOT
47639 + help
47640 + If you say Y here, processes inside a chroot will not be able to chmod
47641 + or fchmod files to make them have suid or sgid bits. This protects
47642 + against another published method of breaking a chroot. If the sysctl
47643 + option is enabled, a sysctl option with name "chroot_deny_chmod" is
47644 + created.
47645 +
47646 +config GRKERNSEC_CHROOT_FCHDIR
47647 + bool "Deny fchdir out of chroot"
47648 + depends on GRKERNSEC_CHROOT
47649 + help
47650 + If you say Y here, a well-known method of breaking chroots by fchdir'ing
47651 + to a file descriptor of the chrooting process that points to a directory
47652 + outside the filesystem will be stopped. If the sysctl option
47653 + is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
47654 +
47655 +config GRKERNSEC_CHROOT_MKNOD
47656 + bool "Deny mknod"
47657 + depends on GRKERNSEC_CHROOT
47658 + help
47659 + If you say Y here, processes inside a chroot will not be allowed to
47660 + mknod. The problem with using mknod inside a chroot is that it
47661 + would allow an attacker to create a device entry that is the same
47662 + as one on the physical root of your system, which could range from
47663 + anything from the console device to a device for your harddrive (which
47664 + they could then use to wipe the drive or steal data). It is recommended
47665 + that you say Y here, unless you run into software incompatibilities.
47666 + If the sysctl option is enabled, a sysctl option with name
47667 + "chroot_deny_mknod" is created.
47668 +
47669 +config GRKERNSEC_CHROOT_SHMAT
47670 + bool "Deny shmat() out of chroot"
47671 + depends on GRKERNSEC_CHROOT
47672 + help
47673 + If you say Y here, processes inside a chroot will not be able to attach
47674 + to shared memory segments that were created outside of the chroot jail.
47675 + It is recommended that you say Y here. If the sysctl option is enabled,
47676 + a sysctl option with name "chroot_deny_shmat" is created.
47677 +
47678 +config GRKERNSEC_CHROOT_UNIX
47679 + bool "Deny access to abstract AF_UNIX sockets out of chroot"
47680 + depends on GRKERNSEC_CHROOT
47681 + help
47682 + If you say Y here, processes inside a chroot will not be able to
47683 + connect to abstract (meaning not belonging to a filesystem) Unix
47684 + domain sockets that were bound outside of a chroot. It is recommended
47685 + that you say Y here. If the sysctl option is enabled, a sysctl option
47686 + with name "chroot_deny_unix" is created.
47687 +
47688 +config GRKERNSEC_CHROOT_FINDTASK
47689 + bool "Protect outside processes"
47690 + depends on GRKERNSEC_CHROOT
47691 + help
47692 + If you say Y here, processes inside a chroot will not be able to
47693 + kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
47694 + getsid, or view any process outside of the chroot. If the sysctl
47695 + option is enabled, a sysctl option with name "chroot_findtask" is
47696 + created.
47697 +
47698 +config GRKERNSEC_CHROOT_NICE
47699 + bool "Restrict priority changes"
47700 + depends on GRKERNSEC_CHROOT
47701 + help
47702 + If you say Y here, processes inside a chroot will not be able to raise
47703 + the priority of processes in the chroot, or alter the priority of
47704 + processes outside the chroot. This provides more security than simply
47705 + removing CAP_SYS_NICE from the process' capability set. If the
47706 + sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
47707 + is created.
47708 +
47709 +config GRKERNSEC_CHROOT_SYSCTL
47710 + bool "Deny sysctl writes"
47711 + depends on GRKERNSEC_CHROOT
47712 + help
47713 + If you say Y here, an attacker in a chroot will not be able to
47714 + write to sysctl entries, either by sysctl(2) or through a /proc
47715 + interface. It is strongly recommended that you say Y here. If the
47716 + sysctl option is enabled, a sysctl option with name
47717 + "chroot_deny_sysctl" is created.
47718 +
47719 +config GRKERNSEC_CHROOT_CAPS
47720 + bool "Capability restrictions"
47721 + depends on GRKERNSEC_CHROOT
47722 + help
47723 + If you say Y here, the capabilities on all processes within a
47724 + chroot jail will be lowered to stop module insertion, raw i/o,
47725 + system and net admin tasks, rebooting the system, modifying immutable
47726 + files, modifying IPC owned by another, and changing the system time.
47727 + This is left an option because it can break some apps. Disable this
47728 + if your chrooted apps are having problems performing those kinds of
47729 + tasks. If the sysctl option is enabled, a sysctl option with
47730 + name "chroot_caps" is created.
47731 +
47732 +endmenu
47733 +menu "Kernel Auditing"
47734 +depends on GRKERNSEC
47735 +
47736 +config GRKERNSEC_AUDIT_GROUP
47737 + bool "Single group for auditing"
47738 + help
47739 + If you say Y here, the exec, chdir, and (un)mount logging features
47740 + will only operate on a group you specify. This option is recommended
47741 + if you only want to watch certain users instead of having a large
47742 + amount of logs from the entire system. If the sysctl option is enabled,
47743 + a sysctl option with name "audit_group" is created.
47744 +
47745 +config GRKERNSEC_AUDIT_GID
47746 + int "GID for auditing"
47747 + depends on GRKERNSEC_AUDIT_GROUP
47748 + default 1007
47749 +
47750 +config GRKERNSEC_EXECLOG
47751 + bool "Exec logging"
47752 + help
47753 + If you say Y here, all execve() calls will be logged (since the
47754 + other exec*() calls are frontends to execve(), all execution
47755 + will be logged). Useful for shell-servers that like to keep track
47756 + of their users. If the sysctl option is enabled, a sysctl option with
47757 + name "exec_logging" is created.
47758 + WARNING: This option when enabled will produce a LOT of logs, especially
47759 + on an active system.
47760 +
47761 +config GRKERNSEC_RESLOG
47762 + bool "Resource logging"
47763 + help
47764 + If you say Y here, all attempts to overstep resource limits will
47765 + be logged with the resource name, the requested size, and the current
47766 + limit. It is highly recommended that you say Y here. If the sysctl
47767 + option is enabled, a sysctl option with name "resource_logging" is
47768 + created. If the RBAC system is enabled, the sysctl value is ignored.
47769 +
47770 +config GRKERNSEC_CHROOT_EXECLOG
47771 + bool "Log execs within chroot"
47772 + help
47773 + If you say Y here, all executions inside a chroot jail will be logged
47774 + to syslog. This can cause a large amount of logs if certain
47775 + applications (eg. djb's daemontools) are installed on the system, and
47776 + is therefore left as an option. If the sysctl option is enabled, a
47777 + sysctl option with name "chroot_execlog" is created.
47778 +
47779 +config GRKERNSEC_AUDIT_PTRACE
47780 + bool "Ptrace logging"
47781 + help
47782 + If you say Y here, all attempts to attach to a process via ptrace
47783 + will be logged. If the sysctl option is enabled, a sysctl option
47784 + with name "audit_ptrace" is created.
47785 +
47786 +config GRKERNSEC_AUDIT_CHDIR
47787 + bool "Chdir logging"
47788 + help
47789 + If you say Y here, all chdir() calls will be logged. If the sysctl
47790 + option is enabled, a sysctl option with name "audit_chdir" is created.
47791 +
47792 +config GRKERNSEC_AUDIT_MOUNT
47793 + bool "(Un)Mount logging"
47794 + help
47795 + If you say Y here, all mounts and unmounts will be logged. If the
47796 + sysctl option is enabled, a sysctl option with name "audit_mount" is
47797 + created.
47798 +
47799 +config GRKERNSEC_SIGNAL
47800 + bool "Signal logging"
47801 + help
47802 + If you say Y here, certain important signals will be logged, such as
47803 + SIGSEGV, which will as a result inform you of when an error in a program
47804 + occurred, which in some cases could mean a possible exploit attempt.
47805 + If the sysctl option is enabled, a sysctl option with name
47806 + "signal_logging" is created.
47807 +
47808 +config GRKERNSEC_FORKFAIL
47809 + bool "Fork failure logging"
47810 + help
47811 + If you say Y here, all failed fork() attempts will be logged.
47812 + This could suggest a fork bomb, or someone attempting to overstep
47813 + their process limit. If the sysctl option is enabled, a sysctl option
47814 + with name "forkfail_logging" is created.
47815 +
47816 +config GRKERNSEC_TIME
47817 + bool "Time change logging"
47818 + help
47819 + If you say Y here, any changes of the system clock will be logged.
47820 + If the sysctl option is enabled, a sysctl option with name
47821 + "timechange_logging" is created.
47822 +
47823 +config GRKERNSEC_PROC_IPADDR
47824 + bool "/proc/<pid>/ipaddr support"
47825 + help
47826 + If you say Y here, a new entry will be added to each /proc/<pid>
47827 + directory that contains the IP address of the person using the task.
47828 + The IP is carried across local TCP and AF_UNIX stream sockets.
47829 + This information can be useful for IDS/IPSes to perform remote response
47830 + to a local attack. The entry is readable by only the owner of the
47831 + process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
47832 + the RBAC system), and thus does not create privacy concerns.
47833 +
47834 +config GRKERNSEC_RWXMAP_LOG
47835 + bool 'Denied RWX mmap/mprotect logging'
47836 + depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
47837 + help
47838 + If you say Y here, calls to mmap() and mprotect() with explicit
47839 + usage of PROT_WRITE and PROT_EXEC together will be logged when
47840 + denied by the PAX_MPROTECT feature. If the sysctl option is
47841 + enabled, a sysctl option with name "rwxmap_logging" is created.
47842 +
47843 +config GRKERNSEC_AUDIT_TEXTREL
47844 + bool 'ELF text relocations logging (READ HELP)'
47845 + depends on PAX_MPROTECT
47846 + help
47847 + If you say Y here, text relocations will be logged with the filename
47848 + of the offending library or binary. The purpose of the feature is
47849 + to help Linux distribution developers get rid of libraries and
47850 + binaries that need text relocations which hinder the future progress
47851 + of PaX. Only Linux distribution developers should say Y here, and
47852 + never on a production machine, as this option creates an information
47853 + leak that could aid an attacker in defeating the randomization of
47854 + a single memory region. If the sysctl option is enabled, a sysctl
47855 + option with name "audit_textrel" is created.
47856 +
47857 +endmenu
47858 +
47859 +menu "Executable Protections"
47860 +depends on GRKERNSEC
47861 +
47862 +config GRKERNSEC_DMESG
47863 + bool "Dmesg(8) restriction"
47864 + help
47865 + If you say Y here, non-root users will not be able to use dmesg(8)
47866 + to view up to the last 4kb of messages in the kernel's log buffer.
47867 + The kernel's log buffer often contains kernel addresses and other
47868 + identifying information useful to an attacker in fingerprinting a
47869 + system for a targeted exploit.
47870 + If the sysctl option is enabled, a sysctl option with name "dmesg" is
47871 + created.
47872 +
47873 +config GRKERNSEC_HARDEN_PTRACE
47874 + bool "Deter ptrace-based process snooping"
47875 + help
47876 + If you say Y here, TTY sniffers and other malicious monitoring
47877 + programs implemented through ptrace will be defeated. If you
47878 + have been using the RBAC system, this option has already been
47879 + enabled for several years for all users, with the ability to make
47880 + fine-grained exceptions.
47881 +
47882 + This option only affects the ability of non-root users to ptrace
47883 + processes that are not a descendent of the ptracing process.
47884 + This means that strace ./binary and gdb ./binary will still work,
47885 + but attaching to arbitrary processes will not. If the sysctl
47886 + option is enabled, a sysctl option with name "harden_ptrace" is
47887 + created.
47888 +
47889 +config GRKERNSEC_PTRACE_READEXEC
47890 + bool "Require read access to ptrace sensitive binaries"
47891 + help
47892 + If you say Y here, unprivileged users will not be able to ptrace unreadable
47893 + binaries. This option is useful in environments that
47894 + remove the read bits (e.g. file mode 4711) from suid binaries to
47895 + prevent infoleaking of their contents. This option adds
47896 + consistency to the use of that file mode, as the binary could normally
47897 + be read out when run without privileges while ptracing.
47898 +
47899 + If the sysctl option is enabled, a sysctl option with name "ptrace_readexec"
47900 + is created.
47901 +
47902 +config GRKERNSEC_SETXID
47903 + bool "Enforce consistent multithreaded privileges"
47904 + help
47905 + If you say Y here, a change from a root uid to a non-root uid
47906 + in a multithreaded application will cause the resulting uids,
47907 + gids, supplementary groups, and capabilities in that thread
47908 + to be propagated to the other threads of the process. In most
47909 + cases this is unnecessary, as glibc will emulate this behavior
47910 + on behalf of the application. Other libcs do not act in the
47911 + same way, allowing the other threads of the process to continue
47912 + running with root privileges. If the sysctl option is enabled,
47913 + a sysctl option with name "consistent_setxid" is created.
47914 +
47915 +config GRKERNSEC_TPE
47916 + bool "Trusted Path Execution (TPE)"
47917 + help
47918 + If you say Y here, you will be able to choose a gid to add to the
47919 + supplementary groups of users you want to mark as "untrusted."
47920 + These users will not be able to execute any files that are not in
47921 + root-owned directories writable only by root. If the sysctl option
47922 + is enabled, a sysctl option with name "tpe" is created.
47923 +
47924 +config GRKERNSEC_TPE_ALL
47925 + bool "Partially restrict all non-root users"
47926 + depends on GRKERNSEC_TPE
47927 + help
47928 + If you say Y here, all non-root users will be covered under
47929 + a weaker TPE restriction. This is separate from, and in addition to,
47930 + the main TPE options that you have selected elsewhere. Thus, if a
47931 + "trusted" GID is chosen, this restriction applies to even that GID.
47932 + Under this restriction, all non-root users will only be allowed to
47933 + execute files in directories they own that are not group or
47934 + world-writable, or in directories owned by root and writable only by
47935 + root. If the sysctl option is enabled, a sysctl option with name
47936 + "tpe_restrict_all" is created.
47937 +
47938 +config GRKERNSEC_TPE_INVERT
47939 + bool "Invert GID option"
47940 + depends on GRKERNSEC_TPE
47941 + help
47942 + If you say Y here, the group you specify in the TPE configuration will
47943 + decide what group TPE restrictions will be *disabled* for. This
47944 + option is useful if you want TPE restrictions to be applied to most
47945 + users on the system. If the sysctl option is enabled, a sysctl option
47946 + with name "tpe_invert" is created. Unlike other sysctl options, this
47947 + entry will default to on for backward-compatibility.
47948 +
47949 +config GRKERNSEC_TPE_GID
47950 + int "GID for untrusted users"
47951 + depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
47952 + default 1005
47953 + help
47954 + Setting this GID determines what group TPE restrictions will be
47955 + *enabled* for. If the sysctl option is enabled, a sysctl option
47956 + with name "tpe_gid" is created.
47957 +
47958 +config GRKERNSEC_TPE_GID
47959 + int "GID for trusted users"
47960 + depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
47961 + default 1005
47962 + help
47963 + Setting this GID determines what group TPE restrictions will be
47964 + *disabled* for. If the sysctl option is enabled, a sysctl option
47965 + with name "tpe_gid" is created.
47966 +
47967 +endmenu
47968 +menu "Network Protections"
47969 +depends on GRKERNSEC
47970 +
47971 +config GRKERNSEC_RANDNET
47972 + bool "Larger entropy pools"
47973 + help
47974 + If you say Y here, the entropy pools used for many features of Linux
47975 + and grsecurity will be doubled in size. Since several grsecurity
47976 + features use additional randomness, it is recommended that you say Y
47977 + here. Saying Y here has a similar effect as modifying
47978 + /proc/sys/kernel/random/poolsize.
47979 +
47980 +config GRKERNSEC_BLACKHOLE
47981 + bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
47982 + depends on NET
47983 + help
47984 + If you say Y here, neither TCP resets nor ICMP
47985 + destination-unreachable packets will be sent in response to packets
47986 + sent to ports for which no associated listening process exists.
47987 + This feature supports both IPV4 and IPV6 and exempts the
47988 + loopback interface from blackholing. Enabling this feature
47989 + makes a host more resilient to DoS attacks and reduces network
47990 + visibility against scanners.
47991 +
47992 + The blackhole feature as-implemented is equivalent to the FreeBSD
47993 + blackhole feature, as it prevents RST responses to all packets, not
47994 + just SYNs. Under most application behavior this causes no
47995 + problems, but applications (like haproxy) may not close certain
47996 + connections in a way that cleanly terminates them on the remote
47997 + end, leaving the remote host in LAST_ACK state. Because of this
47998 + side-effect and to prevent intentional LAST_ACK DoSes, this
47999 + feature also adds automatic mitigation against such attacks.
48000 + The mitigation drastically reduces the amount of time a socket
48001 + can spend in LAST_ACK state. If you're using haproxy and not
48002 + all servers it connects to have this option enabled, consider
48003 + disabling this feature on the haproxy host.
48004 +
48005 + If the sysctl option is enabled, two sysctl options with names
48006 + "ip_blackhole" and "lastack_retries" will be created.
48007 + While "ip_blackhole" takes the standard zero/non-zero on/off
48008 + toggle, "lastack_retries" uses the same kinds of values as
48009 + "tcp_retries1" and "tcp_retries2". The default value of 4
48010 + prevents a socket from lasting more than 45 seconds in LAST_ACK
48011 + state.
48012 +
48013 +config GRKERNSEC_SOCKET
48014 + bool "Socket restrictions"
48015 + depends on NET
48016 + help
48017 + If you say Y here, you will be able to choose from several options.
48018 + If you assign a GID on your system and add it to the supplementary
48019 + groups of users you want to restrict socket access to, this patch
48020 + will perform up to three things, based on the option(s) you choose.
48021 +
48022 +config GRKERNSEC_SOCKET_ALL
48023 + bool "Deny any sockets to group"
48024 + depends on GRKERNSEC_SOCKET
48025 + help
48026 + If you say Y here, you will be able to choose a GID whose users will
48027 + be unable to connect to other hosts from your machine or run server
48028 + applications from your machine. If the sysctl option is enabled, a
48029 + sysctl option with name "socket_all" is created.
48030 +
48031 +config GRKERNSEC_SOCKET_ALL_GID
48032 + int "GID to deny all sockets for"
48033 + depends on GRKERNSEC_SOCKET_ALL
48034 + default 1004
48035 + help
48036 + Here you can choose the GID to disable socket access for. Remember to
48037 + add the users you want socket access disabled for to the GID
48038 + specified here. If the sysctl option is enabled, a sysctl option
48039 + with name "socket_all_gid" is created.
48040 +
48041 +config GRKERNSEC_SOCKET_CLIENT
48042 + bool "Deny client sockets to group"
48043 + depends on GRKERNSEC_SOCKET
48044 + help
48045 + If you say Y here, you will be able to choose a GID whose users will
48046 + be unable to connect to other hosts from your machine, but will be
48047 + able to run servers. If this option is enabled, all users in the group
48048 + you specify will have to use passive mode when initiating ftp transfers
48049 + from the shell on your machine. If the sysctl option is enabled, a
48050 + sysctl option with name "socket_client" is created.
48051 +
48052 +config GRKERNSEC_SOCKET_CLIENT_GID
48053 + int "GID to deny client sockets for"
48054 + depends on GRKERNSEC_SOCKET_CLIENT
48055 + default 1003
48056 + help
48057 + Here you can choose the GID to disable client socket access for.
48058 + Remember to add the users you want client socket access disabled for to
48059 + the GID specified here. If the sysctl option is enabled, a sysctl
48060 + option with name "socket_client_gid" is created.
48061 +
48062 +config GRKERNSEC_SOCKET_SERVER
48063 + bool "Deny server sockets to group"
48064 + depends on GRKERNSEC_SOCKET
48065 + help
48066 + If you say Y here, you will be able to choose a GID whose users will
48067 + be unable to run server applications from your machine. If the sysctl
48068 + option is enabled, a sysctl option with name "socket_server" is created.
48069 +
48070 +config GRKERNSEC_SOCKET_SERVER_GID
48071 + int "GID to deny server sockets for"
48072 + depends on GRKERNSEC_SOCKET_SERVER
48073 + default 1002
48074 + help
48075 + Here you can choose the GID to disable server socket access for.
48076 + Remember to add the users you want server socket access disabled for to
48077 + the GID specified here. If the sysctl option is enabled, a sysctl
48078 + option with name "socket_server_gid" is created.
48079 +
48080 +endmenu
48081 +menu "Sysctl support"
48082 +depends on GRKERNSEC && SYSCTL
48083 +
48084 +config GRKERNSEC_SYSCTL
48085 + bool "Sysctl support"
48086 + help
48087 + If you say Y here, you will be able to change the options that
48088 + grsecurity runs with at bootup, without having to recompile your
48089 + kernel. You can echo values to files in /proc/sys/kernel/grsecurity
48090 + to enable (1) or disable (0) various features. All the sysctl entries
48091 + are mutable until the "grsec_lock" entry is set to a non-zero value.
48092 + All features enabled in the kernel configuration are disabled at boot
48093 + if you do not say Y to the "Turn on features by default" option.
48094 + All options should be set at startup, and the grsec_lock entry should
48095 + be set to a non-zero value after all the options are set.
48096 + *THIS IS EXTREMELY IMPORTANT*
48097 +
48098 +config GRKERNSEC_SYSCTL_DISTRO
48099 + bool "Extra sysctl support for distro makers (READ HELP)"
48100 + depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
48101 + help
48102 + If you say Y here, additional sysctl options will be created
48103 + for features that affect processes running as root. Therefore,
48104 + it is critical when using this option that the grsec_lock entry be
48105 + enabled after boot. Only distros with prebuilt kernel packages
48106 + with this option enabled that can ensure grsec_lock is enabled
48107 + after boot should use this option.
48108 + *Failure to set grsec_lock after boot makes all grsec features
48109 + this option covers useless*
48110 +
48111 + Currently this option creates the following sysctl entries:
48112 + "Disable Privileged I/O": "disable_priv_io"
48113 +
48114 +config GRKERNSEC_SYSCTL_ON
48115 + bool "Turn on features by default"
48116 + depends on GRKERNSEC_SYSCTL
48117 + help
48118 + If you say Y here, instead of having all features enabled in the
48119 + kernel configuration disabled at boot time, the features will be
48120 + enabled at boot time. It is recommended you say Y here unless
48121 + there is some reason you would want all sysctl-tunable features to
48122 + be disabled by default. As mentioned elsewhere, it is important
48123 + to enable the grsec_lock entry once you have finished modifying
48124 + the sysctl entries.
48125 +
48126 +endmenu
48127 +menu "Logging Options"
48128 +depends on GRKERNSEC
48129 +
48130 +config GRKERNSEC_FLOODTIME
48131 + int "Seconds in between log messages (minimum)"
48132 + default 10
48133 + help
48134 + This option allows you to enforce the number of seconds between
48135 + grsecurity log messages. The default should be suitable for most
48136 + people, however, if you choose to change it, choose a value small enough
48137 + to allow informative logs to be produced, but large enough to
48138 + prevent flooding.
48139 +
48140 +config GRKERNSEC_FLOODBURST
48141 + int "Number of messages in a burst (maximum)"
48142 + default 6
48143 + help
48144 + This option allows you to choose the maximum number of messages allowed
48145 + within the flood time interval you chose in a separate option. The
48146 + default should be suitable for most people, however if you find that
48147 + many of your logs are being interpreted as flooding, you may want to
48148 + raise this value.
48149 +
48150 +endmenu
48151 +
48152 +endmenu
48153 diff --git a/grsecurity/Makefile b/grsecurity/Makefile
48154 new file mode 100644
48155 index 0000000..1b9afa9
48156 --- /dev/null
48157 +++ b/grsecurity/Makefile
48158 @@ -0,0 +1,38 @@
48159 +# grsecurity's ACL system was originally written in 2001 by Michael Dalton
48160 +# during 2001-2009 it has been completely redesigned by Brad Spengler
48161 +# into an RBAC system
48162 +#
48163 +# All code in this directory and various hooks inserted throughout the kernel
48164 +# are copyright Brad Spengler - Open Source Security, Inc., and released
48165 +# under the GPL v2 or higher
48166 +
48167 +KBUILD_CFLAGS += -Werror
48168 +
48169 +obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
48170 + grsec_mount.o grsec_sig.o grsec_sysctl.o \
48171 + grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
48172 +
48173 +obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
48174 + gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
48175 + gracl_learn.o grsec_log.o
48176 +obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
48177 +
48178 +ifdef CONFIG_NET
48179 +obj-y += grsec_sock.o
48180 +obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
48181 +endif
48182 +
48183 +ifndef CONFIG_GRKERNSEC
48184 +obj-y += grsec_disabled.o
48185 +endif
48186 +
48187 +ifdef CONFIG_GRKERNSEC_HIDESYM
48188 +extra-y := grsec_hidesym.o
48189 +$(obj)/grsec_hidesym.o:
48190 + @-chmod -f 500 /boot
48191 + @-chmod -f 500 /lib/modules
48192 + @-chmod -f 500 /lib64/modules
48193 + @-chmod -f 500 /lib32/modules
48194 + @-chmod -f 700 .
48195 + @echo ' grsec: protected kernel image paths'
48196 +endif
48197 diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
48198 new file mode 100644
48199 index 0000000..7715893
48200 --- /dev/null
48201 +++ b/grsecurity/gracl.c
48202 @@ -0,0 +1,4164 @@
48203 +#include <linux/kernel.h>
48204 +#include <linux/module.h>
48205 +#include <linux/sched.h>
48206 +#include <linux/mm.h>
48207 +#include <linux/file.h>
48208 +#include <linux/fs.h>
48209 +#include <linux/namei.h>
48210 +#include <linux/mount.h>
48211 +#include <linux/tty.h>
48212 +#include <linux/proc_fs.h>
48213 +#include <linux/lglock.h>
48214 +#include <linux/slab.h>
48215 +#include <linux/vmalloc.h>
48216 +#include <linux/types.h>
48217 +#include <linux/sysctl.h>
48218 +#include <linux/netdevice.h>
48219 +#include <linux/ptrace.h>
48220 +#include <linux/gracl.h>
48221 +#include <linux/gralloc.h>
48222 +#include <linux/security.h>
48223 +#include <linux/grinternal.h>
48224 +#include <linux/pid_namespace.h>
48225 +#include <linux/fdtable.h>
48226 +#include <linux/percpu.h>
48227 +
48228 +#include <asm/uaccess.h>
48229 +#include <asm/errno.h>
48230 +#include <asm/mman.h>
48231 +
48232 +static struct acl_role_db acl_role_set;
48233 +static struct name_db name_set;
48234 +static struct inodev_db inodev_set;
48235 +
48236 +/* for keeping track of userspace pointers used for subjects, so we
48237 + can share references in the kernel as well
48238 +*/
48239 +
48240 +static struct path real_root;
48241 +
48242 +static struct acl_subj_map_db subj_map_set;
48243 +
48244 +static struct acl_role_label *default_role;
48245 +
48246 +static struct acl_role_label *role_list;
48247 +
48248 +static u16 acl_sp_role_value;
48249 +
48250 +extern char *gr_shared_page[4];
48251 +static DEFINE_MUTEX(gr_dev_mutex);
48252 +DEFINE_RWLOCK(gr_inode_lock);
48253 +
48254 +struct gr_arg *gr_usermode;
48255 +
48256 +static unsigned int gr_status __read_only = GR_STATUS_INIT;
48257 +
48258 +extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
48259 +extern void gr_clear_learn_entries(void);
48260 +
48261 +#ifdef CONFIG_GRKERNSEC_RESLOG
48262 +extern void gr_log_resource(const struct task_struct *task,
48263 + const int res, const unsigned long wanted, const int gt);
48264 +#endif
48265 +
48266 +unsigned char *gr_system_salt;
48267 +unsigned char *gr_system_sum;
48268 +
48269 +static struct sprole_pw **acl_special_roles = NULL;
48270 +static __u16 num_sprole_pws = 0;
48271 +
48272 +static struct acl_role_label *kernel_role = NULL;
48273 +
48274 +static unsigned int gr_auth_attempts = 0;
48275 +static unsigned long gr_auth_expires = 0UL;
48276 +
48277 +#ifdef CONFIG_NET
48278 +extern struct vfsmount *sock_mnt;
48279 +#endif
48280 +
48281 +extern struct vfsmount *pipe_mnt;
48282 +extern struct vfsmount *shm_mnt;
48283 +#ifdef CONFIG_HUGETLBFS
48284 +extern struct vfsmount *hugetlbfs_vfsmount;
48285 +#endif
48286 +
48287 +static struct acl_object_label *fakefs_obj_rw;
48288 +static struct acl_object_label *fakefs_obj_rwx;
48289 +
48290 +extern int gr_init_uidset(void);
48291 +extern void gr_free_uidset(void);
48292 +extern void gr_remove_uid(uid_t uid);
48293 +extern int gr_find_uid(uid_t uid);
48294 +
48295 +DECLARE_BRLOCK(vfsmount_lock);
48296 +
48297 +__inline__ int
48298 +gr_acl_is_enabled(void)
48299 +{
48300 + return (gr_status & GR_READY);
48301 +}
48302 +
48303 +#ifdef CONFIG_BTRFS_FS
48304 +extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
48305 +extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
48306 +#endif
48307 +
48308 +static inline dev_t __get_dev(const struct dentry *dentry)
48309 +{
48310 +#ifdef CONFIG_BTRFS_FS
48311 + if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
48312 + return get_btrfs_dev_from_inode(dentry->d_inode);
48313 + else
48314 +#endif
48315 + return dentry->d_inode->i_sb->s_dev;
48316 +}
48317 +
48318 +dev_t gr_get_dev_from_dentry(struct dentry *dentry)
48319 +{
48320 + return __get_dev(dentry);
48321 +}
48322 +
48323 +static char gr_task_roletype_to_char(struct task_struct *task)
48324 +{
48325 + switch (task->role->roletype &
48326 + (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
48327 + GR_ROLE_SPECIAL)) {
48328 + case GR_ROLE_DEFAULT:
48329 + return 'D';
48330 + case GR_ROLE_USER:
48331 + return 'U';
48332 + case GR_ROLE_GROUP:
48333 + return 'G';
48334 + case GR_ROLE_SPECIAL:
48335 + return 'S';
48336 + }
48337 +
48338 + return 'X';
48339 +}
48340 +
48341 +char gr_roletype_to_char(void)
48342 +{
48343 + return gr_task_roletype_to_char(current);
48344 +}
48345 +
48346 +__inline__ int
48347 +gr_acl_tpe_check(void)
48348 +{
48349 + if (unlikely(!(gr_status & GR_READY)))
48350 + return 0;
48351 + if (current->role->roletype & GR_ROLE_TPE)
48352 + return 1;
48353 + else
48354 + return 0;
48355 +}
48356 +
48357 +int
48358 +gr_handle_rawio(const struct inode *inode)
48359 +{
48360 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
48361 + if (inode && S_ISBLK(inode->i_mode) &&
48362 + grsec_enable_chroot_caps && proc_is_chrooted(current) &&
48363 + !capable(CAP_SYS_RAWIO))
48364 + return 1;
48365 +#endif
48366 + return 0;
48367 +}
48368 +
48369 +static int
48370 +gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
48371 +{
48372 + if (likely(lena != lenb))
48373 + return 0;
48374 +
48375 + return !memcmp(a, b, lena);
48376 +}
48377 +
48378 +static int prepend(char **buffer, int *buflen, const char *str, int namelen)
48379 +{
48380 + *buflen -= namelen;
48381 + if (*buflen < 0)
48382 + return -ENAMETOOLONG;
48383 + *buffer -= namelen;
48384 + memcpy(*buffer, str, namelen);
48385 + return 0;
48386 +}
48387 +
48388 +static int prepend_name(char **buffer, int *buflen, struct qstr *name)
48389 +{
48390 + return prepend(buffer, buflen, name->name, name->len);
48391 +}
48392 +
48393 +static int prepend_path(const struct path *path, struct path *root,
48394 + char **buffer, int *buflen)
48395 +{
48396 + struct dentry *dentry = path->dentry;
48397 + struct vfsmount *vfsmnt = path->mnt;
48398 + bool slash = false;
48399 + int error = 0;
48400 +
48401 + while (dentry != root->dentry || vfsmnt != root->mnt) {
48402 + struct dentry * parent;
48403 +
48404 + if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
48405 + /* Global root? */
48406 + if (vfsmnt->mnt_parent == vfsmnt) {
48407 + goto out;
48408 + }
48409 + dentry = vfsmnt->mnt_mountpoint;
48410 + vfsmnt = vfsmnt->mnt_parent;
48411 + continue;
48412 + }
48413 + parent = dentry->d_parent;
48414 + prefetch(parent);
48415 + spin_lock(&dentry->d_lock);
48416 + error = prepend_name(buffer, buflen, &dentry->d_name);
48417 + spin_unlock(&dentry->d_lock);
48418 + if (!error)
48419 + error = prepend(buffer, buflen, "/", 1);
48420 + if (error)
48421 + break;
48422 +
48423 + slash = true;
48424 + dentry = parent;
48425 + }
48426 +
48427 +out:
48428 + if (!error && !slash)
48429 + error = prepend(buffer, buflen, "/", 1);
48430 +
48431 + return error;
48432 +}
48433 +
48434 +/* this must be called with vfsmount_lock and rename_lock held */
48435 +
48436 +static char *__our_d_path(const struct path *path, struct path *root,
48437 + char *buf, int buflen)
48438 +{
48439 + char *res = buf + buflen;
48440 + int error;
48441 +
48442 + prepend(&res, &buflen, "\0", 1);
48443 + error = prepend_path(path, root, &res, &buflen);
48444 + if (error)
48445 + return ERR_PTR(error);
48446 +
48447 + return res;
48448 +}
48449 +
48450 +static char *
48451 +gen_full_path(struct path *path, struct path *root, char *buf, int buflen)
48452 +{
48453 + char *retval;
48454 +
48455 + retval = __our_d_path(path, root, buf, buflen);
48456 + if (unlikely(IS_ERR(retval)))
48457 + retval = strcpy(buf, "<path too long>");
48458 + else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
48459 + retval[1] = '\0';
48460 +
48461 + return retval;
48462 +}
48463 +
48464 +static char *
48465 +__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
48466 + char *buf, int buflen)
48467 +{
48468 + struct path path;
48469 + char *res;
48470 +
48471 + path.dentry = (struct dentry *)dentry;
48472 + path.mnt = (struct vfsmount *)vfsmnt;
48473 +
48474 + /* we can use real_root.dentry, real_root.mnt, because this is only called
48475 + by the RBAC system */
48476 + res = gen_full_path(&path, &real_root, buf, buflen);
48477 +
48478 + return res;
48479 +}
48480 +
48481 +static char *
48482 +d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
48483 + char *buf, int buflen)
48484 +{
48485 + char *res;
48486 + struct path path;
48487 + struct path root;
48488 + struct task_struct *reaper = &init_task;
48489 +
48490 + path.dentry = (struct dentry *)dentry;
48491 + path.mnt = (struct vfsmount *)vfsmnt;
48492 +
48493 + /* we can't use real_root.dentry, real_root.mnt, because they belong only to the RBAC system */
48494 + get_fs_root(reaper->fs, &root);
48495 +
48496 + write_seqlock(&rename_lock);
48497 + br_read_lock(vfsmount_lock);
48498 + res = gen_full_path(&path, &root, buf, buflen);
48499 + br_read_unlock(vfsmount_lock);
48500 + write_sequnlock(&rename_lock);
48501 +
48502 + path_put(&root);
48503 + return res;
48504 +}
48505 +
48506 +static char *
48507 +gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
48508 +{
48509 + char *ret;
48510 + write_seqlock(&rename_lock);
48511 + br_read_lock(vfsmount_lock);
48512 + ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
48513 + PAGE_SIZE);
48514 + br_read_unlock(vfsmount_lock);
48515 + write_sequnlock(&rename_lock);
48516 + return ret;
48517 +}
48518 +
48519 +static char *
48520 +gr_to_proc_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
48521 +{
48522 + char *ret;
48523 + char *buf;
48524 + int buflen;
48525 +
48526 + write_seqlock(&rename_lock);
48527 + br_read_lock(vfsmount_lock);
48528 + buf = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
48529 + ret = __d_real_path(dentry, mnt, buf, PAGE_SIZE - 6);
48530 + buflen = (int)(ret - buf);
48531 + if (buflen >= 5)
48532 + prepend(&ret, &buflen, "/proc", 5);
48533 + else
48534 + ret = strcpy(buf, "<path too long>");
48535 + br_read_unlock(vfsmount_lock);
48536 + write_sequnlock(&rename_lock);
48537 + return ret;
48538 +}
48539 +
48540 +char *
48541 +gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
48542 +{
48543 + return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
48544 + PAGE_SIZE);
48545 +}
48546 +
48547 +char *
48548 +gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
48549 +{
48550 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
48551 + PAGE_SIZE);
48552 +}
48553 +
48554 +char *
48555 +gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
48556 +{
48557 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
48558 + PAGE_SIZE);
48559 +}
48560 +
48561 +char *
48562 +gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
48563 +{
48564 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
48565 + PAGE_SIZE);
48566 +}
48567 +
48568 +char *
48569 +gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
48570 +{
48571 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
48572 + PAGE_SIZE);
48573 +}
48574 +
48575 +__inline__ __u32
48576 +to_gr_audit(const __u32 reqmode)
48577 +{
48578 + /* masks off auditable permission flags, then shifts them to create
48579 + auditing flags, and adds the special case of append auditing if
48580 + we're requesting write */
48581 + return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
48582 +}
48583 +
48584 +struct acl_subject_label *
48585 +lookup_subject_map(const struct acl_subject_label *userp)
48586 +{
48587 + unsigned int index = shash(userp, subj_map_set.s_size);
48588 + struct subject_map *match;
48589 +
48590 + match = subj_map_set.s_hash[index];
48591 +
48592 + while (match && match->user != userp)
48593 + match = match->next;
48594 +
48595 + if (match != NULL)
48596 + return match->kernel;
48597 + else
48598 + return NULL;
48599 +}
48600 +
48601 +static void
48602 +insert_subj_map_entry(struct subject_map *subjmap)
48603 +{
48604 + unsigned int index = shash(subjmap->user, subj_map_set.s_size);
48605 + struct subject_map **curr;
48606 +
48607 + subjmap->prev = NULL;
48608 +
48609 + curr = &subj_map_set.s_hash[index];
48610 + if (*curr != NULL)
48611 + (*curr)->prev = subjmap;
48612 +
48613 + subjmap->next = *curr;
48614 + *curr = subjmap;
48615 +
48616 + return;
48617 +}
48618 +
48619 +static struct acl_role_label *
48620 +lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
48621 + const gid_t gid)
48622 +{
48623 + unsigned int index = rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
48624 + struct acl_role_label *match;
48625 + struct role_allowed_ip *ipp;
48626 + unsigned int x;
48627 + u32 curr_ip = task->signal->curr_ip;
48628 +
48629 + task->signal->saved_ip = curr_ip;
48630 +
48631 + match = acl_role_set.r_hash[index];
48632 +
48633 + while (match) {
48634 + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
48635 + for (x = 0; x < match->domain_child_num; x++) {
48636 + if (match->domain_children[x] == uid)
48637 + goto found;
48638 + }
48639 + } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
48640 + break;
48641 + match = match->next;
48642 + }
48643 +found:
48644 + if (match == NULL) {
48645 + try_group:
48646 + index = rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
48647 + match = acl_role_set.r_hash[index];
48648 +
48649 + while (match) {
48650 + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
48651 + for (x = 0; x < match->domain_child_num; x++) {
48652 + if (match->domain_children[x] == gid)
48653 + goto found2;
48654 + }
48655 + } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
48656 + break;
48657 + match = match->next;
48658 + }
48659 +found2:
48660 + if (match == NULL)
48661 + match = default_role;
48662 + if (match->allowed_ips == NULL)
48663 + return match;
48664 + else {
48665 + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
48666 + if (likely
48667 + ((ntohl(curr_ip) & ipp->netmask) ==
48668 + (ntohl(ipp->addr) & ipp->netmask)))
48669 + return match;
48670 + }
48671 + match = default_role;
48672 + }
48673 + } else if (match->allowed_ips == NULL) {
48674 + return match;
48675 + } else {
48676 + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
48677 + if (likely
48678 + ((ntohl(curr_ip) & ipp->netmask) ==
48679 + (ntohl(ipp->addr) & ipp->netmask)))
48680 + return match;
48681 + }
48682 + goto try_group;
48683 + }
48684 +
48685 + return match;
48686 +}
48687 +
48688 +struct acl_subject_label *
48689 +lookup_acl_subj_label(const ino_t ino, const dev_t dev,
48690 + const struct acl_role_label *role)
48691 +{
48692 + unsigned int index = fhash(ino, dev, role->subj_hash_size);
48693 + struct acl_subject_label *match;
48694 +
48695 + match = role->subj_hash[index];
48696 +
48697 + while (match && (match->inode != ino || match->device != dev ||
48698 + (match->mode & GR_DELETED))) {
48699 + match = match->next;
48700 + }
48701 +
48702 + if (match && !(match->mode & GR_DELETED))
48703 + return match;
48704 + else
48705 + return NULL;
48706 +}
48707 +
48708 +struct acl_subject_label *
48709 +lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
48710 + const struct acl_role_label *role)
48711 +{
48712 + unsigned int index = fhash(ino, dev, role->subj_hash_size);
48713 + struct acl_subject_label *match;
48714 +
48715 + match = role->subj_hash[index];
48716 +
48717 + while (match && (match->inode != ino || match->device != dev ||
48718 + !(match->mode & GR_DELETED))) {
48719 + match = match->next;
48720 + }
48721 +
48722 + if (match && (match->mode & GR_DELETED))
48723 + return match;
48724 + else
48725 + return NULL;
48726 +}
48727 +
48728 +static struct acl_object_label *
48729 +lookup_acl_obj_label(const ino_t ino, const dev_t dev,
48730 + const struct acl_subject_label *subj)
48731 +{
48732 + unsigned int index = fhash(ino, dev, subj->obj_hash_size);
48733 + struct acl_object_label *match;
48734 +
48735 + match = subj->obj_hash[index];
48736 +
48737 + while (match && (match->inode != ino || match->device != dev ||
48738 + (match->mode & GR_DELETED))) {
48739 + match = match->next;
48740 + }
48741 +
48742 + if (match && !(match->mode & GR_DELETED))
48743 + return match;
48744 + else
48745 + return NULL;
48746 +}
48747 +
48748 +static struct acl_object_label *
48749 +lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
48750 + const struct acl_subject_label *subj)
48751 +{
48752 + unsigned int index = fhash(ino, dev, subj->obj_hash_size);
48753 + struct acl_object_label *match;
48754 +
48755 + match = subj->obj_hash[index];
48756 +
48757 + while (match && (match->inode != ino || match->device != dev ||
48758 + !(match->mode & GR_DELETED))) {
48759 + match = match->next;
48760 + }
48761 +
48762 + if (match && (match->mode & GR_DELETED))
48763 + return match;
48764 +
48765 + match = subj->obj_hash[index];
48766 +
48767 + while (match && (match->inode != ino || match->device != dev ||
48768 + (match->mode & GR_DELETED))) {
48769 + match = match->next;
48770 + }
48771 +
48772 + if (match && !(match->mode & GR_DELETED))
48773 + return match;
48774 + else
48775 + return NULL;
48776 +}
48777 +
48778 +static struct name_entry *
48779 +lookup_name_entry(const char *name)
48780 +{
48781 + unsigned int len = strlen(name);
48782 + unsigned int key = full_name_hash(name, len);
48783 + unsigned int index = key % name_set.n_size;
48784 + struct name_entry *match;
48785 +
48786 + match = name_set.n_hash[index];
48787 +
48788 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
48789 + match = match->next;
48790 +
48791 + return match;
48792 +}
48793 +
48794 +static struct name_entry *
48795 +lookup_name_entry_create(const char *name)
48796 +{
48797 + unsigned int len = strlen(name);
48798 + unsigned int key = full_name_hash(name, len);
48799 + unsigned int index = key % name_set.n_size;
48800 + struct name_entry *match;
48801 +
48802 + match = name_set.n_hash[index];
48803 +
48804 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
48805 + !match->deleted))
48806 + match = match->next;
48807 +
48808 + if (match && match->deleted)
48809 + return match;
48810 +
48811 + match = name_set.n_hash[index];
48812 +
48813 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
48814 + match->deleted))
48815 + match = match->next;
48816 +
48817 + if (match && !match->deleted)
48818 + return match;
48819 + else
48820 + return NULL;
48821 +}
48822 +
48823 +static struct inodev_entry *
48824 +lookup_inodev_entry(const ino_t ino, const dev_t dev)
48825 +{
48826 + unsigned int index = fhash(ino, dev, inodev_set.i_size);
48827 + struct inodev_entry *match;
48828 +
48829 + match = inodev_set.i_hash[index];
48830 +
48831 + while (match && (match->nentry->inode != ino || match->nentry->device != dev))
48832 + match = match->next;
48833 +
48834 + return match;
48835 +}
48836 +
48837 +static void
48838 +insert_inodev_entry(struct inodev_entry *entry)
48839 +{
48840 + unsigned int index = fhash(entry->nentry->inode, entry->nentry->device,
48841 + inodev_set.i_size);
48842 + struct inodev_entry **curr;
48843 +
48844 + entry->prev = NULL;
48845 +
48846 + curr = &inodev_set.i_hash[index];
48847 + if (*curr != NULL)
48848 + (*curr)->prev = entry;
48849 +
48850 + entry->next = *curr;
48851 + *curr = entry;
48852 +
48853 + return;
48854 +}
48855 +
48856 +static void
48857 +__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
48858 +{
48859 + unsigned int index =
48860 + rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
48861 + struct acl_role_label **curr;
48862 + struct acl_role_label *tmp;
48863 +
48864 + curr = &acl_role_set.r_hash[index];
48865 +
48866 + /* if role was already inserted due to domains and already has
48867 + a role in the same bucket as it attached, then we need to
48868 + combine these two buckets
48869 + */
48870 + if (role->next) {
48871 + tmp = role->next;
48872 + while (tmp->next)
48873 + tmp = tmp->next;
48874 + tmp->next = *curr;
48875 + } else
48876 + role->next = *curr;
48877 + *curr = role;
48878 +
48879 + return;
48880 +}
48881 +
48882 +static void
48883 +insert_acl_role_label(struct acl_role_label *role)
48884 +{
48885 + int i;
48886 +
48887 + if (role_list == NULL) {
48888 + role_list = role;
48889 + role->prev = NULL;
48890 + } else {
48891 + role->prev = role_list;
48892 + role_list = role;
48893 + }
48894 +
48895 + /* used for hash chains */
48896 + role->next = NULL;
48897 +
48898 + if (role->roletype & GR_ROLE_DOMAIN) {
48899 + for (i = 0; i < role->domain_child_num; i++)
48900 + __insert_acl_role_label(role, role->domain_children[i]);
48901 + } else
48902 + __insert_acl_role_label(role, role->uidgid);
48903 +}
48904 +
48905 +static int
48906 +insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
48907 +{
48908 + struct name_entry **curr, *nentry;
48909 + struct inodev_entry *ientry;
48910 + unsigned int len = strlen(name);
48911 + unsigned int key = full_name_hash(name, len);
48912 + unsigned int index = key % name_set.n_size;
48913 +
48914 + curr = &name_set.n_hash[index];
48915 +
48916 + while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
48917 + curr = &((*curr)->next);
48918 +
48919 + if (*curr != NULL)
48920 + return 1;
48921 +
48922 + nentry = acl_alloc(sizeof (struct name_entry));
48923 + if (nentry == NULL)
48924 + return 0;
48925 + ientry = acl_alloc(sizeof (struct inodev_entry));
48926 + if (ientry == NULL)
48927 + return 0;
48928 + ientry->nentry = nentry;
48929 +
48930 + nentry->key = key;
48931 + nentry->name = name;
48932 + nentry->inode = inode;
48933 + nentry->device = device;
48934 + nentry->len = len;
48935 + nentry->deleted = deleted;
48936 +
48937 + nentry->prev = NULL;
48938 + curr = &name_set.n_hash[index];
48939 + if (*curr != NULL)
48940 + (*curr)->prev = nentry;
48941 + nentry->next = *curr;
48942 + *curr = nentry;
48943 +
48944 + /* insert us into the table searchable by inode/dev */
48945 + insert_inodev_entry(ientry);
48946 +
48947 + return 1;
48948 +}
48949 +
48950 +static void
48951 +insert_acl_obj_label(struct acl_object_label *obj,
48952 + struct acl_subject_label *subj)
48953 +{
48954 + unsigned int index =
48955 + fhash(obj->inode, obj->device, subj->obj_hash_size);
48956 + struct acl_object_label **curr;
48957 +
48958 +
48959 + obj->prev = NULL;
48960 +
48961 + curr = &subj->obj_hash[index];
48962 + if (*curr != NULL)
48963 + (*curr)->prev = obj;
48964 +
48965 + obj->next = *curr;
48966 + *curr = obj;
48967 +
48968 + return;
48969 +}
48970 +
48971 +static void
48972 +insert_acl_subj_label(struct acl_subject_label *obj,
48973 + struct acl_role_label *role)
48974 +{
48975 + unsigned int index = fhash(obj->inode, obj->device, role->subj_hash_size);
48976 + struct acl_subject_label **curr;
48977 +
48978 + obj->prev = NULL;
48979 +
48980 + curr = &role->subj_hash[index];
48981 + if (*curr != NULL)
48982 + (*curr)->prev = obj;
48983 +
48984 + obj->next = *curr;
48985 + *curr = obj;
48986 +
48987 + return;
48988 +}
48989 +
48990 +/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
48991 +
48992 +static void *
48993 +create_table(__u32 * len, int elementsize)
48994 +{
48995 + unsigned int table_sizes[] = {
48996 + 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
48997 + 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
48998 + 4194301, 8388593, 16777213, 33554393, 67108859
48999 + };
49000 + void *newtable = NULL;
49001 + unsigned int pwr = 0;
49002 +
49003 + while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
49004 + table_sizes[pwr] <= *len)
49005 + pwr++;
49006 +
49007 + if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
49008 + return newtable;
49009 +
49010 + if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
49011 + newtable =
49012 + kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
49013 + else
49014 + newtable = vmalloc(table_sizes[pwr] * elementsize);
49015 +
49016 + *len = table_sizes[pwr];
49017 +
49018 + return newtable;
49019 +}
49020 +
49021 +static int
49022 +init_variables(const struct gr_arg *arg)
49023 +{
49024 + struct task_struct *reaper = &init_task;
49025 + unsigned int stacksize;
49026 +
49027 + subj_map_set.s_size = arg->role_db.num_subjects;
49028 + acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
49029 + name_set.n_size = arg->role_db.num_objects;
49030 + inodev_set.i_size = arg->role_db.num_objects;
49031 +
49032 + if (!subj_map_set.s_size || !acl_role_set.r_size ||
49033 + !name_set.n_size || !inodev_set.i_size)
49034 + return 1;
49035 +
49036 + if (!gr_init_uidset())
49037 + return 1;
49038 +
49039 + /* set up the stack that holds allocation info */
49040 +
49041 + stacksize = arg->role_db.num_pointers + 5;
49042 +
49043 + if (!acl_alloc_stack_init(stacksize))
49044 + return 1;
49045 +
49046 + /* grab reference for the real root dentry and vfsmount */
49047 + get_fs_root(reaper->fs, &real_root);
49048 +
49049 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
49050 + printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root.dentry), real_root.dentry->d_inode->i_ino);
49051 +#endif
49052 +
49053 + fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
49054 + if (fakefs_obj_rw == NULL)
49055 + return 1;
49056 + fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
49057 +
49058 + fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
49059 + if (fakefs_obj_rwx == NULL)
49060 + return 1;
49061 + fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
49062 +
49063 + subj_map_set.s_hash =
49064 + (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
49065 + acl_role_set.r_hash =
49066 + (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
49067 + name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
49068 + inodev_set.i_hash =
49069 + (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
49070 +
49071 + if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
49072 + !name_set.n_hash || !inodev_set.i_hash)
49073 + return 1;
49074 +
49075 + memset(subj_map_set.s_hash, 0,
49076 + sizeof(struct subject_map *) * subj_map_set.s_size);
49077 + memset(acl_role_set.r_hash, 0,
49078 + sizeof (struct acl_role_label *) * acl_role_set.r_size);
49079 + memset(name_set.n_hash, 0,
49080 + sizeof (struct name_entry *) * name_set.n_size);
49081 + memset(inodev_set.i_hash, 0,
49082 + sizeof (struct inodev_entry *) * inodev_set.i_size);
49083 +
49084 + return 0;
49085 +}
49086 +
49087 +/* free information not needed after startup
49088 + currently contains user->kernel pointer mappings for subjects
49089 +*/
49090 +
49091 +static void
49092 +free_init_variables(void)
49093 +{
49094 + __u32 i;
49095 +
49096 + if (subj_map_set.s_hash) {
49097 + for (i = 0; i < subj_map_set.s_size; i++) {
49098 + if (subj_map_set.s_hash[i]) {
49099 + kfree(subj_map_set.s_hash[i]);
49100 + subj_map_set.s_hash[i] = NULL;
49101 + }
49102 + }
49103 +
49104 + if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
49105 + PAGE_SIZE)
49106 + kfree(subj_map_set.s_hash);
49107 + else
49108 + vfree(subj_map_set.s_hash);
49109 + }
49110 +
49111 + return;
49112 +}
49113 +
49114 +static void
49115 +free_variables(void)
49116 +{
49117 + struct acl_subject_label *s;
49118 + struct acl_role_label *r;
49119 + struct task_struct *task, *task2;
49120 + unsigned int x;
49121 +
49122 + gr_clear_learn_entries();
49123 +
49124 + read_lock(&tasklist_lock);
49125 + do_each_thread(task2, task) {
49126 + task->acl_sp_role = 0;
49127 + task->acl_role_id = 0;
49128 + task->acl = NULL;
49129 + task->role = NULL;
49130 + } while_each_thread(task2, task);
49131 + read_unlock(&tasklist_lock);
49132 +
49133 + /* release the reference to the real root dentry and vfsmount */
49134 + path_put(&real_root);
49135 +
49136 + /* free all object hash tables */
49137 +
49138 + FOR_EACH_ROLE_START(r)
49139 + if (r->subj_hash == NULL)
49140 + goto next_role;
49141 + FOR_EACH_SUBJECT_START(r, s, x)
49142 + if (s->obj_hash == NULL)
49143 + break;
49144 + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
49145 + kfree(s->obj_hash);
49146 + else
49147 + vfree(s->obj_hash);
49148 + FOR_EACH_SUBJECT_END(s, x)
49149 + FOR_EACH_NESTED_SUBJECT_START(r, s)
49150 + if (s->obj_hash == NULL)
49151 + break;
49152 + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
49153 + kfree(s->obj_hash);
49154 + else
49155 + vfree(s->obj_hash);
49156 + FOR_EACH_NESTED_SUBJECT_END(s)
49157 + if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
49158 + kfree(r->subj_hash);
49159 + else
49160 + vfree(r->subj_hash);
49161 + r->subj_hash = NULL;
49162 +next_role:
49163 + FOR_EACH_ROLE_END(r)
49164 +
49165 + acl_free_all();
49166 +
49167 + if (acl_role_set.r_hash) {
49168 + if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
49169 + PAGE_SIZE)
49170 + kfree(acl_role_set.r_hash);
49171 + else
49172 + vfree(acl_role_set.r_hash);
49173 + }
49174 + if (name_set.n_hash) {
49175 + if ((name_set.n_size * sizeof (struct name_entry *)) <=
49176 + PAGE_SIZE)
49177 + kfree(name_set.n_hash);
49178 + else
49179 + vfree(name_set.n_hash);
49180 + }
49181 +
49182 + if (inodev_set.i_hash) {
49183 + if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
49184 + PAGE_SIZE)
49185 + kfree(inodev_set.i_hash);
49186 + else
49187 + vfree(inodev_set.i_hash);
49188 + }
49189 +
49190 + gr_free_uidset();
49191 +
49192 + memset(&name_set, 0, sizeof (struct name_db));
49193 + memset(&inodev_set, 0, sizeof (struct inodev_db));
49194 + memset(&acl_role_set, 0, sizeof (struct acl_role_db));
49195 + memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
49196 +
49197 + default_role = NULL;
49198 + role_list = NULL;
49199 +
49200 + return;
49201 +}
49202 +
49203 +static __u32
49204 +count_user_objs(struct acl_object_label *userp)
49205 +{
49206 + struct acl_object_label o_tmp;
49207 + __u32 num = 0;
49208 +
49209 + while (userp) {
49210 + if (copy_from_user(&o_tmp, userp,
49211 + sizeof (struct acl_object_label)))
49212 + break;
49213 +
49214 + userp = o_tmp.prev;
49215 + num++;
49216 + }
49217 +
49218 + return num;
49219 +}
49220 +
49221 +static struct acl_subject_label *
49222 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role);
49223 +
49224 +static int
49225 +copy_user_glob(struct acl_object_label *obj)
49226 +{
49227 + struct acl_object_label *g_tmp, **guser;
49228 + unsigned int len;
49229 + char *tmp;
49230 +
49231 + if (obj->globbed == NULL)
49232 + return 0;
49233 +
49234 + guser = &obj->globbed;
49235 + while (*guser) {
49236 + g_tmp = (struct acl_object_label *)
49237 + acl_alloc(sizeof (struct acl_object_label));
49238 + if (g_tmp == NULL)
49239 + return -ENOMEM;
49240 +
49241 + if (copy_from_user(g_tmp, *guser,
49242 + sizeof (struct acl_object_label)))
49243 + return -EFAULT;
49244 +
49245 + len = strnlen_user(g_tmp->filename, PATH_MAX);
49246 +
49247 + if (!len || len >= PATH_MAX)
49248 + return -EINVAL;
49249 +
49250 + if ((tmp = (char *) acl_alloc(len)) == NULL)
49251 + return -ENOMEM;
49252 +
49253 + if (copy_from_user(tmp, g_tmp->filename, len))
49254 + return -EFAULT;
49255 + tmp[len-1] = '\0';
49256 + g_tmp->filename = tmp;
49257 +
49258 + *guser = g_tmp;
49259 + guser = &(g_tmp->next);
49260 + }
49261 +
49262 + return 0;
49263 +}
49264 +
49265 +static int
49266 +copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
49267 + struct acl_role_label *role)
49268 +{
49269 + struct acl_object_label *o_tmp;
49270 + unsigned int len;
49271 + int ret;
49272 + char *tmp;
49273 +
49274 + while (userp) {
49275 + if ((o_tmp = (struct acl_object_label *)
49276 + acl_alloc(sizeof (struct acl_object_label))) == NULL)
49277 + return -ENOMEM;
49278 +
49279 + if (copy_from_user(o_tmp, userp,
49280 + sizeof (struct acl_object_label)))
49281 + return -EFAULT;
49282 +
49283 + userp = o_tmp->prev;
49284 +
49285 + len = strnlen_user(o_tmp->filename, PATH_MAX);
49286 +
49287 + if (!len || len >= PATH_MAX)
49288 + return -EINVAL;
49289 +
49290 + if ((tmp = (char *) acl_alloc(len)) == NULL)
49291 + return -ENOMEM;
49292 +
49293 + if (copy_from_user(tmp, o_tmp->filename, len))
49294 + return -EFAULT;
49295 + tmp[len-1] = '\0';
49296 + o_tmp->filename = tmp;
49297 +
49298 + insert_acl_obj_label(o_tmp, subj);
49299 + if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
49300 + o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
49301 + return -ENOMEM;
49302 +
49303 + ret = copy_user_glob(o_tmp);
49304 + if (ret)
49305 + return ret;
49306 +
49307 + if (o_tmp->nested) {
49308 + o_tmp->nested = do_copy_user_subj(o_tmp->nested, role);
49309 + if (IS_ERR(o_tmp->nested))
49310 + return PTR_ERR(o_tmp->nested);
49311 +
49312 + /* insert into nested subject list */
49313 + o_tmp->nested->next = role->hash->first;
49314 + role->hash->first = o_tmp->nested;
49315 + }
49316 + }
49317 +
49318 + return 0;
49319 +}
49320 +
49321 +static __u32
49322 +count_user_subjs(struct acl_subject_label *userp)
49323 +{
49324 + struct acl_subject_label s_tmp;
49325 + __u32 num = 0;
49326 +
49327 + while (userp) {
49328 + if (copy_from_user(&s_tmp, userp,
49329 + sizeof (struct acl_subject_label)))
49330 + break;
49331 +
49332 + userp = s_tmp.prev;
49333 + /* do not count nested subjects against this count, since
49334 + they are not included in the hash table, but are
49335 + attached to objects. We have already counted
49336 + the subjects in userspace for the allocation
49337 + stack
49338 + */
49339 + if (!(s_tmp.mode & GR_NESTED))
49340 + num++;
49341 + }
49342 +
49343 + return num;
49344 +}
49345 +
49346 +static int
49347 +copy_user_allowedips(struct acl_role_label *rolep)
49348 +{
49349 + struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
49350 +
49351 + ruserip = rolep->allowed_ips;
49352 +
49353 + while (ruserip) {
49354 + rlast = rtmp;
49355 +
49356 + if ((rtmp = (struct role_allowed_ip *)
49357 + acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
49358 + return -ENOMEM;
49359 +
49360 + if (copy_from_user(rtmp, ruserip,
49361 + sizeof (struct role_allowed_ip)))
49362 + return -EFAULT;
49363 +
49364 + ruserip = rtmp->prev;
49365 +
49366 + if (!rlast) {
49367 + rtmp->prev = NULL;
49368 + rolep->allowed_ips = rtmp;
49369 + } else {
49370 + rlast->next = rtmp;
49371 + rtmp->prev = rlast;
49372 + }
49373 +
49374 + if (!ruserip)
49375 + rtmp->next = NULL;
49376 + }
49377 +
49378 + return 0;
49379 +}
49380 +
49381 +static int
49382 +copy_user_transitions(struct acl_role_label *rolep)
49383 +{
49384 + struct role_transition *rusertp, *rtmp = NULL, *rlast;
49385 +
49386 + unsigned int len;
49387 + char *tmp;
49388 +
49389 + rusertp = rolep->transitions;
49390 +
49391 + while (rusertp) {
49392 + rlast = rtmp;
49393 +
49394 + if ((rtmp = (struct role_transition *)
49395 + acl_alloc(sizeof (struct role_transition))) == NULL)
49396 + return -ENOMEM;
49397 +
49398 + if (copy_from_user(rtmp, rusertp,
49399 + sizeof (struct role_transition)))
49400 + return -EFAULT;
49401 +
49402 + rusertp = rtmp->prev;
49403 +
49404 + len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
49405 +
49406 + if (!len || len >= GR_SPROLE_LEN)
49407 + return -EINVAL;
49408 +
49409 + if ((tmp = (char *) acl_alloc(len)) == NULL)
49410 + return -ENOMEM;
49411 +
49412 + if (copy_from_user(tmp, rtmp->rolename, len))
49413 + return -EFAULT;
49414 + tmp[len-1] = '\0';
49415 + rtmp->rolename = tmp;
49416 +
49417 + if (!rlast) {
49418 + rtmp->prev = NULL;
49419 + rolep->transitions = rtmp;
49420 + } else {
49421 + rlast->next = rtmp;
49422 + rtmp->prev = rlast;
49423 + }
49424 +
49425 + if (!rusertp)
49426 + rtmp->next = NULL;
49427 + }
49428 +
49429 + return 0;
49430 +}
49431 +
49432 +static struct acl_subject_label *
49433 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role)
49434 +{
49435 + struct acl_subject_label *s_tmp = NULL, *s_tmp2;
49436 + unsigned int len;
49437 + char *tmp;
49438 + __u32 num_objs;
49439 + struct acl_ip_label **i_tmp, *i_utmp2;
49440 + struct gr_hash_struct ghash;
49441 + struct subject_map *subjmap;
49442 + unsigned int i_num;
49443 + int err;
49444 +
49445 + s_tmp = lookup_subject_map(userp);
49446 +
49447 + /* we've already copied this subject into the kernel, just return
49448 + the reference to it, and don't copy it over again
49449 + */
49450 + if (s_tmp)
49451 + return(s_tmp);
49452 +
49453 + if ((s_tmp = (struct acl_subject_label *)
49454 + acl_alloc(sizeof (struct acl_subject_label))) == NULL)
49455 + return ERR_PTR(-ENOMEM);
49456 +
49457 + subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
49458 + if (subjmap == NULL)
49459 + return ERR_PTR(-ENOMEM);
49460 +
49461 + subjmap->user = userp;
49462 + subjmap->kernel = s_tmp;
49463 + insert_subj_map_entry(subjmap);
49464 +
49465 + if (copy_from_user(s_tmp, userp,
49466 + sizeof (struct acl_subject_label)))
49467 + return ERR_PTR(-EFAULT);
49468 +
49469 + len = strnlen_user(s_tmp->filename, PATH_MAX);
49470 +
49471 + if (!len || len >= PATH_MAX)
49472 + return ERR_PTR(-EINVAL);
49473 +
49474 + if ((tmp = (char *) acl_alloc(len)) == NULL)
49475 + return ERR_PTR(-ENOMEM);
49476 +
49477 + if (copy_from_user(tmp, s_tmp->filename, len))
49478 + return ERR_PTR(-EFAULT);
49479 + tmp[len-1] = '\0';
49480 + s_tmp->filename = tmp;
49481 +
49482 + if (!strcmp(s_tmp->filename, "/"))
49483 + role->root_label = s_tmp;
49484 +
49485 + if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
49486 + return ERR_PTR(-EFAULT);
49487 +
49488 + /* copy user and group transition tables */
49489 +
49490 + if (s_tmp->user_trans_num) {
49491 + uid_t *uidlist;
49492 +
49493 + uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
49494 + if (uidlist == NULL)
49495 + return ERR_PTR(-ENOMEM);
49496 + if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
49497 + return ERR_PTR(-EFAULT);
49498 +
49499 + s_tmp->user_transitions = uidlist;
49500 + }
49501 +
49502 + if (s_tmp->group_trans_num) {
49503 + gid_t *gidlist;
49504 +
49505 + gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
49506 + if (gidlist == NULL)
49507 + return ERR_PTR(-ENOMEM);
49508 + if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
49509 + return ERR_PTR(-EFAULT);
49510 +
49511 + s_tmp->group_transitions = gidlist;
49512 + }
49513 +
49514 + /* set up object hash table */
49515 + num_objs = count_user_objs(ghash.first);
49516 +
49517 + s_tmp->obj_hash_size = num_objs;
49518 + s_tmp->obj_hash =
49519 + (struct acl_object_label **)
49520 + create_table(&(s_tmp->obj_hash_size), sizeof(void *));
49521 +
49522 + if (!s_tmp->obj_hash)
49523 + return ERR_PTR(-ENOMEM);
49524 +
49525 + memset(s_tmp->obj_hash, 0,
49526 + s_tmp->obj_hash_size *
49527 + sizeof (struct acl_object_label *));
49528 +
49529 + /* add in objects */
49530 + err = copy_user_objs(ghash.first, s_tmp, role);
49531 +
49532 + if (err)
49533 + return ERR_PTR(err);
49534 +
49535 + /* set pointer for parent subject */
49536 + if (s_tmp->parent_subject) {
49537 + s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role);
49538 +
49539 + if (IS_ERR(s_tmp2))
49540 + return s_tmp2;
49541 +
49542 + s_tmp->parent_subject = s_tmp2;
49543 + }
49544 +
49545 + /* add in ip acls */
49546 +
49547 + if (!s_tmp->ip_num) {
49548 + s_tmp->ips = NULL;
49549 + goto insert;
49550 + }
49551 +
49552 + i_tmp =
49553 + (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
49554 + sizeof (struct acl_ip_label *));
49555 +
49556 + if (!i_tmp)
49557 + return ERR_PTR(-ENOMEM);
49558 +
49559 + for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
49560 + *(i_tmp + i_num) =
49561 + (struct acl_ip_label *)
49562 + acl_alloc(sizeof (struct acl_ip_label));
49563 + if (!*(i_tmp + i_num))
49564 + return ERR_PTR(-ENOMEM);
49565 +
49566 + if (copy_from_user
49567 + (&i_utmp2, s_tmp->ips + i_num,
49568 + sizeof (struct acl_ip_label *)))
49569 + return ERR_PTR(-EFAULT);
49570 +
49571 + if (copy_from_user
49572 + (*(i_tmp + i_num), i_utmp2,
49573 + sizeof (struct acl_ip_label)))
49574 + return ERR_PTR(-EFAULT);
49575 +
49576 + if ((*(i_tmp + i_num))->iface == NULL)
49577 + continue;
49578 +
49579 + len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
49580 + if (!len || len >= IFNAMSIZ)
49581 + return ERR_PTR(-EINVAL);
49582 + tmp = acl_alloc(len);
49583 + if (tmp == NULL)
49584 + return ERR_PTR(-ENOMEM);
49585 + if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
49586 + return ERR_PTR(-EFAULT);
49587 + (*(i_tmp + i_num))->iface = tmp;
49588 + }
49589 +
49590 + s_tmp->ips = i_tmp;
49591 +
49592 +insert:
49593 + if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
49594 + s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
49595 + return ERR_PTR(-ENOMEM);
49596 +
49597 + return s_tmp;
49598 +}
49599 +
49600 +static int
49601 +copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
49602 +{
49603 + struct acl_subject_label s_pre;
49604 + struct acl_subject_label * ret;
49605 + int err;
49606 +
49607 + while (userp) {
49608 + if (copy_from_user(&s_pre, userp,
49609 + sizeof (struct acl_subject_label)))
49610 + return -EFAULT;
49611 +
49612 + /* do not add nested subjects here, add
49613 + while parsing objects
49614 + */
49615 +
49616 + if (s_pre.mode & GR_NESTED) {
49617 + userp = s_pre.prev;
49618 + continue;
49619 + }
49620 +
49621 + ret = do_copy_user_subj(userp, role);
49622 +
49623 + err = PTR_ERR(ret);
49624 + if (IS_ERR(ret))
49625 + return err;
49626 +
49627 + insert_acl_subj_label(ret, role);
49628 +
49629 + userp = s_pre.prev;
49630 + }
49631 +
49632 + return 0;
49633 +}
49634 +
49635 +static int
49636 +copy_user_acl(struct gr_arg *arg)
49637 +{
49638 + struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
49639 + struct sprole_pw *sptmp;
49640 + struct gr_hash_struct *ghash;
49641 + uid_t *domainlist;
49642 + unsigned int r_num;
49643 + unsigned int len;
49644 + char *tmp;
49645 + int err = 0;
49646 + __u16 i;
49647 + __u32 num_subjs;
49648 +
49649 + /* we need a default and kernel role */
49650 + if (arg->role_db.num_roles < 2)
49651 + return -EINVAL;
49652 +
49653 + /* copy special role authentication info from userspace */
49654 +
49655 + num_sprole_pws = arg->num_sprole_pws;
49656 + acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
49657 +
49658 + if (!acl_special_roles) {
49659 + err = -ENOMEM;
49660 + goto cleanup;
49661 + }
49662 +
49663 + for (i = 0; i < num_sprole_pws; i++) {
49664 + sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
49665 + if (!sptmp) {
49666 + err = -ENOMEM;
49667 + goto cleanup;
49668 + }
49669 + if (copy_from_user(sptmp, arg->sprole_pws + i,
49670 + sizeof (struct sprole_pw))) {
49671 + err = -EFAULT;
49672 + goto cleanup;
49673 + }
49674 +
49675 + len =
49676 + strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
49677 +
49678 + if (!len || len >= GR_SPROLE_LEN) {
49679 + err = -EINVAL;
49680 + goto cleanup;
49681 + }
49682 +
49683 + if ((tmp = (char *) acl_alloc(len)) == NULL) {
49684 + err = -ENOMEM;
49685 + goto cleanup;
49686 + }
49687 +
49688 + if (copy_from_user(tmp, sptmp->rolename, len)) {
49689 + err = -EFAULT;
49690 + goto cleanup;
49691 + }
49692 + tmp[len-1] = '\0';
49693 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
49694 + printk(KERN_ALERT "Copying special role %s\n", tmp);
49695 +#endif
49696 + sptmp->rolename = tmp;
49697 + acl_special_roles[i] = sptmp;
49698 + }
49699 +
49700 + r_utmp = (struct acl_role_label **) arg->role_db.r_table;
49701 +
49702 + for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
49703 + r_tmp = acl_alloc(sizeof (struct acl_role_label));
49704 +
49705 + if (!r_tmp) {
49706 + err = -ENOMEM;
49707 + goto cleanup;
49708 + }
49709 +
49710 + if (copy_from_user(&r_utmp2, r_utmp + r_num,
49711 + sizeof (struct acl_role_label *))) {
49712 + err = -EFAULT;
49713 + goto cleanup;
49714 + }
49715 +
49716 + if (copy_from_user(r_tmp, r_utmp2,
49717 + sizeof (struct acl_role_label))) {
49718 + err = -EFAULT;
49719 + goto cleanup;
49720 + }
49721 +
49722 + len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
49723 +
49724 + if (!len || len >= PATH_MAX) {
49725 + err = -EINVAL;
49726 + goto cleanup;
49727 + }
49728 +
49729 + if ((tmp = (char *) acl_alloc(len)) == NULL) {
49730 + err = -ENOMEM;
49731 + goto cleanup;
49732 + }
49733 + if (copy_from_user(tmp, r_tmp->rolename, len)) {
49734 + err = -EFAULT;
49735 + goto cleanup;
49736 + }
49737 + tmp[len-1] = '\0';
49738 + r_tmp->rolename = tmp;
49739 +
49740 + if (!strcmp(r_tmp->rolename, "default")
49741 + && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
49742 + default_role = r_tmp;
49743 + } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
49744 + kernel_role = r_tmp;
49745 + }
49746 +
49747 + if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL) {
49748 + err = -ENOMEM;
49749 + goto cleanup;
49750 + }
49751 + if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct))) {
49752 + err = -EFAULT;
49753 + goto cleanup;
49754 + }
49755 +
49756 + r_tmp->hash = ghash;
49757 +
49758 + num_subjs = count_user_subjs(r_tmp->hash->first);
49759 +
49760 + r_tmp->subj_hash_size = num_subjs;
49761 + r_tmp->subj_hash =
49762 + (struct acl_subject_label **)
49763 + create_table(&(r_tmp->subj_hash_size), sizeof(void *));
49764 +
49765 + if (!r_tmp->subj_hash) {
49766 + err = -ENOMEM;
49767 + goto cleanup;
49768 + }
49769 +
49770 + err = copy_user_allowedips(r_tmp);
49771 + if (err)
49772 + goto cleanup;
49773 +
49774 + /* copy domain info */
49775 + if (r_tmp->domain_children != NULL) {
49776 + domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
49777 + if (domainlist == NULL) {
49778 + err = -ENOMEM;
49779 + goto cleanup;
49780 + }
49781 + if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t))) {
49782 + err = -EFAULT;
49783 + goto cleanup;
49784 + }
49785 + r_tmp->domain_children = domainlist;
49786 + }
49787 +
49788 + err = copy_user_transitions(r_tmp);
49789 + if (err)
49790 + goto cleanup;
49791 +
49792 + memset(r_tmp->subj_hash, 0,
49793 + r_tmp->subj_hash_size *
49794 + sizeof (struct acl_subject_label *));
49795 +
49796 + err = copy_user_subjs(r_tmp->hash->first, r_tmp);
49797 +
49798 + if (err)
49799 + goto cleanup;
49800 +
49801 + /* set nested subject list to null */
49802 + r_tmp->hash->first = NULL;
49803 +
49804 + insert_acl_role_label(r_tmp);
49805 + }
49806 +
49807 + goto return_err;
49808 + cleanup:
49809 + free_variables();
49810 + return_err:
49811 + return err;
49812 +
49813 +}
49814 +
49815 +static int
49816 +gracl_init(struct gr_arg *args)
49817 +{
49818 + int error = 0;
49819 +
49820 + memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
49821 + memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
49822 +
49823 + if (init_variables(args)) {
49824 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
49825 + error = -ENOMEM;
49826 + free_variables();
49827 + goto out;
49828 + }
49829 +
49830 + error = copy_user_acl(args);
49831 + free_init_variables();
49832 + if (error) {
49833 + free_variables();
49834 + goto out;
49835 + }
49836 +
49837 + if ((error = gr_set_acls(0))) {
49838 + free_variables();
49839 + goto out;
49840 + }
49841 +
49842 + pax_open_kernel();
49843 + gr_status |= GR_READY;
49844 + pax_close_kernel();
49845 +
49846 + out:
49847 + return error;
49848 +}
49849 +
49850 +/* derived from glibc fnmatch() 0: match, 1: no match*/
49851 +
49852 +static int
49853 +glob_match(const char *p, const char *n)
49854 +{
49855 + char c;
49856 +
49857 + while ((c = *p++) != '\0') {
49858 + switch (c) {
49859 + case '?':
49860 + if (*n == '\0')
49861 + return 1;
49862 + else if (*n == '/')
49863 + return 1;
49864 + break;
49865 + case '\\':
49866 + if (*n != c)
49867 + return 1;
49868 + break;
49869 + case '*':
49870 + for (c = *p++; c == '?' || c == '*'; c = *p++) {
49871 + if (*n == '/')
49872 + return 1;
49873 + else if (c == '?') {
49874 + if (*n == '\0')
49875 + return 1;
49876 + else
49877 + ++n;
49878 + }
49879 + }
49880 + if (c == '\0') {
49881 + return 0;
49882 + } else {
49883 + const char *endp;
49884 +
49885 + if ((endp = strchr(n, '/')) == NULL)
49886 + endp = n + strlen(n);
49887 +
49888 + if (c == '[') {
49889 + for (--p; n < endp; ++n)
49890 + if (!glob_match(p, n))
49891 + return 0;
49892 + } else if (c == '/') {
49893 + while (*n != '\0' && *n != '/')
49894 + ++n;
49895 + if (*n == '/' && !glob_match(p, n + 1))
49896 + return 0;
49897 + } else {
49898 + for (--p; n < endp; ++n)
49899 + if (*n == c && !glob_match(p, n))
49900 + return 0;
49901 + }
49902 +
49903 + return 1;
49904 + }
49905 + case '[':
49906 + {
49907 + int not;
49908 + char cold;
49909 +
49910 + if (*n == '\0' || *n == '/')
49911 + return 1;
49912 +
49913 + not = (*p == '!' || *p == '^');
49914 + if (not)
49915 + ++p;
49916 +
49917 + c = *p++;
49918 + for (;;) {
49919 + unsigned char fn = (unsigned char)*n;
49920 +
49921 + if (c == '\0')
49922 + return 1;
49923 + else {
49924 + if (c == fn)
49925 + goto matched;
49926 + cold = c;
49927 + c = *p++;
49928 +
49929 + if (c == '-' && *p != ']') {
49930 + unsigned char cend = *p++;
49931 +
49932 + if (cend == '\0')
49933 + return 1;
49934 +
49935 + if (cold <= fn && fn <= cend)
49936 + goto matched;
49937 +
49938 + c = *p++;
49939 + }
49940 + }
49941 +
49942 + if (c == ']')
49943 + break;
49944 + }
49945 + if (!not)
49946 + return 1;
49947 + break;
49948 + matched:
49949 + while (c != ']') {
49950 + if (c == '\0')
49951 + return 1;
49952 +
49953 + c = *p++;
49954 + }
49955 + if (not)
49956 + return 1;
49957 + }
49958 + break;
49959 + default:
49960 + if (c != *n)
49961 + return 1;
49962 + }
49963 +
49964 + ++n;
49965 + }
49966 +
49967 + if (*n == '\0')
49968 + return 0;
49969 +
49970 + if (*n == '/')
49971 + return 0;
49972 +
49973 + return 1;
49974 +}
49975 +
49976 +static struct acl_object_label *
49977 +chk_glob_label(struct acl_object_label *globbed,
49978 + struct dentry *dentry, struct vfsmount *mnt, char **path)
49979 +{
49980 + struct acl_object_label *tmp;
49981 +
49982 + if (*path == NULL)
49983 + *path = gr_to_filename_nolock(dentry, mnt);
49984 +
49985 + tmp = globbed;
49986 +
49987 + while (tmp) {
49988 + if (!glob_match(tmp->filename, *path))
49989 + return tmp;
49990 + tmp = tmp->next;
49991 + }
49992 +
49993 + return NULL;
49994 +}
49995 +
49996 +static struct acl_object_label *
49997 +__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
49998 + const ino_t curr_ino, const dev_t curr_dev,
49999 + const struct acl_subject_label *subj, char **path, const int checkglob)
50000 +{
50001 + struct acl_subject_label *tmpsubj;
50002 + struct acl_object_label *retval;
50003 + struct acl_object_label *retval2;
50004 +
50005 + tmpsubj = (struct acl_subject_label *) subj;
50006 + read_lock(&gr_inode_lock);
50007 + do {
50008 + retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
50009 + if (retval) {
50010 + if (checkglob && retval->globbed) {
50011 + retval2 = chk_glob_label(retval->globbed, (struct dentry *)orig_dentry,
50012 + (struct vfsmount *)orig_mnt, path);
50013 + if (retval2)
50014 + retval = retval2;
50015 + }
50016 + break;
50017 + }
50018 + } while ((tmpsubj = tmpsubj->parent_subject));
50019 + read_unlock(&gr_inode_lock);
50020 +
50021 + return retval;
50022 +}
50023 +
50024 +static __inline__ struct acl_object_label *
50025 +full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
50026 + struct dentry *curr_dentry,
50027 + const struct acl_subject_label *subj, char **path, const int checkglob)
50028 +{
50029 + int newglob = checkglob;
50030 + ino_t inode;
50031 + dev_t device;
50032 +
50033 + /* if we aren't checking a subdirectory of the original path yet, don't do glob checking
50034 + as we don't want a / * rule to match instead of the / object
50035 + don't do this for create lookups that call this function though, since they're looking up
50036 + on the parent and thus need globbing checks on all paths
50037 + */
50038 + if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
50039 + newglob = GR_NO_GLOB;
50040 +
50041 + spin_lock(&curr_dentry->d_lock);
50042 + inode = curr_dentry->d_inode->i_ino;
50043 + device = __get_dev(curr_dentry);
50044 + spin_unlock(&curr_dentry->d_lock);
50045 +
50046 + return __full_lookup(orig_dentry, orig_mnt, inode, device, subj, path, newglob);
50047 +}
50048 +
50049 +static struct acl_object_label *
50050 +__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
50051 + const struct acl_subject_label *subj, char *path, const int checkglob)
50052 +{
50053 + struct dentry *dentry = (struct dentry *) l_dentry;
50054 + struct vfsmount *mnt = (struct vfsmount *) l_mnt;
50055 + struct acl_object_label *retval;
50056 + struct dentry *parent;
50057 +
50058 + write_seqlock(&rename_lock);
50059 + br_read_lock(vfsmount_lock);
50060 +
50061 + if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
50062 +#ifdef CONFIG_NET
50063 + mnt == sock_mnt ||
50064 +#endif
50065 +#ifdef CONFIG_HUGETLBFS
50066 + (mnt == hugetlbfs_vfsmount && dentry->d_inode->i_nlink == 0) ||
50067 +#endif
50068 + /* ignore Eric Biederman */
50069 + IS_PRIVATE(l_dentry->d_inode))) {
50070 + retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
50071 + goto out;
50072 + }
50073 +
50074 + for (;;) {
50075 + if (dentry == real_root.dentry && mnt == real_root.mnt)
50076 + break;
50077 +
50078 + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
50079 + if (mnt->mnt_parent == mnt)
50080 + break;
50081 +
50082 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
50083 + if (retval != NULL)
50084 + goto out;
50085 +
50086 + dentry = mnt->mnt_mountpoint;
50087 + mnt = mnt->mnt_parent;
50088 + continue;
50089 + }
50090 +
50091 + parent = dentry->d_parent;
50092 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
50093 + if (retval != NULL)
50094 + goto out;
50095 +
50096 + dentry = parent;
50097 + }
50098 +
50099 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
50100 +
50101 + /* real_root is pinned so we don't have to hold a reference */
50102 + if (retval == NULL)
50103 + retval = full_lookup(l_dentry, l_mnt, real_root.dentry, subj, &path, checkglob);
50104 +out:
50105 + br_read_unlock(vfsmount_lock);
50106 + write_sequnlock(&rename_lock);
50107 +
50108 + BUG_ON(retval == NULL);
50109 +
50110 + return retval;
50111 +}
50112 +
50113 +static __inline__ struct acl_object_label *
50114 +chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
50115 + const struct acl_subject_label *subj)
50116 +{
50117 + char *path = NULL;
50118 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
50119 +}
50120 +
50121 +static __inline__ struct acl_object_label *
50122 +chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
50123 + const struct acl_subject_label *subj)
50124 +{
50125 + char *path = NULL;
50126 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
50127 +}
50128 +
50129 +static __inline__ struct acl_object_label *
50130 +chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
50131 + const struct acl_subject_label *subj, char *path)
50132 +{
50133 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
50134 +}
50135 +
50136 +static struct acl_subject_label *
50137 +chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
50138 + const struct acl_role_label *role)
50139 +{
50140 + struct dentry *dentry = (struct dentry *) l_dentry;
50141 + struct vfsmount *mnt = (struct vfsmount *) l_mnt;
50142 + struct acl_subject_label *retval;
50143 + struct dentry *parent;
50144 +
50145 + write_seqlock(&rename_lock);
50146 + br_read_lock(vfsmount_lock);
50147 +
50148 + for (;;) {
50149 + if (dentry == real_root.dentry && mnt == real_root.mnt)
50150 + break;
50151 + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
50152 + if (mnt->mnt_parent == mnt)
50153 + break;
50154 +
50155 + spin_lock(&dentry->d_lock);
50156 + read_lock(&gr_inode_lock);
50157 + retval =
50158 + lookup_acl_subj_label(dentry->d_inode->i_ino,
50159 + __get_dev(dentry), role);
50160 + read_unlock(&gr_inode_lock);
50161 + spin_unlock(&dentry->d_lock);
50162 + if (retval != NULL)
50163 + goto out;
50164 +
50165 + dentry = mnt->mnt_mountpoint;
50166 + mnt = mnt->mnt_parent;
50167 + continue;
50168 + }
50169 +
50170 + spin_lock(&dentry->d_lock);
50171 + read_lock(&gr_inode_lock);
50172 + retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
50173 + __get_dev(dentry), role);
50174 + read_unlock(&gr_inode_lock);
50175 + parent = dentry->d_parent;
50176 + spin_unlock(&dentry->d_lock);
50177 +
50178 + if (retval != NULL)
50179 + goto out;
50180 +
50181 + dentry = parent;
50182 + }
50183 +
50184 + spin_lock(&dentry->d_lock);
50185 + read_lock(&gr_inode_lock);
50186 + retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
50187 + __get_dev(dentry), role);
50188 + read_unlock(&gr_inode_lock);
50189 + spin_unlock(&dentry->d_lock);
50190 +
50191 + if (unlikely(retval == NULL)) {
50192 + /* real_root is pinned, we don't need to hold a reference */
50193 + read_lock(&gr_inode_lock);
50194 + retval = lookup_acl_subj_label(real_root.dentry->d_inode->i_ino,
50195 + __get_dev(real_root.dentry), role);
50196 + read_unlock(&gr_inode_lock);
50197 + }
50198 +out:
50199 + br_read_unlock(vfsmount_lock);
50200 + write_sequnlock(&rename_lock);
50201 +
50202 + BUG_ON(retval == NULL);
50203 +
50204 + return retval;
50205 +}
50206 +
50207 +static void
50208 +gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
50209 +{
50210 + struct task_struct *task = current;
50211 + const struct cred *cred = current_cred();
50212 +
50213 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
50214 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
50215 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
50216 + 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
50217 +
50218 + return;
50219 +}
50220 +
50221 +static void
50222 +gr_log_learn_sysctl(const char *path, const __u32 mode)
50223 +{
50224 + struct task_struct *task = current;
50225 + const struct cred *cred = current_cred();
50226 +
50227 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
50228 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
50229 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
50230 + 1UL, 1UL, path, (unsigned long) mode, &task->signal->saved_ip);
50231 +
50232 + return;
50233 +}
50234 +
50235 +static void
50236 +gr_log_learn_id_change(const char type, const unsigned int real,
50237 + const unsigned int effective, const unsigned int fs)
50238 +{
50239 + struct task_struct *task = current;
50240 + const struct cred *cred = current_cred();
50241 +
50242 + security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
50243 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
50244 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
50245 + type, real, effective, fs, &task->signal->saved_ip);
50246 +
50247 + return;
50248 +}
50249 +
50250 +__u32
50251 +gr_search_file(const struct dentry * dentry, const __u32 mode,
50252 + const struct vfsmount * mnt)
50253 +{
50254 + __u32 retval = mode;
50255 + struct acl_subject_label *curracl;
50256 + struct acl_object_label *currobj;
50257 +
50258 + if (unlikely(!(gr_status & GR_READY)))
50259 + return (mode & ~GR_AUDITS);
50260 +
50261 + curracl = current->acl;
50262 +
50263 + currobj = chk_obj_label(dentry, mnt, curracl);
50264 + retval = currobj->mode & mode;
50265 +
50266 + /* if we're opening a specified transfer file for writing
50267 + (e.g. /dev/initctl), then transfer our role to init
50268 + */
50269 + if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
50270 + current->role->roletype & GR_ROLE_PERSIST)) {
50271 + struct task_struct *task = init_pid_ns.child_reaper;
50272 +
50273 + if (task->role != current->role) {
50274 + task->acl_sp_role = 0;
50275 + task->acl_role_id = current->acl_role_id;
50276 + task->role = current->role;
50277 + rcu_read_lock();
50278 + read_lock(&grsec_exec_file_lock);
50279 + gr_apply_subject_to_task(task);
50280 + read_unlock(&grsec_exec_file_lock);
50281 + rcu_read_unlock();
50282 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
50283 + }
50284 + }
50285 +
50286 + if (unlikely
50287 + ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
50288 + && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
50289 + __u32 new_mode = mode;
50290 +
50291 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
50292 +
50293 + retval = new_mode;
50294 +
50295 + if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
50296 + new_mode |= GR_INHERIT;
50297 +
50298 + if (!(mode & GR_NOLEARN))
50299 + gr_log_learn(dentry, mnt, new_mode);
50300 + }
50301 +
50302 + return retval;
50303 +}
50304 +
50305 +struct acl_object_label *gr_get_create_object(const struct dentry *new_dentry,
50306 + const struct dentry *parent,
50307 + const struct vfsmount *mnt)
50308 +{
50309 + struct name_entry *match;
50310 + struct acl_object_label *matchpo;
50311 + struct acl_subject_label *curracl;
50312 + char *path;
50313 +
50314 + if (unlikely(!(gr_status & GR_READY)))
50315 + return NULL;
50316 +
50317 + preempt_disable();
50318 + path = gr_to_filename_rbac(new_dentry, mnt);
50319 + match = lookup_name_entry_create(path);
50320 +
50321 + curracl = current->acl;
50322 +
50323 + if (match) {
50324 + read_lock(&gr_inode_lock);
50325 + matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
50326 + read_unlock(&gr_inode_lock);
50327 +
50328 + if (matchpo) {
50329 + preempt_enable();
50330 + return matchpo;
50331 + }
50332 + }
50333 +
50334 + // lookup parent
50335 +
50336 + matchpo = chk_obj_create_label(parent, mnt, curracl, path);
50337 +
50338 + preempt_enable();
50339 + return matchpo;
50340 +}
50341 +
50342 +__u32
50343 +gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
50344 + const struct vfsmount * mnt, const __u32 mode)
50345 +{
50346 + struct acl_object_label *matchpo;
50347 + __u32 retval;
50348 +
50349 + if (unlikely(!(gr_status & GR_READY)))
50350 + return (mode & ~GR_AUDITS);
50351 +
50352 + matchpo = gr_get_create_object(new_dentry, parent, mnt);
50353 +
50354 + retval = matchpo->mode & mode;
50355 +
50356 + if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
50357 + && (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
50358 + __u32 new_mode = mode;
50359 +
50360 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
50361 +
50362 + gr_log_learn(new_dentry, mnt, new_mode);
50363 + return new_mode;
50364 + }
50365 +
50366 + return retval;
50367 +}
50368 +
50369 +__u32
50370 +gr_check_link(const struct dentry * new_dentry,
50371 + const struct dentry * parent_dentry,
50372 + const struct vfsmount * parent_mnt,
50373 + const struct dentry * old_dentry, const struct vfsmount * old_mnt)
50374 +{
50375 + struct acl_object_label *obj;
50376 + __u32 oldmode, newmode;
50377 + __u32 needmode;
50378 + __u32 checkmodes = GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | GR_SETID | GR_READ |
50379 + GR_DELETE | GR_INHERIT;
50380 +
50381 + if (unlikely(!(gr_status & GR_READY)))
50382 + return (GR_CREATE | GR_LINK);
50383 +
50384 + obj = chk_obj_label(old_dentry, old_mnt, current->acl);
50385 + oldmode = obj->mode;
50386 +
50387 + obj = gr_get_create_object(new_dentry, parent_dentry, parent_mnt);
50388 + newmode = obj->mode;
50389 +
50390 + needmode = newmode & checkmodes;
50391 +
50392 + // old name for hardlink must have at least the permissions of the new name
50393 + if ((oldmode & needmode) != needmode)
50394 + goto bad;
50395 +
50396 + // if old name had restrictions/auditing, make sure the new name does as well
50397 + needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
50398 +
50399 + // don't allow hardlinking of suid/sgid files without permission
50400 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
50401 + needmode |= GR_SETID;
50402 +
50403 + if ((newmode & needmode) != needmode)
50404 + goto bad;
50405 +
50406 + // enforce minimum permissions
50407 + if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
50408 + return newmode;
50409 +bad:
50410 + needmode = oldmode;
50411 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
50412 + needmode |= GR_SETID;
50413 +
50414 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
50415 + gr_log_learn(old_dentry, old_mnt, needmode | GR_CREATE | GR_LINK);
50416 + return (GR_CREATE | GR_LINK);
50417 + } else if (newmode & GR_SUPPRESS)
50418 + return GR_SUPPRESS;
50419 + else
50420 + return 0;
50421 +}
50422 +
50423 +int
50424 +gr_check_hidden_task(const struct task_struct *task)
50425 +{
50426 + if (unlikely(!(gr_status & GR_READY)))
50427 + return 0;
50428 +
50429 + if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
50430 + return 1;
50431 +
50432 + return 0;
50433 +}
50434 +
50435 +int
50436 +gr_check_protected_task(const struct task_struct *task)
50437 +{
50438 + if (unlikely(!(gr_status & GR_READY) || !task))
50439 + return 0;
50440 +
50441 + if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
50442 + task->acl != current->acl)
50443 + return 1;
50444 +
50445 + return 0;
50446 +}
50447 +
50448 +int
50449 +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
50450 +{
50451 + struct task_struct *p;
50452 + int ret = 0;
50453 +
50454 + if (unlikely(!(gr_status & GR_READY) || !pid))
50455 + return ret;
50456 +
50457 + read_lock(&tasklist_lock);
50458 + do_each_pid_task(pid, type, p) {
50459 + if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
50460 + p->acl != current->acl) {
50461 + ret = 1;
50462 + goto out;
50463 + }
50464 + } while_each_pid_task(pid, type, p);
50465 +out:
50466 + read_unlock(&tasklist_lock);
50467 +
50468 + return ret;
50469 +}
50470 +
50471 +void
50472 +gr_copy_label(struct task_struct *tsk)
50473 +{
50474 + /* plain copying of fields is already done by dup_task_struct */
50475 + tsk->signal->used_accept = 0;
50476 + tsk->acl_sp_role = 0;
50477 + //tsk->acl_role_id = current->acl_role_id;
50478 + //tsk->acl = current->acl;
50479 + //tsk->role = current->role;
50480 + tsk->signal->curr_ip = current->signal->curr_ip;
50481 + tsk->signal->saved_ip = current->signal->saved_ip;
50482 + if (current->exec_file)
50483 + get_file(current->exec_file);
50484 + //tsk->exec_file = current->exec_file;
50485 + //tsk->is_writable = current->is_writable;
50486 + if (unlikely(current->signal->used_accept)) {
50487 + current->signal->curr_ip = 0;
50488 + current->signal->saved_ip = 0;
50489 + }
50490 +
50491 + return;
50492 +}
50493 +
50494 +static void
50495 +gr_set_proc_res(struct task_struct *task)
50496 +{
50497 + struct acl_subject_label *proc;
50498 + unsigned short i;
50499 +
50500 + proc = task->acl;
50501 +
50502 + if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
50503 + return;
50504 +
50505 + for (i = 0; i < RLIM_NLIMITS; i++) {
50506 + if (!(proc->resmask & (1 << i)))
50507 + continue;
50508 +
50509 + task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
50510 + task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
50511 + }
50512 +
50513 + return;
50514 +}
50515 +
50516 +extern int __gr_process_user_ban(struct user_struct *user);
50517 +
50518 +int
50519 +gr_check_user_change(int real, int effective, int fs)
50520 +{
50521 + unsigned int i;
50522 + __u16 num;
50523 + uid_t *uidlist;
50524 + int curuid;
50525 + int realok = 0;
50526 + int effectiveok = 0;
50527 + int fsok = 0;
50528 +
50529 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
50530 + struct user_struct *user;
50531 +
50532 + if (real == -1)
50533 + goto skipit;
50534 +
50535 + user = find_user(real);
50536 + if (user == NULL)
50537 + goto skipit;
50538 +
50539 + if (__gr_process_user_ban(user)) {
50540 + /* for find_user */
50541 + free_uid(user);
50542 + return 1;
50543 + }
50544 +
50545 + /* for find_user */
50546 + free_uid(user);
50547 +
50548 +skipit:
50549 +#endif
50550 +
50551 + if (unlikely(!(gr_status & GR_READY)))
50552 + return 0;
50553 +
50554 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
50555 + gr_log_learn_id_change('u', real, effective, fs);
50556 +
50557 + num = current->acl->user_trans_num;
50558 + uidlist = current->acl->user_transitions;
50559 +
50560 + if (uidlist == NULL)
50561 + return 0;
50562 +
50563 + if (real == -1)
50564 + realok = 1;
50565 + if (effective == -1)
50566 + effectiveok = 1;
50567 + if (fs == -1)
50568 + fsok = 1;
50569 +
50570 + if (current->acl->user_trans_type & GR_ID_ALLOW) {
50571 + for (i = 0; i < num; i++) {
50572 + curuid = (int)uidlist[i];
50573 + if (real == curuid)
50574 + realok = 1;
50575 + if (effective == curuid)
50576 + effectiveok = 1;
50577 + if (fs == curuid)
50578 + fsok = 1;
50579 + }
50580 + } else if (current->acl->user_trans_type & GR_ID_DENY) {
50581 + for (i = 0; i < num; i++) {
50582 + curuid = (int)uidlist[i];
50583 + if (real == curuid)
50584 + break;
50585 + if (effective == curuid)
50586 + break;
50587 + if (fs == curuid)
50588 + break;
50589 + }
50590 + /* not in deny list */
50591 + if (i == num) {
50592 + realok = 1;
50593 + effectiveok = 1;
50594 + fsok = 1;
50595 + }
50596 + }
50597 +
50598 + if (realok && effectiveok && fsok)
50599 + return 0;
50600 + else {
50601 + gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
50602 + return 1;
50603 + }
50604 +}
50605 +
50606 +int
50607 +gr_check_group_change(int real, int effective, int fs)
50608 +{
50609 + unsigned int i;
50610 + __u16 num;
50611 + gid_t *gidlist;
50612 + int curgid;
50613 + int realok = 0;
50614 + int effectiveok = 0;
50615 + int fsok = 0;
50616 +
50617 + if (unlikely(!(gr_status & GR_READY)))
50618 + return 0;
50619 +
50620 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
50621 + gr_log_learn_id_change('g', real, effective, fs);
50622 +
50623 + num = current->acl->group_trans_num;
50624 + gidlist = current->acl->group_transitions;
50625 +
50626 + if (gidlist == NULL)
50627 + return 0;
50628 +
50629 + if (real == -1)
50630 + realok = 1;
50631 + if (effective == -1)
50632 + effectiveok = 1;
50633 + if (fs == -1)
50634 + fsok = 1;
50635 +
50636 + if (current->acl->group_trans_type & GR_ID_ALLOW) {
50637 + for (i = 0; i < num; i++) {
50638 + curgid = (int)gidlist[i];
50639 + if (real == curgid)
50640 + realok = 1;
50641 + if (effective == curgid)
50642 + effectiveok = 1;
50643 + if (fs == curgid)
50644 + fsok = 1;
50645 + }
50646 + } else if (current->acl->group_trans_type & GR_ID_DENY) {
50647 + for (i = 0; i < num; i++) {
50648 + curgid = (int)gidlist[i];
50649 + if (real == curgid)
50650 + break;
50651 + if (effective == curgid)
50652 + break;
50653 + if (fs == curgid)
50654 + break;
50655 + }
50656 + /* not in deny list */
50657 + if (i == num) {
50658 + realok = 1;
50659 + effectiveok = 1;
50660 + fsok = 1;
50661 + }
50662 + }
50663 +
50664 + if (realok && effectiveok && fsok)
50665 + return 0;
50666 + else {
50667 + gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
50668 + return 1;
50669 + }
50670 +}
50671 +
50672 +extern int gr_acl_is_capable(const int cap);
50673 +
50674 +void
50675 +gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid)
50676 +{
50677 + struct acl_role_label *role = task->role;
50678 + struct acl_subject_label *subj = NULL;
50679 + struct acl_object_label *obj;
50680 + struct file *filp;
50681 +
50682 + if (unlikely(!(gr_status & GR_READY)))
50683 + return;
50684 +
50685 + filp = task->exec_file;
50686 +
50687 + /* kernel process, we'll give them the kernel role */
50688 + if (unlikely(!filp)) {
50689 + task->role = kernel_role;
50690 + task->acl = kernel_role->root_label;
50691 + return;
50692 + } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
50693 + role = lookup_acl_role_label(task, uid, gid);
50694 +
50695 + /* don't change the role if we're not a privileged process */
50696 + if (role && task->role != role &&
50697 + (((role->roletype & GR_ROLE_USER) && !gr_acl_is_capable(CAP_SETUID)) ||
50698 + ((role->roletype & GR_ROLE_GROUP) && !gr_acl_is_capable(CAP_SETGID))))
50699 + return;
50700 +
50701 + /* perform subject lookup in possibly new role
50702 + we can use this result below in the case where role == task->role
50703 + */
50704 + subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
50705 +
50706 + /* if we changed uid/gid, but result in the same role
50707 + and are using inheritance, don't lose the inherited subject
50708 + if current subject is other than what normal lookup
50709 + would result in, we arrived via inheritance, don't
50710 + lose subject
50711 + */
50712 + if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
50713 + (subj == task->acl)))
50714 + task->acl = subj;
50715 +
50716 + task->role = role;
50717 +
50718 + task->is_writable = 0;
50719 +
50720 + /* ignore additional mmap checks for processes that are writable
50721 + by the default ACL */
50722 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
50723 + if (unlikely(obj->mode & GR_WRITE))
50724 + task->is_writable = 1;
50725 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
50726 + if (unlikely(obj->mode & GR_WRITE))
50727 + task->is_writable = 1;
50728 +
50729 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
50730 + printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
50731 +#endif
50732 +
50733 + gr_set_proc_res(task);
50734 +
50735 + return;
50736 +}
50737 +
50738 +int
50739 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
50740 + const int unsafe_flags)
50741 +{
50742 + struct task_struct *task = current;
50743 + struct acl_subject_label *newacl;
50744 + struct acl_object_label *obj;
50745 + __u32 retmode;
50746 +
50747 + if (unlikely(!(gr_status & GR_READY)))
50748 + return 0;
50749 +
50750 + newacl = chk_subj_label(dentry, mnt, task->role);
50751 +
50752 + task_lock(task);
50753 + if (unsafe_flags && !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
50754 + !(task->role->roletype & GR_ROLE_GOD) &&
50755 + !gr_search_file(dentry, GR_PTRACERD, mnt) &&
50756 + !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
50757 + task_unlock(task);
50758 + if (unsafe_flags & LSM_UNSAFE_SHARE)
50759 + gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
50760 + else
50761 + gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
50762 + return -EACCES;
50763 + }
50764 + task_unlock(task);
50765 +
50766 + obj = chk_obj_label(dentry, mnt, task->acl);
50767 + retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
50768 +
50769 + if (!(task->acl->mode & GR_INHERITLEARN) &&
50770 + ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
50771 + if (obj->nested)
50772 + task->acl = obj->nested;
50773 + else
50774 + task->acl = newacl;
50775 + } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
50776 + gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
50777 +
50778 + task->is_writable = 0;
50779 +
50780 + /* ignore additional mmap checks for processes that are writable
50781 + by the default ACL */
50782 + obj = chk_obj_label(dentry, mnt, default_role->root_label);
50783 + if (unlikely(obj->mode & GR_WRITE))
50784 + task->is_writable = 1;
50785 + obj = chk_obj_label(dentry, mnt, task->role->root_label);
50786 + if (unlikely(obj->mode & GR_WRITE))
50787 + task->is_writable = 1;
50788 +
50789 + gr_set_proc_res(task);
50790 +
50791 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
50792 + printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
50793 +#endif
50794 + return 0;
50795 +}
50796 +
50797 +/* always called with valid inodev ptr */
50798 +static void
50799 +do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
50800 +{
50801 + struct acl_object_label *matchpo;
50802 + struct acl_subject_label *matchps;
50803 + struct acl_subject_label *subj;
50804 + struct acl_role_label *role;
50805 + unsigned int x;
50806 +
50807 + FOR_EACH_ROLE_START(role)
50808 + FOR_EACH_SUBJECT_START(role, subj, x)
50809 + if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
50810 + matchpo->mode |= GR_DELETED;
50811 + FOR_EACH_SUBJECT_END(subj,x)
50812 + FOR_EACH_NESTED_SUBJECT_START(role, subj)
50813 + if (subj->inode == ino && subj->device == dev)
50814 + subj->mode |= GR_DELETED;
50815 + FOR_EACH_NESTED_SUBJECT_END(subj)
50816 + if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
50817 + matchps->mode |= GR_DELETED;
50818 + FOR_EACH_ROLE_END(role)
50819 +
50820 + inodev->nentry->deleted = 1;
50821 +
50822 + return;
50823 +}
50824 +
50825 +void
50826 +gr_handle_delete(const ino_t ino, const dev_t dev)
50827 +{
50828 + struct inodev_entry *inodev;
50829 +
50830 + if (unlikely(!(gr_status & GR_READY)))
50831 + return;
50832 +
50833 + write_lock(&gr_inode_lock);
50834 + inodev = lookup_inodev_entry(ino, dev);
50835 + if (inodev != NULL)
50836 + do_handle_delete(inodev, ino, dev);
50837 + write_unlock(&gr_inode_lock);
50838 +
50839 + return;
50840 +}
50841 +
50842 +static void
50843 +update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
50844 + const ino_t newinode, const dev_t newdevice,
50845 + struct acl_subject_label *subj)
50846 +{
50847 + unsigned int index = fhash(oldinode, olddevice, subj->obj_hash_size);
50848 + struct acl_object_label *match;
50849 +
50850 + match = subj->obj_hash[index];
50851 +
50852 + while (match && (match->inode != oldinode ||
50853 + match->device != olddevice ||
50854 + !(match->mode & GR_DELETED)))
50855 + match = match->next;
50856 +
50857 + if (match && (match->inode == oldinode)
50858 + && (match->device == olddevice)
50859 + && (match->mode & GR_DELETED)) {
50860 + if (match->prev == NULL) {
50861 + subj->obj_hash[index] = match->next;
50862 + if (match->next != NULL)
50863 + match->next->prev = NULL;
50864 + } else {
50865 + match->prev->next = match->next;
50866 + if (match->next != NULL)
50867 + match->next->prev = match->prev;
50868 + }
50869 + match->prev = NULL;
50870 + match->next = NULL;
50871 + match->inode = newinode;
50872 + match->device = newdevice;
50873 + match->mode &= ~GR_DELETED;
50874 +
50875 + insert_acl_obj_label(match, subj);
50876 + }
50877 +
50878 + return;
50879 +}
50880 +
50881 +static void
50882 +update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
50883 + const ino_t newinode, const dev_t newdevice,
50884 + struct acl_role_label *role)
50885 +{
50886 + unsigned int index = fhash(oldinode, olddevice, role->subj_hash_size);
50887 + struct acl_subject_label *match;
50888 +
50889 + match = role->subj_hash[index];
50890 +
50891 + while (match && (match->inode != oldinode ||
50892 + match->device != olddevice ||
50893 + !(match->mode & GR_DELETED)))
50894 + match = match->next;
50895 +
50896 + if (match && (match->inode == oldinode)
50897 + && (match->device == olddevice)
50898 + && (match->mode & GR_DELETED)) {
50899 + if (match->prev == NULL) {
50900 + role->subj_hash[index] = match->next;
50901 + if (match->next != NULL)
50902 + match->next->prev = NULL;
50903 + } else {
50904 + match->prev->next = match->next;
50905 + if (match->next != NULL)
50906 + match->next->prev = match->prev;
50907 + }
50908 + match->prev = NULL;
50909 + match->next = NULL;
50910 + match->inode = newinode;
50911 + match->device = newdevice;
50912 + match->mode &= ~GR_DELETED;
50913 +
50914 + insert_acl_subj_label(match, role);
50915 + }
50916 +
50917 + return;
50918 +}
50919 +
50920 +static void
50921 +update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
50922 + const ino_t newinode, const dev_t newdevice)
50923 +{
50924 + unsigned int index = fhash(oldinode, olddevice, inodev_set.i_size);
50925 + struct inodev_entry *match;
50926 +
50927 + match = inodev_set.i_hash[index];
50928 +
50929 + while (match && (match->nentry->inode != oldinode ||
50930 + match->nentry->device != olddevice || !match->nentry->deleted))
50931 + match = match->next;
50932 +
50933 + if (match && (match->nentry->inode == oldinode)
50934 + && (match->nentry->device == olddevice) &&
50935 + match->nentry->deleted) {
50936 + if (match->prev == NULL) {
50937 + inodev_set.i_hash[index] = match->next;
50938 + if (match->next != NULL)
50939 + match->next->prev = NULL;
50940 + } else {
50941 + match->prev->next = match->next;
50942 + if (match->next != NULL)
50943 + match->next->prev = match->prev;
50944 + }
50945 + match->prev = NULL;
50946 + match->next = NULL;
50947 + match->nentry->inode = newinode;
50948 + match->nentry->device = newdevice;
50949 + match->nentry->deleted = 0;
50950 +
50951 + insert_inodev_entry(match);
50952 + }
50953 +
50954 + return;
50955 +}
50956 +
50957 +static void
50958 +__do_handle_create(const struct name_entry *matchn, ino_t ino, dev_t dev)
50959 +{
50960 + struct acl_subject_label *subj;
50961 + struct acl_role_label *role;
50962 + unsigned int x;
50963 +
50964 + FOR_EACH_ROLE_START(role)
50965 + update_acl_subj_label(matchn->inode, matchn->device, ino, dev, role);
50966 +
50967 + FOR_EACH_NESTED_SUBJECT_START(role, subj)
50968 + if ((subj->inode == ino) && (subj->device == dev)) {
50969 + subj->inode = ino;
50970 + subj->device = dev;
50971 + }
50972 + FOR_EACH_NESTED_SUBJECT_END(subj)
50973 + FOR_EACH_SUBJECT_START(role, subj, x)
50974 + update_acl_obj_label(matchn->inode, matchn->device,
50975 + ino, dev, subj);
50976 + FOR_EACH_SUBJECT_END(subj,x)
50977 + FOR_EACH_ROLE_END(role)
50978 +
50979 + update_inodev_entry(matchn->inode, matchn->device, ino, dev);
50980 +
50981 + return;
50982 +}
50983 +
50984 +static void
50985 +do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
50986 + const struct vfsmount *mnt)
50987 +{
50988 + ino_t ino = dentry->d_inode->i_ino;
50989 + dev_t dev = __get_dev(dentry);
50990 +
50991 + __do_handle_create(matchn, ino, dev);
50992 +
50993 + return;
50994 +}
50995 +
50996 +void
50997 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
50998 +{
50999 + struct name_entry *matchn;
51000 +
51001 + if (unlikely(!(gr_status & GR_READY)))
51002 + return;
51003 +
51004 + preempt_disable();
51005 + matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
51006 +
51007 + if (unlikely((unsigned long)matchn)) {
51008 + write_lock(&gr_inode_lock);
51009 + do_handle_create(matchn, dentry, mnt);
51010 + write_unlock(&gr_inode_lock);
51011 + }
51012 + preempt_enable();
51013 +
51014 + return;
51015 +}
51016 +
51017 +void
51018 +gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
51019 +{
51020 + struct name_entry *matchn;
51021 +
51022 + if (unlikely(!(gr_status & GR_READY)))
51023 + return;
51024 +
51025 + preempt_disable();
51026 + matchn = lookup_name_entry(gr_to_proc_filename_rbac(dentry, init_pid_ns.proc_mnt));
51027 +
51028 + if (unlikely((unsigned long)matchn)) {
51029 + write_lock(&gr_inode_lock);
51030 + __do_handle_create(matchn, inode->i_ino, inode->i_sb->s_dev);
51031 + write_unlock(&gr_inode_lock);
51032 + }
51033 + preempt_enable();
51034 +
51035 + return;
51036 +}
51037 +
51038 +void
51039 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
51040 + struct dentry *old_dentry,
51041 + struct dentry *new_dentry,
51042 + struct vfsmount *mnt, const __u8 replace)
51043 +{
51044 + struct name_entry *matchn;
51045 + struct inodev_entry *inodev;
51046 + struct inode *inode = new_dentry->d_inode;
51047 + ino_t old_ino = old_dentry->d_inode->i_ino;
51048 + dev_t old_dev = __get_dev(old_dentry);
51049 +
51050 + /* vfs_rename swaps the name and parent link for old_dentry and
51051 + new_dentry
51052 + at this point, old_dentry has the new name, parent link, and inode
51053 + for the renamed file
51054 + if a file is being replaced by a rename, new_dentry has the inode
51055 + and name for the replaced file
51056 + */
51057 +
51058 + if (unlikely(!(gr_status & GR_READY)))
51059 + return;
51060 +
51061 + preempt_disable();
51062 + matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
51063 +
51064 + /* we wouldn't have to check d_inode if it weren't for
51065 + NFS silly-renaming
51066 + */
51067 +
51068 + write_lock(&gr_inode_lock);
51069 + if (unlikely(replace && inode)) {
51070 + ino_t new_ino = inode->i_ino;
51071 + dev_t new_dev = __get_dev(new_dentry);
51072 +
51073 + inodev = lookup_inodev_entry(new_ino, new_dev);
51074 + if (inodev != NULL && ((inode->i_nlink <= 1) || S_ISDIR(inode->i_mode)))
51075 + do_handle_delete(inodev, new_ino, new_dev);
51076 + }
51077 +
51078 + inodev = lookup_inodev_entry(old_ino, old_dev);
51079 + if (inodev != NULL && ((old_dentry->d_inode->i_nlink <= 1) || S_ISDIR(old_dentry->d_inode->i_mode)))
51080 + do_handle_delete(inodev, old_ino, old_dev);
51081 +
51082 + if (unlikely((unsigned long)matchn))
51083 + do_handle_create(matchn, old_dentry, mnt);
51084 +
51085 + write_unlock(&gr_inode_lock);
51086 + preempt_enable();
51087 +
51088 + return;
51089 +}
51090 +
51091 +static int
51092 +lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
51093 + unsigned char **sum)
51094 +{
51095 + struct acl_role_label *r;
51096 + struct role_allowed_ip *ipp;
51097 + struct role_transition *trans;
51098 + unsigned int i;
51099 + int found = 0;
51100 + u32 curr_ip = current->signal->curr_ip;
51101 +
51102 + current->signal->saved_ip = curr_ip;
51103 +
51104 + /* check transition table */
51105 +
51106 + for (trans = current->role->transitions; trans; trans = trans->next) {
51107 + if (!strcmp(rolename, trans->rolename)) {
51108 + found = 1;
51109 + break;
51110 + }
51111 + }
51112 +
51113 + if (!found)
51114 + return 0;
51115 +
51116 + /* handle special roles that do not require authentication
51117 + and check ip */
51118 +
51119 + FOR_EACH_ROLE_START(r)
51120 + if (!strcmp(rolename, r->rolename) &&
51121 + (r->roletype & GR_ROLE_SPECIAL)) {
51122 + found = 0;
51123 + if (r->allowed_ips != NULL) {
51124 + for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
51125 + if ((ntohl(curr_ip) & ipp->netmask) ==
51126 + (ntohl(ipp->addr) & ipp->netmask))
51127 + found = 1;
51128 + }
51129 + } else
51130 + found = 2;
51131 + if (!found)
51132 + return 0;
51133 +
51134 + if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
51135 + ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
51136 + *salt = NULL;
51137 + *sum = NULL;
51138 + return 1;
51139 + }
51140 + }
51141 + FOR_EACH_ROLE_END(r)
51142 +
51143 + for (i = 0; i < num_sprole_pws; i++) {
51144 + if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
51145 + *salt = acl_special_roles[i]->salt;
51146 + *sum = acl_special_roles[i]->sum;
51147 + return 1;
51148 + }
51149 + }
51150 +
51151 + return 0;
51152 +}
51153 +
51154 +static void
51155 +assign_special_role(char *rolename)
51156 +{
51157 + struct acl_object_label *obj;
51158 + struct acl_role_label *r;
51159 + struct acl_role_label *assigned = NULL;
51160 + struct task_struct *tsk;
51161 + struct file *filp;
51162 +
51163 + FOR_EACH_ROLE_START(r)
51164 + if (!strcmp(rolename, r->rolename) &&
51165 + (r->roletype & GR_ROLE_SPECIAL)) {
51166 + assigned = r;
51167 + break;
51168 + }
51169 + FOR_EACH_ROLE_END(r)
51170 +
51171 + if (!assigned)
51172 + return;
51173 +
51174 + read_lock(&tasklist_lock);
51175 + read_lock(&grsec_exec_file_lock);
51176 +
51177 + tsk = current->real_parent;
51178 + if (tsk == NULL)
51179 + goto out_unlock;
51180 +
51181 + filp = tsk->exec_file;
51182 + if (filp == NULL)
51183 + goto out_unlock;
51184 +
51185 + tsk->is_writable = 0;
51186 +
51187 + tsk->acl_sp_role = 1;
51188 + tsk->acl_role_id = ++acl_sp_role_value;
51189 + tsk->role = assigned;
51190 + tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
51191 +
51192 + /* ignore additional mmap checks for processes that are writable
51193 + by the default ACL */
51194 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
51195 + if (unlikely(obj->mode & GR_WRITE))
51196 + tsk->is_writable = 1;
51197 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
51198 + if (unlikely(obj->mode & GR_WRITE))
51199 + tsk->is_writable = 1;
51200 +
51201 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
51202 + printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid);
51203 +#endif
51204 +
51205 +out_unlock:
51206 + read_unlock(&grsec_exec_file_lock);
51207 + read_unlock(&tasklist_lock);
51208 + return;
51209 +}
51210 +
51211 +int gr_check_secure_terminal(struct task_struct *task)
51212 +{
51213 + struct task_struct *p, *p2, *p3;
51214 + struct files_struct *files;
51215 + struct fdtable *fdt;
51216 + struct file *our_file = NULL, *file;
51217 + int i;
51218 +
51219 + if (task->signal->tty == NULL)
51220 + return 1;
51221 +
51222 + files = get_files_struct(task);
51223 + if (files != NULL) {
51224 + rcu_read_lock();
51225 + fdt = files_fdtable(files);
51226 + for (i=0; i < fdt->max_fds; i++) {
51227 + file = fcheck_files(files, i);
51228 + if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
51229 + get_file(file);
51230 + our_file = file;
51231 + }
51232 + }
51233 + rcu_read_unlock();
51234 + put_files_struct(files);
51235 + }
51236 +
51237 + if (our_file == NULL)
51238 + return 1;
51239 +
51240 + read_lock(&tasklist_lock);
51241 + do_each_thread(p2, p) {
51242 + files = get_files_struct(p);
51243 + if (files == NULL ||
51244 + (p->signal && p->signal->tty == task->signal->tty)) {
51245 + if (files != NULL)
51246 + put_files_struct(files);
51247 + continue;
51248 + }
51249 + rcu_read_lock();
51250 + fdt = files_fdtable(files);
51251 + for (i=0; i < fdt->max_fds; i++) {
51252 + file = fcheck_files(files, i);
51253 + if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
51254 + file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
51255 + p3 = task;
51256 + while (p3->pid > 0) {
51257 + if (p3 == p)
51258 + break;
51259 + p3 = p3->real_parent;
51260 + }
51261 + if (p3 == p)
51262 + break;
51263 + gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
51264 + gr_handle_alertkill(p);
51265 + rcu_read_unlock();
51266 + put_files_struct(files);
51267 + read_unlock(&tasklist_lock);
51268 + fput(our_file);
51269 + return 0;
51270 + }
51271 + }
51272 + rcu_read_unlock();
51273 + put_files_struct(files);
51274 + } while_each_thread(p2, p);
51275 + read_unlock(&tasklist_lock);
51276 +
51277 + fput(our_file);
51278 + return 1;
51279 +}
51280 +
51281 +ssize_t
51282 +write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
51283 +{
51284 + struct gr_arg_wrapper uwrap;
51285 + unsigned char *sprole_salt = NULL;
51286 + unsigned char *sprole_sum = NULL;
51287 + int error = sizeof (struct gr_arg_wrapper);
51288 + int error2 = 0;
51289 +
51290 + mutex_lock(&gr_dev_mutex);
51291 +
51292 + if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
51293 + error = -EPERM;
51294 + goto out;
51295 + }
51296 +
51297 + if (count != sizeof (struct gr_arg_wrapper)) {
51298 + gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
51299 + error = -EINVAL;
51300 + goto out;
51301 + }
51302 +
51303 +
51304 + if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
51305 + gr_auth_expires = 0;
51306 + gr_auth_attempts = 0;
51307 + }
51308 +
51309 + if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
51310 + error = -EFAULT;
51311 + goto out;
51312 + }
51313 +
51314 + if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
51315 + error = -EINVAL;
51316 + goto out;
51317 + }
51318 +
51319 + if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
51320 + error = -EFAULT;
51321 + goto out;
51322 + }
51323 +
51324 + if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
51325 + gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
51326 + time_after(gr_auth_expires, get_seconds())) {
51327 + error = -EBUSY;
51328 + goto out;
51329 + }
51330 +
51331 + /* if non-root trying to do anything other than use a special role,
51332 + do not attempt authentication, do not count towards authentication
51333 + locking
51334 + */
51335 +
51336 + if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
51337 + gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
51338 + current_uid()) {
51339 + error = -EPERM;
51340 + goto out;
51341 + }
51342 +
51343 + /* ensure pw and special role name are null terminated */
51344 +
51345 + gr_usermode->pw[GR_PW_LEN - 1] = '\0';
51346 + gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
51347 +
51348 + /* Okay.
51349 + * We have our enough of the argument structure..(we have yet
51350 + * to copy_from_user the tables themselves) . Copy the tables
51351 + * only if we need them, i.e. for loading operations. */
51352 +
51353 + switch (gr_usermode->mode) {
51354 + case GR_STATUS:
51355 + if (gr_status & GR_READY) {
51356 + error = 1;
51357 + if (!gr_check_secure_terminal(current))
51358 + error = 3;
51359 + } else
51360 + error = 2;
51361 + goto out;
51362 + case GR_SHUTDOWN:
51363 + if ((gr_status & GR_READY)
51364 + && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
51365 + pax_open_kernel();
51366 + gr_status &= ~GR_READY;
51367 + pax_close_kernel();
51368 +
51369 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
51370 + free_variables();
51371 + memset(gr_usermode, 0, sizeof (struct gr_arg));
51372 + memset(gr_system_salt, 0, GR_SALT_LEN);
51373 + memset(gr_system_sum, 0, GR_SHA_LEN);
51374 + } else if (gr_status & GR_READY) {
51375 + gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
51376 + error = -EPERM;
51377 + } else {
51378 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
51379 + error = -EAGAIN;
51380 + }
51381 + break;
51382 + case GR_ENABLE:
51383 + if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
51384 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
51385 + else {
51386 + if (gr_status & GR_READY)
51387 + error = -EAGAIN;
51388 + else
51389 + error = error2;
51390 + gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
51391 + }
51392 + break;
51393 + case GR_RELOAD:
51394 + if (!(gr_status & GR_READY)) {
51395 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
51396 + error = -EAGAIN;
51397 + } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
51398 + preempt_disable();
51399 +
51400 + pax_open_kernel();
51401 + gr_status &= ~GR_READY;
51402 + pax_close_kernel();
51403 +
51404 + free_variables();
51405 + if (!(error2 = gracl_init(gr_usermode))) {
51406 + preempt_enable();
51407 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
51408 + } else {
51409 + preempt_enable();
51410 + error = error2;
51411 + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
51412 + }
51413 + } else {
51414 + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
51415 + error = -EPERM;
51416 + }
51417 + break;
51418 + case GR_SEGVMOD:
51419 + if (unlikely(!(gr_status & GR_READY))) {
51420 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
51421 + error = -EAGAIN;
51422 + break;
51423 + }
51424 +
51425 + if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
51426 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
51427 + if (gr_usermode->segv_device && gr_usermode->segv_inode) {
51428 + struct acl_subject_label *segvacl;
51429 + segvacl =
51430 + lookup_acl_subj_label(gr_usermode->segv_inode,
51431 + gr_usermode->segv_device,
51432 + current->role);
51433 + if (segvacl) {
51434 + segvacl->crashes = 0;
51435 + segvacl->expires = 0;
51436 + }
51437 + } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
51438 + gr_remove_uid(gr_usermode->segv_uid);
51439 + }
51440 + } else {
51441 + gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
51442 + error = -EPERM;
51443 + }
51444 + break;
51445 + case GR_SPROLE:
51446 + case GR_SPROLEPAM:
51447 + if (unlikely(!(gr_status & GR_READY))) {
51448 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
51449 + error = -EAGAIN;
51450 + break;
51451 + }
51452 +
51453 + if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
51454 + current->role->expires = 0;
51455 + current->role->auth_attempts = 0;
51456 + }
51457 +
51458 + if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
51459 + time_after(current->role->expires, get_seconds())) {
51460 + error = -EBUSY;
51461 + goto out;
51462 + }
51463 +
51464 + if (lookup_special_role_auth
51465 + (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
51466 + && ((!sprole_salt && !sprole_sum)
51467 + || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
51468 + char *p = "";
51469 + assign_special_role(gr_usermode->sp_role);
51470 + read_lock(&tasklist_lock);
51471 + if (current->real_parent)
51472 + p = current->real_parent->role->rolename;
51473 + read_unlock(&tasklist_lock);
51474 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
51475 + p, acl_sp_role_value);
51476 + } else {
51477 + gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
51478 + error = -EPERM;
51479 + if(!(current->role->auth_attempts++))
51480 + current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
51481 +
51482 + goto out;
51483 + }
51484 + break;
51485 + case GR_UNSPROLE:
51486 + if (unlikely(!(gr_status & GR_READY))) {
51487 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
51488 + error = -EAGAIN;
51489 + break;
51490 + }
51491 +
51492 + if (current->role->roletype & GR_ROLE_SPECIAL) {
51493 + char *p = "";
51494 + int i = 0;
51495 +
51496 + read_lock(&tasklist_lock);
51497 + if (current->real_parent) {
51498 + p = current->real_parent->role->rolename;
51499 + i = current->real_parent->acl_role_id;
51500 + }
51501 + read_unlock(&tasklist_lock);
51502 +
51503 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
51504 + gr_set_acls(1);
51505 + } else {
51506 + error = -EPERM;
51507 + goto out;
51508 + }
51509 + break;
51510 + default:
51511 + gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
51512 + error = -EINVAL;
51513 + break;
51514 + }
51515 +
51516 + if (error != -EPERM)
51517 + goto out;
51518 +
51519 + if(!(gr_auth_attempts++))
51520 + gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
51521 +
51522 + out:
51523 + mutex_unlock(&gr_dev_mutex);
51524 + return error;
51525 +}
51526 +
51527 +/* must be called with
51528 + rcu_read_lock();
51529 + read_lock(&tasklist_lock);
51530 + read_lock(&grsec_exec_file_lock);
51531 +*/
51532 +int gr_apply_subject_to_task(struct task_struct *task)
51533 +{
51534 + struct acl_object_label *obj;
51535 + char *tmpname;
51536 + struct acl_subject_label *tmpsubj;
51537 + struct file *filp;
51538 + struct name_entry *nmatch;
51539 +
51540 + filp = task->exec_file;
51541 + if (filp == NULL)
51542 + return 0;
51543 +
51544 + /* the following is to apply the correct subject
51545 + on binaries running when the RBAC system
51546 + is enabled, when the binaries have been
51547 + replaced or deleted since their execution
51548 + -----
51549 + when the RBAC system starts, the inode/dev
51550 + from exec_file will be one the RBAC system
51551 + is unaware of. It only knows the inode/dev
51552 + of the present file on disk, or the absence
51553 + of it.
51554 + */
51555 + preempt_disable();
51556 + tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
51557 +
51558 + nmatch = lookup_name_entry(tmpname);
51559 + preempt_enable();
51560 + tmpsubj = NULL;
51561 + if (nmatch) {
51562 + if (nmatch->deleted)
51563 + tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
51564 + else
51565 + tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
51566 + if (tmpsubj != NULL)
51567 + task->acl = tmpsubj;
51568 + }
51569 + if (tmpsubj == NULL)
51570 + task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
51571 + task->role);
51572 + if (task->acl) {
51573 + task->is_writable = 0;
51574 + /* ignore additional mmap checks for processes that are writable
51575 + by the default ACL */
51576 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
51577 + if (unlikely(obj->mode & GR_WRITE))
51578 + task->is_writable = 1;
51579 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
51580 + if (unlikely(obj->mode & GR_WRITE))
51581 + task->is_writable = 1;
51582 +
51583 + gr_set_proc_res(task);
51584 +
51585 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
51586 + printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
51587 +#endif
51588 + } else {
51589 + return 1;
51590 + }
51591 +
51592 + return 0;
51593 +}
51594 +
51595 +int
51596 +gr_set_acls(const int type)
51597 +{
51598 + struct task_struct *task, *task2;
51599 + struct acl_role_label *role = current->role;
51600 + __u16 acl_role_id = current->acl_role_id;
51601 + const struct cred *cred;
51602 + int ret;
51603 +
51604 + rcu_read_lock();
51605 + read_lock(&tasklist_lock);
51606 + read_lock(&grsec_exec_file_lock);
51607 + do_each_thread(task2, task) {
51608 + /* check to see if we're called from the exit handler,
51609 + if so, only replace ACLs that have inherited the admin
51610 + ACL */
51611 +
51612 + if (type && (task->role != role ||
51613 + task->acl_role_id != acl_role_id))
51614 + continue;
51615 +
51616 + task->acl_role_id = 0;
51617 + task->acl_sp_role = 0;
51618 +
51619 + if (task->exec_file) {
51620 + cred = __task_cred(task);
51621 + task->role = lookup_acl_role_label(task, cred->uid, cred->gid);
51622 + ret = gr_apply_subject_to_task(task);
51623 + if (ret) {
51624 + read_unlock(&grsec_exec_file_lock);
51625 + read_unlock(&tasklist_lock);
51626 + rcu_read_unlock();
51627 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task->pid);
51628 + return ret;
51629 + }
51630 + } else {
51631 + // it's a kernel process
51632 + task->role = kernel_role;
51633 + task->acl = kernel_role->root_label;
51634 +#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
51635 + task->acl->mode &= ~GR_PROCFIND;
51636 +#endif
51637 + }
51638 + } while_each_thread(task2, task);
51639 + read_unlock(&grsec_exec_file_lock);
51640 + read_unlock(&tasklist_lock);
51641 + rcu_read_unlock();
51642 +
51643 + return 0;
51644 +}
51645 +
51646 +void
51647 +gr_learn_resource(const struct task_struct *task,
51648 + const int res, const unsigned long wanted, const int gt)
51649 +{
51650 + struct acl_subject_label *acl;
51651 + const struct cred *cred;
51652 +
51653 + if (unlikely((gr_status & GR_READY) &&
51654 + task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
51655 + goto skip_reslog;
51656 +
51657 +#ifdef CONFIG_GRKERNSEC_RESLOG
51658 + gr_log_resource(task, res, wanted, gt);
51659 +#endif
51660 + skip_reslog:
51661 +
51662 + if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
51663 + return;
51664 +
51665 + acl = task->acl;
51666 +
51667 + if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
51668 + !(acl->resmask & (1 << (unsigned short) res))))
51669 + return;
51670 +
51671 + if (wanted >= acl->res[res].rlim_cur) {
51672 + unsigned long res_add;
51673 +
51674 + res_add = wanted;
51675 + switch (res) {
51676 + case RLIMIT_CPU:
51677 + res_add += GR_RLIM_CPU_BUMP;
51678 + break;
51679 + case RLIMIT_FSIZE:
51680 + res_add += GR_RLIM_FSIZE_BUMP;
51681 + break;
51682 + case RLIMIT_DATA:
51683 + res_add += GR_RLIM_DATA_BUMP;
51684 + break;
51685 + case RLIMIT_STACK:
51686 + res_add += GR_RLIM_STACK_BUMP;
51687 + break;
51688 + case RLIMIT_CORE:
51689 + res_add += GR_RLIM_CORE_BUMP;
51690 + break;
51691 + case RLIMIT_RSS:
51692 + res_add += GR_RLIM_RSS_BUMP;
51693 + break;
51694 + case RLIMIT_NPROC:
51695 + res_add += GR_RLIM_NPROC_BUMP;
51696 + break;
51697 + case RLIMIT_NOFILE:
51698 + res_add += GR_RLIM_NOFILE_BUMP;
51699 + break;
51700 + case RLIMIT_MEMLOCK:
51701 + res_add += GR_RLIM_MEMLOCK_BUMP;
51702 + break;
51703 + case RLIMIT_AS:
51704 + res_add += GR_RLIM_AS_BUMP;
51705 + break;
51706 + case RLIMIT_LOCKS:
51707 + res_add += GR_RLIM_LOCKS_BUMP;
51708 + break;
51709 + case RLIMIT_SIGPENDING:
51710 + res_add += GR_RLIM_SIGPENDING_BUMP;
51711 + break;
51712 + case RLIMIT_MSGQUEUE:
51713 + res_add += GR_RLIM_MSGQUEUE_BUMP;
51714 + break;
51715 + case RLIMIT_NICE:
51716 + res_add += GR_RLIM_NICE_BUMP;
51717 + break;
51718 + case RLIMIT_RTPRIO:
51719 + res_add += GR_RLIM_RTPRIO_BUMP;
51720 + break;
51721 + case RLIMIT_RTTIME:
51722 + res_add += GR_RLIM_RTTIME_BUMP;
51723 + break;
51724 + }
51725 +
51726 + acl->res[res].rlim_cur = res_add;
51727 +
51728 + if (wanted > acl->res[res].rlim_max)
51729 + acl->res[res].rlim_max = res_add;
51730 +
51731 + /* only log the subject filename, since resource logging is supported for
51732 + single-subject learning only */
51733 + rcu_read_lock();
51734 + cred = __task_cred(task);
51735 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
51736 + task->role->roletype, cred->uid, cred->gid, acl->filename,
51737 + acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
51738 + "", (unsigned long) res, &task->signal->saved_ip);
51739 + rcu_read_unlock();
51740 + }
51741 +
51742 + return;
51743 +}
51744 +
51745 +#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
51746 +void
51747 +pax_set_initial_flags(struct linux_binprm *bprm)
51748 +{
51749 + struct task_struct *task = current;
51750 + struct acl_subject_label *proc;
51751 + unsigned long flags;
51752 +
51753 + if (unlikely(!(gr_status & GR_READY)))
51754 + return;
51755 +
51756 + flags = pax_get_flags(task);
51757 +
51758 + proc = task->acl;
51759 +
51760 + if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
51761 + flags &= ~MF_PAX_PAGEEXEC;
51762 + if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
51763 + flags &= ~MF_PAX_SEGMEXEC;
51764 + if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
51765 + flags &= ~MF_PAX_RANDMMAP;
51766 + if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
51767 + flags &= ~MF_PAX_EMUTRAMP;
51768 + if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
51769 + flags &= ~MF_PAX_MPROTECT;
51770 +
51771 + if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
51772 + flags |= MF_PAX_PAGEEXEC;
51773 + if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
51774 + flags |= MF_PAX_SEGMEXEC;
51775 + if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
51776 + flags |= MF_PAX_RANDMMAP;
51777 + if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
51778 + flags |= MF_PAX_EMUTRAMP;
51779 + if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
51780 + flags |= MF_PAX_MPROTECT;
51781 +
51782 + pax_set_flags(task, flags);
51783 +
51784 + return;
51785 +}
51786 +#endif
51787 +
51788 +#ifdef CONFIG_SYSCTL
51789 +/* Eric Biederman likes breaking userland ABI and every inode-based security
51790 + system to save 35kb of memory */
51791 +
51792 +/* we modify the passed in filename, but adjust it back before returning */
51793 +static struct acl_object_label *gr_lookup_by_name(char *name, unsigned int len)
51794 +{
51795 + struct name_entry *nmatch;
51796 + char *p, *lastp = NULL;
51797 + struct acl_object_label *obj = NULL, *tmp;
51798 + struct acl_subject_label *tmpsubj;
51799 + char c = '\0';
51800 +
51801 + read_lock(&gr_inode_lock);
51802 +
51803 + p = name + len - 1;
51804 + do {
51805 + nmatch = lookup_name_entry(name);
51806 + if (lastp != NULL)
51807 + *lastp = c;
51808 +
51809 + if (nmatch == NULL)
51810 + goto next_component;
51811 + tmpsubj = current->acl;
51812 + do {
51813 + obj = lookup_acl_obj_label(nmatch->inode, nmatch->device, tmpsubj);
51814 + if (obj != NULL) {
51815 + tmp = obj->globbed;
51816 + while (tmp) {
51817 + if (!glob_match(tmp->filename, name)) {
51818 + obj = tmp;
51819 + goto found_obj;
51820 + }
51821 + tmp = tmp->next;
51822 + }
51823 + goto found_obj;
51824 + }
51825 + } while ((tmpsubj = tmpsubj->parent_subject));
51826 +next_component:
51827 + /* end case */
51828 + if (p == name)
51829 + break;
51830 +
51831 + while (*p != '/')
51832 + p--;
51833 + if (p == name)
51834 + lastp = p + 1;
51835 + else {
51836 + lastp = p;
51837 + p--;
51838 + }
51839 + c = *lastp;
51840 + *lastp = '\0';
51841 + } while (1);
51842 +found_obj:
51843 + read_unlock(&gr_inode_lock);
51844 + /* obj returned will always be non-null */
51845 + return obj;
51846 +}
51847 +
51848 +/* returns 0 when allowing, non-zero on error
51849 + op of 0 is used for readdir, so we don't log the names of hidden files
51850 +*/
51851 +__u32
51852 +gr_handle_sysctl(const struct ctl_table *table, const int op)
51853 +{
51854 + struct ctl_table *tmp;
51855 + const char *proc_sys = "/proc/sys";
51856 + char *path;
51857 + struct acl_object_label *obj;
51858 + unsigned short len = 0, pos = 0, depth = 0, i;
51859 + __u32 err = 0;
51860 + __u32 mode = 0;
51861 +
51862 + if (unlikely(!(gr_status & GR_READY)))
51863 + return 0;
51864 +
51865 + /* for now, ignore operations on non-sysctl entries if it's not a
51866 + readdir*/
51867 + if (table->child != NULL && op != 0)
51868 + return 0;
51869 +
51870 + mode |= GR_FIND;
51871 + /* it's only a read if it's an entry, read on dirs is for readdir */
51872 + if (op & MAY_READ)
51873 + mode |= GR_READ;
51874 + if (op & MAY_WRITE)
51875 + mode |= GR_WRITE;
51876 +
51877 + preempt_disable();
51878 +
51879 + path = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
51880 +
51881 + /* it's only a read/write if it's an actual entry, not a dir
51882 + (which are opened for readdir)
51883 + */
51884 +
51885 + /* convert the requested sysctl entry into a pathname */
51886 +
51887 + for (tmp = (struct ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
51888 + len += strlen(tmp->procname);
51889 + len++;
51890 + depth++;
51891 + }
51892 +
51893 + if ((len + depth + strlen(proc_sys) + 1) > PAGE_SIZE) {
51894 + /* deny */
51895 + goto out;
51896 + }
51897 +
51898 + memset(path, 0, PAGE_SIZE);
51899 +
51900 + memcpy(path, proc_sys, strlen(proc_sys));
51901 +
51902 + pos += strlen(proc_sys);
51903 +
51904 + for (; depth > 0; depth--) {
51905 + path[pos] = '/';
51906 + pos++;
51907 + for (i = 1, tmp = (struct ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
51908 + if (depth == i) {
51909 + memcpy(path + pos, tmp->procname,
51910 + strlen(tmp->procname));
51911 + pos += strlen(tmp->procname);
51912 + }
51913 + i++;
51914 + }
51915 + }
51916 +
51917 + obj = gr_lookup_by_name(path, pos);
51918 + err = obj->mode & (mode | to_gr_audit(mode) | GR_SUPPRESS);
51919 +
51920 + if (unlikely((current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) &&
51921 + ((err & mode) != mode))) {
51922 + __u32 new_mode = mode;
51923 +
51924 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
51925 +
51926 + err = 0;
51927 + gr_log_learn_sysctl(path, new_mode);
51928 + } else if (!(err & GR_FIND) && !(err & GR_SUPPRESS) && op != 0) {
51929 + gr_log_hidden_sysctl(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, path);
51930 + err = -ENOENT;
51931 + } else if (!(err & GR_FIND)) {
51932 + err = -ENOENT;
51933 + } else if (((err & mode) & ~GR_FIND) != (mode & ~GR_FIND) && !(err & GR_SUPPRESS)) {
51934 + gr_log_str4(GR_DONT_AUDIT, GR_SYSCTL_ACL_MSG, "denied",
51935 + path, (mode & GR_READ) ? " reading" : "",
51936 + (mode & GR_WRITE) ? " writing" : "");
51937 + err = -EACCES;
51938 + } else if ((err & mode) != mode) {
51939 + err = -EACCES;
51940 + } else if ((((err & mode) & ~GR_FIND) == (mode & ~GR_FIND)) && (err & GR_AUDITS)) {
51941 + gr_log_str4(GR_DO_AUDIT, GR_SYSCTL_ACL_MSG, "successful",
51942 + path, (mode & GR_READ) ? " reading" : "",
51943 + (mode & GR_WRITE) ? " writing" : "");
51944 + err = 0;
51945 + } else
51946 + err = 0;
51947 +
51948 + out:
51949 + preempt_enable();
51950 +
51951 + return err;
51952 +}
51953 +#endif
51954 +
51955 +int
51956 +gr_handle_proc_ptrace(struct task_struct *task)
51957 +{
51958 + struct file *filp;
51959 + struct task_struct *tmp = task;
51960 + struct task_struct *curtemp = current;
51961 + __u32 retmode;
51962 +
51963 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
51964 + if (unlikely(!(gr_status & GR_READY)))
51965 + return 0;
51966 +#endif
51967 +
51968 + read_lock(&tasklist_lock);
51969 + read_lock(&grsec_exec_file_lock);
51970 + filp = task->exec_file;
51971 +
51972 + while (tmp->pid > 0) {
51973 + if (tmp == curtemp)
51974 + break;
51975 + tmp = tmp->real_parent;
51976 + }
51977 +
51978 + if (!filp || (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
51979 + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
51980 + read_unlock(&grsec_exec_file_lock);
51981 + read_unlock(&tasklist_lock);
51982 + return 1;
51983 + }
51984 +
51985 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
51986 + if (!(gr_status & GR_READY)) {
51987 + read_unlock(&grsec_exec_file_lock);
51988 + read_unlock(&tasklist_lock);
51989 + return 0;
51990 + }
51991 +#endif
51992 +
51993 + retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
51994 + read_unlock(&grsec_exec_file_lock);
51995 + read_unlock(&tasklist_lock);
51996 +
51997 + if (retmode & GR_NOPTRACE)
51998 + return 1;
51999 +
52000 + if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
52001 + && (current->acl != task->acl || (current->acl != current->role->root_label
52002 + && current->pid != task->pid)))
52003 + return 1;
52004 +
52005 + return 0;
52006 +}
52007 +
52008 +void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
52009 +{
52010 + if (unlikely(!(gr_status & GR_READY)))
52011 + return;
52012 +
52013 + if (!(current->role->roletype & GR_ROLE_GOD))
52014 + return;
52015 +
52016 + seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
52017 + p->role->rolename, gr_task_roletype_to_char(p),
52018 + p->acl->filename);
52019 +}
52020 +
52021 +int
52022 +gr_handle_ptrace(struct task_struct *task, const long request)
52023 +{
52024 + struct task_struct *tmp = task;
52025 + struct task_struct *curtemp = current;
52026 + __u32 retmode;
52027 +
52028 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
52029 + if (unlikely(!(gr_status & GR_READY)))
52030 + return 0;
52031 +#endif
52032 +
52033 + read_lock(&tasklist_lock);
52034 + while (tmp->pid > 0) {
52035 + if (tmp == curtemp)
52036 + break;
52037 + tmp = tmp->real_parent;
52038 + }
52039 +
52040 + if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
52041 + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
52042 + read_unlock(&tasklist_lock);
52043 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
52044 + return 1;
52045 + }
52046 + read_unlock(&tasklist_lock);
52047 +
52048 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
52049 + if (!(gr_status & GR_READY))
52050 + return 0;
52051 +#endif
52052 +
52053 + read_lock(&grsec_exec_file_lock);
52054 + if (unlikely(!task->exec_file)) {
52055 + read_unlock(&grsec_exec_file_lock);
52056 + return 0;
52057 + }
52058 +
52059 + retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
52060 + read_unlock(&grsec_exec_file_lock);
52061 +
52062 + if (retmode & GR_NOPTRACE) {
52063 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
52064 + return 1;
52065 + }
52066 +
52067 + if (retmode & GR_PTRACERD) {
52068 + switch (request) {
52069 + case PTRACE_SEIZE:
52070 + case PTRACE_POKETEXT:
52071 + case PTRACE_POKEDATA:
52072 + case PTRACE_POKEUSR:
52073 +#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
52074 + case PTRACE_SETREGS:
52075 + case PTRACE_SETFPREGS:
52076 +#endif
52077 +#ifdef CONFIG_X86
52078 + case PTRACE_SETFPXREGS:
52079 +#endif
52080 +#ifdef CONFIG_ALTIVEC
52081 + case PTRACE_SETVRREGS:
52082 +#endif
52083 + return 1;
52084 + default:
52085 + return 0;
52086 + }
52087 + } else if (!(current->acl->mode & GR_POVERRIDE) &&
52088 + !(current->role->roletype & GR_ROLE_GOD) &&
52089 + (current->acl != task->acl)) {
52090 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
52091 + return 1;
52092 + }
52093 +
52094 + return 0;
52095 +}
52096 +
52097 +static int is_writable_mmap(const struct file *filp)
52098 +{
52099 + struct task_struct *task = current;
52100 + struct acl_object_label *obj, *obj2;
52101 +
52102 + if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
52103 + !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
52104 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
52105 + obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
52106 + task->role->root_label);
52107 + if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
52108 + gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
52109 + return 1;
52110 + }
52111 + }
52112 + return 0;
52113 +}
52114 +
52115 +int
52116 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
52117 +{
52118 + __u32 mode;
52119 +
52120 + if (unlikely(!file || !(prot & PROT_EXEC)))
52121 + return 1;
52122 +
52123 + if (is_writable_mmap(file))
52124 + return 0;
52125 +
52126 + mode =
52127 + gr_search_file(file->f_path.dentry,
52128 + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
52129 + file->f_path.mnt);
52130 +
52131 + if (!gr_tpe_allow(file))
52132 + return 0;
52133 +
52134 + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
52135 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
52136 + return 0;
52137 + } else if (unlikely(!(mode & GR_EXEC))) {
52138 + return 0;
52139 + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
52140 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
52141 + return 1;
52142 + }
52143 +
52144 + return 1;
52145 +}
52146 +
52147 +int
52148 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
52149 +{
52150 + __u32 mode;
52151 +
52152 + if (unlikely(!file || !(prot & PROT_EXEC)))
52153 + return 1;
52154 +
52155 + if (is_writable_mmap(file))
52156 + return 0;
52157 +
52158 + mode =
52159 + gr_search_file(file->f_path.dentry,
52160 + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
52161 + file->f_path.mnt);
52162 +
52163 + if (!gr_tpe_allow(file))
52164 + return 0;
52165 +
52166 + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
52167 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
52168 + return 0;
52169 + } else if (unlikely(!(mode & GR_EXEC))) {
52170 + return 0;
52171 + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
52172 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
52173 + return 1;
52174 + }
52175 +
52176 + return 1;
52177 +}
52178 +
52179 +void
52180 +gr_acl_handle_psacct(struct task_struct *task, const long code)
52181 +{
52182 + unsigned long runtime;
52183 + unsigned long cputime;
52184 + unsigned int wday, cday;
52185 + __u8 whr, chr;
52186 + __u8 wmin, cmin;
52187 + __u8 wsec, csec;
52188 + struct timespec timeval;
52189 +
52190 + if (unlikely(!(gr_status & GR_READY) || !task->acl ||
52191 + !(task->acl->mode & GR_PROCACCT)))
52192 + return;
52193 +
52194 + do_posix_clock_monotonic_gettime(&timeval);
52195 + runtime = timeval.tv_sec - task->start_time.tv_sec;
52196 + wday = runtime / (3600 * 24);
52197 + runtime -= wday * (3600 * 24);
52198 + whr = runtime / 3600;
52199 + runtime -= whr * 3600;
52200 + wmin = runtime / 60;
52201 + runtime -= wmin * 60;
52202 + wsec = runtime;
52203 +
52204 + cputime = (task->utime + task->stime) / HZ;
52205 + cday = cputime / (3600 * 24);
52206 + cputime -= cday * (3600 * 24);
52207 + chr = cputime / 3600;
52208 + cputime -= chr * 3600;
52209 + cmin = cputime / 60;
52210 + cputime -= cmin * 60;
52211 + csec = cputime;
52212 +
52213 + gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
52214 +
52215 + return;
52216 +}
52217 +
52218 +void gr_set_kernel_label(struct task_struct *task)
52219 +{
52220 + if (gr_status & GR_READY) {
52221 + task->role = kernel_role;
52222 + task->acl = kernel_role->root_label;
52223 + }
52224 + return;
52225 +}
52226 +
52227 +#ifdef CONFIG_TASKSTATS
52228 +int gr_is_taskstats_denied(int pid)
52229 +{
52230 + struct task_struct *task;
52231 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
52232 + const struct cred *cred;
52233 +#endif
52234 + int ret = 0;
52235 +
52236 + /* restrict taskstats viewing to un-chrooted root users
52237 + who have the 'view' subject flag if the RBAC system is enabled
52238 + */
52239 +
52240 + rcu_read_lock();
52241 + read_lock(&tasklist_lock);
52242 + task = find_task_by_vpid(pid);
52243 + if (task) {
52244 +#ifdef CONFIG_GRKERNSEC_CHROOT
52245 + if (proc_is_chrooted(task))
52246 + ret = -EACCES;
52247 +#endif
52248 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
52249 + cred = __task_cred(task);
52250 +#ifdef CONFIG_GRKERNSEC_PROC_USER
52251 + if (cred->uid != 0)
52252 + ret = -EACCES;
52253 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
52254 + if (cred->uid != 0 && !groups_search(cred->group_info, CONFIG_GRKERNSEC_PROC_GID))
52255 + ret = -EACCES;
52256 +#endif
52257 +#endif
52258 + if (gr_status & GR_READY) {
52259 + if (!(task->acl->mode & GR_VIEW))
52260 + ret = -EACCES;
52261 + }
52262 + } else
52263 + ret = -ENOENT;
52264 +
52265 + read_unlock(&tasklist_lock);
52266 + rcu_read_unlock();
52267 +
52268 + return ret;
52269 +}
52270 +#endif
52271 +
52272 +/* AUXV entries are filled via a descendant of search_binary_handler
52273 + after we've already applied the subject for the target
52274 +*/
52275 +int gr_acl_enable_at_secure(void)
52276 +{
52277 + if (unlikely(!(gr_status & GR_READY)))
52278 + return 0;
52279 +
52280 + if (current->acl->mode & GR_ATSECURE)
52281 + return 1;
52282 +
52283 + return 0;
52284 +}
52285 +
52286 +int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
52287 +{
52288 + struct task_struct *task = current;
52289 + struct dentry *dentry = file->f_path.dentry;
52290 + struct vfsmount *mnt = file->f_path.mnt;
52291 + struct acl_object_label *obj, *tmp;
52292 + struct acl_subject_label *subj;
52293 + unsigned int bufsize;
52294 + int is_not_root;
52295 + char *path;
52296 + dev_t dev = __get_dev(dentry);
52297 +
52298 + if (unlikely(!(gr_status & GR_READY)))
52299 + return 1;
52300 +
52301 + if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
52302 + return 1;
52303 +
52304 + /* ignore Eric Biederman */
52305 + if (IS_PRIVATE(dentry->d_inode))
52306 + return 1;
52307 +
52308 + subj = task->acl;
52309 + do {
52310 + obj = lookup_acl_obj_label(ino, dev, subj);
52311 + if (obj != NULL)
52312 + return (obj->mode & GR_FIND) ? 1 : 0;
52313 + } while ((subj = subj->parent_subject));
52314 +
52315 + /* this is purely an optimization since we're looking for an object
52316 + for the directory we're doing a readdir on
52317 + if it's possible for any globbed object to match the entry we're
52318 + filling into the directory, then the object we find here will be
52319 + an anchor point with attached globbed objects
52320 + */
52321 + obj = chk_obj_label_noglob(dentry, mnt, task->acl);
52322 + if (obj->globbed == NULL)
52323 + return (obj->mode & GR_FIND) ? 1 : 0;
52324 +
52325 + is_not_root = ((obj->filename[0] == '/') &&
52326 + (obj->filename[1] == '\0')) ? 0 : 1;
52327 + bufsize = PAGE_SIZE - namelen - is_not_root;
52328 +
52329 + /* check bufsize > PAGE_SIZE || bufsize == 0 */
52330 + if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
52331 + return 1;
52332 +
52333 + preempt_disable();
52334 + path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
52335 + bufsize);
52336 +
52337 + bufsize = strlen(path);
52338 +
52339 + /* if base is "/", don't append an additional slash */
52340 + if (is_not_root)
52341 + *(path + bufsize) = '/';
52342 + memcpy(path + bufsize + is_not_root, name, namelen);
52343 + *(path + bufsize + namelen + is_not_root) = '\0';
52344 +
52345 + tmp = obj->globbed;
52346 + while (tmp) {
52347 + if (!glob_match(tmp->filename, path)) {
52348 + preempt_enable();
52349 + return (tmp->mode & GR_FIND) ? 1 : 0;
52350 + }
52351 + tmp = tmp->next;
52352 + }
52353 + preempt_enable();
52354 + return (obj->mode & GR_FIND) ? 1 : 0;
52355 +}
52356 +
52357 +#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
52358 +EXPORT_SYMBOL(gr_acl_is_enabled);
52359 +#endif
52360 +EXPORT_SYMBOL(gr_learn_resource);
52361 +EXPORT_SYMBOL(gr_set_kernel_label);
52362 +#ifdef CONFIG_SECURITY
52363 +EXPORT_SYMBOL(gr_check_user_change);
52364 +EXPORT_SYMBOL(gr_check_group_change);
52365 +#endif
52366 +
52367 diff --git a/grsecurity/gracl_alloc.c b/grsecurity/gracl_alloc.c
52368 new file mode 100644
52369 index 0000000..34fefda
52370 --- /dev/null
52371 +++ b/grsecurity/gracl_alloc.c
52372 @@ -0,0 +1,105 @@
52373 +#include <linux/kernel.h>
52374 +#include <linux/mm.h>
52375 +#include <linux/slab.h>
52376 +#include <linux/vmalloc.h>
52377 +#include <linux/gracl.h>
52378 +#include <linux/grsecurity.h>
52379 +
52380 +static unsigned long alloc_stack_next = 1;
52381 +static unsigned long alloc_stack_size = 1;
52382 +static void **alloc_stack;
52383 +
52384 +static __inline__ int
52385 +alloc_pop(void)
52386 +{
52387 + if (alloc_stack_next == 1)
52388 + return 0;
52389 +
52390 + kfree(alloc_stack[alloc_stack_next - 2]);
52391 +
52392 + alloc_stack_next--;
52393 +
52394 + return 1;
52395 +}
52396 +
52397 +static __inline__ int
52398 +alloc_push(void *buf)
52399 +{
52400 + if (alloc_stack_next >= alloc_stack_size)
52401 + return 1;
52402 +
52403 + alloc_stack[alloc_stack_next - 1] = buf;
52404 +
52405 + alloc_stack_next++;
52406 +
52407 + return 0;
52408 +}
52409 +
52410 +void *
52411 +acl_alloc(unsigned long len)
52412 +{
52413 + void *ret = NULL;
52414 +
52415 + if (!len || len > PAGE_SIZE)
52416 + goto out;
52417 +
52418 + ret = kmalloc(len, GFP_KERNEL);
52419 +
52420 + if (ret) {
52421 + if (alloc_push(ret)) {
52422 + kfree(ret);
52423 + ret = NULL;
52424 + }
52425 + }
52426 +
52427 +out:
52428 + return ret;
52429 +}
52430 +
52431 +void *
52432 +acl_alloc_num(unsigned long num, unsigned long len)
52433 +{
52434 + if (!len || (num > (PAGE_SIZE / len)))
52435 + return NULL;
52436 +
52437 + return acl_alloc(num * len);
52438 +}
52439 +
52440 +void
52441 +acl_free_all(void)
52442 +{
52443 + if (gr_acl_is_enabled() || !alloc_stack)
52444 + return;
52445 +
52446 + while (alloc_pop()) ;
52447 +
52448 + if (alloc_stack) {
52449 + if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
52450 + kfree(alloc_stack);
52451 + else
52452 + vfree(alloc_stack);
52453 + }
52454 +
52455 + alloc_stack = NULL;
52456 + alloc_stack_size = 1;
52457 + alloc_stack_next = 1;
52458 +
52459 + return;
52460 +}
52461 +
52462 +int
52463 +acl_alloc_stack_init(unsigned long size)
52464 +{
52465 + if ((size * sizeof (void *)) <= PAGE_SIZE)
52466 + alloc_stack =
52467 + (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
52468 + else
52469 + alloc_stack = (void **) vmalloc(size * sizeof (void *));
52470 +
52471 + alloc_stack_size = size;
52472 +
52473 + if (!alloc_stack)
52474 + return 0;
52475 + else
52476 + return 1;
52477 +}
52478 diff --git a/grsecurity/gracl_cap.c b/grsecurity/gracl_cap.c
52479 new file mode 100644
52480 index 0000000..955ddfb
52481 --- /dev/null
52482 +++ b/grsecurity/gracl_cap.c
52483 @@ -0,0 +1,101 @@
52484 +#include <linux/kernel.h>
52485 +#include <linux/module.h>
52486 +#include <linux/sched.h>
52487 +#include <linux/gracl.h>
52488 +#include <linux/grsecurity.h>
52489 +#include <linux/grinternal.h>
52490 +
52491 +extern const char *captab_log[];
52492 +extern int captab_log_entries;
52493 +
52494 +int
52495 +gr_acl_is_capable(const int cap)
52496 +{
52497 + struct task_struct *task = current;
52498 + const struct cred *cred = current_cred();
52499 + struct acl_subject_label *curracl;
52500 + kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
52501 + kernel_cap_t cap_audit = __cap_empty_set;
52502 +
52503 + if (!gr_acl_is_enabled())
52504 + return 1;
52505 +
52506 + curracl = task->acl;
52507 +
52508 + cap_drop = curracl->cap_lower;
52509 + cap_mask = curracl->cap_mask;
52510 + cap_audit = curracl->cap_invert_audit;
52511 +
52512 + while ((curracl = curracl->parent_subject)) {
52513 + /* if the cap isn't specified in the current computed mask but is specified in the
52514 + current level subject, and is lowered in the current level subject, then add
52515 + it to the set of dropped capabilities
52516 + otherwise, add the current level subject's mask to the current computed mask
52517 + */
52518 + if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
52519 + cap_raise(cap_mask, cap);
52520 + if (cap_raised(curracl->cap_lower, cap))
52521 + cap_raise(cap_drop, cap);
52522 + if (cap_raised(curracl->cap_invert_audit, cap))
52523 + cap_raise(cap_audit, cap);
52524 + }
52525 + }
52526 +
52527 + if (!cap_raised(cap_drop, cap)) {
52528 + if (cap_raised(cap_audit, cap))
52529 + gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
52530 + return 1;
52531 + }
52532 +
52533 + curracl = task->acl;
52534 +
52535 + if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
52536 + && cap_raised(cred->cap_effective, cap)) {
52537 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
52538 + task->role->roletype, cred->uid,
52539 + cred->gid, task->exec_file ?
52540 + gr_to_filename(task->exec_file->f_path.dentry,
52541 + task->exec_file->f_path.mnt) : curracl->filename,
52542 + curracl->filename, 0UL,
52543 + 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
52544 + return 1;
52545 + }
52546 +
52547 + if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
52548 + gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
52549 + return 0;
52550 +}
52551 +
52552 +int
52553 +gr_acl_is_capable_nolog(const int cap)
52554 +{
52555 + struct acl_subject_label *curracl;
52556 + kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
52557 +
52558 + if (!gr_acl_is_enabled())
52559 + return 1;
52560 +
52561 + curracl = current->acl;
52562 +
52563 + cap_drop = curracl->cap_lower;
52564 + cap_mask = curracl->cap_mask;
52565 +
52566 + while ((curracl = curracl->parent_subject)) {
52567 + /* if the cap isn't specified in the current computed mask but is specified in the
52568 + current level subject, and is lowered in the current level subject, then add
52569 + it to the set of dropped capabilities
52570 + otherwise, add the current level subject's mask to the current computed mask
52571 + */
52572 + if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
52573 + cap_raise(cap_mask, cap);
52574 + if (cap_raised(curracl->cap_lower, cap))
52575 + cap_raise(cap_drop, cap);
52576 + }
52577 + }
52578 +
52579 + if (!cap_raised(cap_drop, cap))
52580 + return 1;
52581 +
52582 + return 0;
52583 +}
52584 +
52585 diff --git a/grsecurity/gracl_fs.c b/grsecurity/gracl_fs.c
52586 new file mode 100644
52587 index 0000000..88d0e87
52588 --- /dev/null
52589 +++ b/grsecurity/gracl_fs.c
52590 @@ -0,0 +1,435 @@
52591 +#include <linux/kernel.h>
52592 +#include <linux/sched.h>
52593 +#include <linux/types.h>
52594 +#include <linux/fs.h>
52595 +#include <linux/file.h>
52596 +#include <linux/stat.h>
52597 +#include <linux/grsecurity.h>
52598 +#include <linux/grinternal.h>
52599 +#include <linux/gracl.h>
52600 +
52601 +umode_t
52602 +gr_acl_umask(void)
52603 +{
52604 + if (unlikely(!gr_acl_is_enabled()))
52605 + return 0;
52606 +
52607 + return current->role->umask;
52608 +}
52609 +
52610 +__u32
52611 +gr_acl_handle_hidden_file(const struct dentry * dentry,
52612 + const struct vfsmount * mnt)
52613 +{
52614 + __u32 mode;
52615 +
52616 + if (unlikely(!dentry->d_inode))
52617 + return GR_FIND;
52618 +
52619 + mode =
52620 + gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
52621 +
52622 + if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
52623 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
52624 + return mode;
52625 + } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
52626 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
52627 + return 0;
52628 + } else if (unlikely(!(mode & GR_FIND)))
52629 + return 0;
52630 +
52631 + return GR_FIND;
52632 +}
52633 +
52634 +__u32
52635 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
52636 + int acc_mode)
52637 +{
52638 + __u32 reqmode = GR_FIND;
52639 + __u32 mode;
52640 +
52641 + if (unlikely(!dentry->d_inode))
52642 + return reqmode;
52643 +
52644 + if (acc_mode & MAY_APPEND)
52645 + reqmode |= GR_APPEND;
52646 + else if (acc_mode & MAY_WRITE)
52647 + reqmode |= GR_WRITE;
52648 + if ((acc_mode & MAY_READ) && !S_ISDIR(dentry->d_inode->i_mode))
52649 + reqmode |= GR_READ;
52650 +
52651 + mode =
52652 + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
52653 + mnt);
52654 +
52655 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
52656 + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
52657 + reqmode & GR_READ ? " reading" : "",
52658 + reqmode & GR_WRITE ? " writing" : reqmode &
52659 + GR_APPEND ? " appending" : "");
52660 + return reqmode;
52661 + } else
52662 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
52663 + {
52664 + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
52665 + reqmode & GR_READ ? " reading" : "",
52666 + reqmode & GR_WRITE ? " writing" : reqmode &
52667 + GR_APPEND ? " appending" : "");
52668 + return 0;
52669 + } else if (unlikely((mode & reqmode) != reqmode))
52670 + return 0;
52671 +
52672 + return reqmode;
52673 +}
52674 +
52675 +__u32
52676 +gr_acl_handle_creat(const struct dentry * dentry,
52677 + const struct dentry * p_dentry,
52678 + const struct vfsmount * p_mnt, int open_flags, int acc_mode,
52679 + const int imode)
52680 +{
52681 + __u32 reqmode = GR_WRITE | GR_CREATE;
52682 + __u32 mode;
52683 +
52684 + if (acc_mode & MAY_APPEND)
52685 + reqmode |= GR_APPEND;
52686 + // if a directory was required or the directory already exists, then
52687 + // don't count this open as a read
52688 + if ((acc_mode & MAY_READ) &&
52689 + !((open_flags & O_DIRECTORY) || (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode))))
52690 + reqmode |= GR_READ;
52691 + if ((open_flags & O_CREAT) && (imode & (S_ISUID | S_ISGID)))
52692 + reqmode |= GR_SETID;
52693 +
52694 + mode =
52695 + gr_check_create(dentry, p_dentry, p_mnt,
52696 + reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
52697 +
52698 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
52699 + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
52700 + reqmode & GR_READ ? " reading" : "",
52701 + reqmode & GR_WRITE ? " writing" : reqmode &
52702 + GR_APPEND ? " appending" : "");
52703 + return reqmode;
52704 + } else
52705 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
52706 + {
52707 + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
52708 + reqmode & GR_READ ? " reading" : "",
52709 + reqmode & GR_WRITE ? " writing" : reqmode &
52710 + GR_APPEND ? " appending" : "");
52711 + return 0;
52712 + } else if (unlikely((mode & reqmode) != reqmode))
52713 + return 0;
52714 +
52715 + return reqmode;
52716 +}
52717 +
52718 +__u32
52719 +gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
52720 + const int fmode)
52721 +{
52722 + __u32 mode, reqmode = GR_FIND;
52723 +
52724 + if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
52725 + reqmode |= GR_EXEC;
52726 + if (fmode & S_IWOTH)
52727 + reqmode |= GR_WRITE;
52728 + if (fmode & S_IROTH)
52729 + reqmode |= GR_READ;
52730 +
52731 + mode =
52732 + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
52733 + mnt);
52734 +
52735 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
52736 + gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
52737 + reqmode & GR_READ ? " reading" : "",
52738 + reqmode & GR_WRITE ? " writing" : "",
52739 + reqmode & GR_EXEC ? " executing" : "");
52740 + return reqmode;
52741 + } else
52742 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
52743 + {
52744 + gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
52745 + reqmode & GR_READ ? " reading" : "",
52746 + reqmode & GR_WRITE ? " writing" : "",
52747 + reqmode & GR_EXEC ? " executing" : "");
52748 + return 0;
52749 + } else if (unlikely((mode & reqmode) != reqmode))
52750 + return 0;
52751 +
52752 + return reqmode;
52753 +}
52754 +
52755 +static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
52756 +{
52757 + __u32 mode;
52758 +
52759 + mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
52760 +
52761 + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
52762 + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
52763 + return mode;
52764 + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
52765 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
52766 + return 0;
52767 + } else if (unlikely((mode & (reqmode)) != (reqmode)))
52768 + return 0;
52769 +
52770 + return (reqmode);
52771 +}
52772 +
52773 +__u32
52774 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
52775 +{
52776 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
52777 +}
52778 +
52779 +__u32
52780 +gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
52781 +{
52782 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
52783 +}
52784 +
52785 +__u32
52786 +gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
52787 +{
52788 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
52789 +}
52790 +
52791 +__u32
52792 +gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
52793 +{
52794 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
52795 +}
52796 +
52797 +__u32
52798 +gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
52799 + umode_t *modeptr)
52800 +{
52801 + umode_t mode;
52802 +
52803 + *modeptr &= ~gr_acl_umask();
52804 + mode = *modeptr;
52805 +
52806 + if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
52807 + return 1;
52808 +
52809 + if (unlikely(mode & (S_ISUID | S_ISGID))) {
52810 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
52811 + GR_CHMOD_ACL_MSG);
52812 + } else {
52813 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
52814 + }
52815 +}
52816 +
52817 +__u32
52818 +gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
52819 +{
52820 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
52821 +}
52822 +
52823 +__u32
52824 +gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
52825 +{
52826 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
52827 +}
52828 +
52829 +__u32
52830 +gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
52831 +{
52832 + return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
52833 +}
52834 +
52835 +__u32
52836 +gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
52837 +{
52838 + return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
52839 + GR_UNIXCONNECT_ACL_MSG);
52840 +}
52841 +
52842 +/* hardlinks require at minimum create and link permission,
52843 + any additional privilege required is based on the
52844 + privilege of the file being linked to
52845 +*/
52846 +__u32
52847 +gr_acl_handle_link(const struct dentry * new_dentry,
52848 + const struct dentry * parent_dentry,
52849 + const struct vfsmount * parent_mnt,
52850 + const struct dentry * old_dentry,
52851 + const struct vfsmount * old_mnt, const char *to)
52852 +{
52853 + __u32 mode;
52854 + __u32 needmode = GR_CREATE | GR_LINK;
52855 + __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
52856 +
52857 + mode =
52858 + gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
52859 + old_mnt);
52860 +
52861 + if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
52862 + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
52863 + return mode;
52864 + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
52865 + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
52866 + return 0;
52867 + } else if (unlikely((mode & needmode) != needmode))
52868 + return 0;
52869 +
52870 + return 1;
52871 +}
52872 +
52873 +__u32
52874 +gr_acl_handle_symlink(const struct dentry * new_dentry,
52875 + const struct dentry * parent_dentry,
52876 + const struct vfsmount * parent_mnt, const char *from)
52877 +{
52878 + __u32 needmode = GR_WRITE | GR_CREATE;
52879 + __u32 mode;
52880 +
52881 + mode =
52882 + gr_check_create(new_dentry, parent_dentry, parent_mnt,
52883 + GR_CREATE | GR_AUDIT_CREATE |
52884 + GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
52885 +
52886 + if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
52887 + gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
52888 + return mode;
52889 + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
52890 + gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
52891 + return 0;
52892 + } else if (unlikely((mode & needmode) != needmode))
52893 + return 0;
52894 +
52895 + return (GR_WRITE | GR_CREATE);
52896 +}
52897 +
52898 +static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
52899 +{
52900 + __u32 mode;
52901 +
52902 + mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
52903 +
52904 + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
52905 + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
52906 + return mode;
52907 + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
52908 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
52909 + return 0;
52910 + } else if (unlikely((mode & (reqmode)) != (reqmode)))
52911 + return 0;
52912 +
52913 + return (reqmode);
52914 +}
52915 +
52916 +__u32
52917 +gr_acl_handle_mknod(const struct dentry * new_dentry,
52918 + const struct dentry * parent_dentry,
52919 + const struct vfsmount * parent_mnt,
52920 + const int mode)
52921 +{
52922 + __u32 reqmode = GR_WRITE | GR_CREATE;
52923 + if (unlikely(mode & (S_ISUID | S_ISGID)))
52924 + reqmode |= GR_SETID;
52925 +
52926 + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
52927 + reqmode, GR_MKNOD_ACL_MSG);
52928 +}
52929 +
52930 +__u32
52931 +gr_acl_handle_mkdir(const struct dentry *new_dentry,
52932 + const struct dentry *parent_dentry,
52933 + const struct vfsmount *parent_mnt)
52934 +{
52935 + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
52936 + GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
52937 +}
52938 +
52939 +#define RENAME_CHECK_SUCCESS(old, new) \
52940 + (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
52941 + ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
52942 +
52943 +int
52944 +gr_acl_handle_rename(struct dentry *new_dentry,
52945 + struct dentry *parent_dentry,
52946 + const struct vfsmount *parent_mnt,
52947 + struct dentry *old_dentry,
52948 + struct inode *old_parent_inode,
52949 + struct vfsmount *old_mnt, const char *newname)
52950 +{
52951 + __u32 comp1, comp2;
52952 + int error = 0;
52953 +
52954 + if (unlikely(!gr_acl_is_enabled()))
52955 + return 0;
52956 +
52957 + if (!new_dentry->d_inode) {
52958 + comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
52959 + GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
52960 + GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
52961 + comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
52962 + GR_DELETE | GR_AUDIT_DELETE |
52963 + GR_AUDIT_READ | GR_AUDIT_WRITE |
52964 + GR_SUPPRESS, old_mnt);
52965 + } else {
52966 + comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
52967 + GR_CREATE | GR_DELETE |
52968 + GR_AUDIT_CREATE | GR_AUDIT_DELETE |
52969 + GR_AUDIT_READ | GR_AUDIT_WRITE |
52970 + GR_SUPPRESS, parent_mnt);
52971 + comp2 =
52972 + gr_search_file(old_dentry,
52973 + GR_READ | GR_WRITE | GR_AUDIT_READ |
52974 + GR_DELETE | GR_AUDIT_DELETE |
52975 + GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
52976 + }
52977 +
52978 + if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
52979 + ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
52980 + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
52981 + else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
52982 + && !(comp2 & GR_SUPPRESS)) {
52983 + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
52984 + error = -EACCES;
52985 + } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
52986 + error = -EACCES;
52987 +
52988 + return error;
52989 +}
52990 +
52991 +void
52992 +gr_acl_handle_exit(void)
52993 +{
52994 + u16 id;
52995 + char *rolename;
52996 + struct file *exec_file;
52997 +
52998 + if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
52999 + !(current->role->roletype & GR_ROLE_PERSIST))) {
53000 + id = current->acl_role_id;
53001 + rolename = current->role->rolename;
53002 + gr_set_acls(1);
53003 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
53004 + }
53005 +
53006 + write_lock(&grsec_exec_file_lock);
53007 + exec_file = current->exec_file;
53008 + current->exec_file = NULL;
53009 + write_unlock(&grsec_exec_file_lock);
53010 +
53011 + if (exec_file)
53012 + fput(exec_file);
53013 +}
53014 +
53015 +int
53016 +gr_acl_handle_procpidmem(const struct task_struct *task)
53017 +{
53018 + if (unlikely(!gr_acl_is_enabled()))
53019 + return 0;
53020 +
53021 + if (task != current && task->acl->mode & GR_PROTPROCFD)
53022 + return -EACCES;
53023 +
53024 + return 0;
53025 +}
53026 diff --git a/grsecurity/gracl_ip.c b/grsecurity/gracl_ip.c
53027 new file mode 100644
53028 index 0000000..17050ca
53029 --- /dev/null
53030 +++ b/grsecurity/gracl_ip.c
53031 @@ -0,0 +1,381 @@
53032 +#include <linux/kernel.h>
53033 +#include <asm/uaccess.h>
53034 +#include <asm/errno.h>
53035 +#include <net/sock.h>
53036 +#include <linux/file.h>
53037 +#include <linux/fs.h>
53038 +#include <linux/net.h>
53039 +#include <linux/in.h>
53040 +#include <linux/skbuff.h>
53041 +#include <linux/ip.h>
53042 +#include <linux/udp.h>
53043 +#include <linux/types.h>
53044 +#include <linux/sched.h>
53045 +#include <linux/netdevice.h>
53046 +#include <linux/inetdevice.h>
53047 +#include <linux/gracl.h>
53048 +#include <linux/grsecurity.h>
53049 +#include <linux/grinternal.h>
53050 +
53051 +#define GR_BIND 0x01
53052 +#define GR_CONNECT 0x02
53053 +#define GR_INVERT 0x04
53054 +#define GR_BINDOVERRIDE 0x08
53055 +#define GR_CONNECTOVERRIDE 0x10
53056 +#define GR_SOCK_FAMILY 0x20
53057 +
53058 +static const char * gr_protocols[IPPROTO_MAX] = {
53059 + "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
53060 + "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
53061 + "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
53062 + "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
53063 + "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
53064 + "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
53065 + "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
53066 + "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
53067 + "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
53068 + "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
53069 + "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
53070 + "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
53071 + "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
53072 + "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
53073 + "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
53074 + "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
53075 + "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
53076 + "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
53077 + "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
53078 + "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
53079 + "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
53080 + "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
53081 + "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
53082 + "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
53083 + "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
53084 + "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
53085 + "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
53086 + "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
53087 + "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
53088 + "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
53089 + "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
53090 + "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
53091 + };
53092 +
53093 +static const char * gr_socktypes[SOCK_MAX] = {
53094 + "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
53095 + "unknown:7", "unknown:8", "unknown:9", "packet"
53096 + };
53097 +
53098 +static const char * gr_sockfamilies[AF_MAX+1] = {
53099 + "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
53100 + "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
53101 + "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
53102 + "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "ciaf"
53103 + };
53104 +
53105 +const char *
53106 +gr_proto_to_name(unsigned char proto)
53107 +{
53108 + return gr_protocols[proto];
53109 +}
53110 +
53111 +const char *
53112 +gr_socktype_to_name(unsigned char type)
53113 +{
53114 + return gr_socktypes[type];
53115 +}
53116 +
53117 +const char *
53118 +gr_sockfamily_to_name(unsigned char family)
53119 +{
53120 + return gr_sockfamilies[family];
53121 +}
53122 +
53123 +int
53124 +gr_search_socket(const int domain, const int type, const int protocol)
53125 +{
53126 + struct acl_subject_label *curr;
53127 + const struct cred *cred = current_cred();
53128 +
53129 + if (unlikely(!gr_acl_is_enabled()))
53130 + goto exit;
53131 +
53132 + if ((domain < 0) || (type < 0) || (protocol < 0) ||
53133 + (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
53134 + goto exit; // let the kernel handle it
53135 +
53136 + curr = current->acl;
53137 +
53138 + if (curr->sock_families[domain / 32] & (1 << (domain % 32))) {
53139 + /* the family is allowed, if this is PF_INET allow it only if
53140 + the extra sock type/protocol checks pass */
53141 + if (domain == PF_INET)
53142 + goto inet_check;
53143 + goto exit;
53144 + } else {
53145 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
53146 + __u32 fakeip = 0;
53147 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
53148 + current->role->roletype, cred->uid,
53149 + cred->gid, current->exec_file ?
53150 + gr_to_filename(current->exec_file->f_path.dentry,
53151 + current->exec_file->f_path.mnt) :
53152 + curr->filename, curr->filename,
53153 + &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
53154 + &current->signal->saved_ip);
53155 + goto exit;
53156 + }
53157 + goto exit_fail;
53158 + }
53159 +
53160 +inet_check:
53161 + /* the rest of this checking is for IPv4 only */
53162 + if (!curr->ips)
53163 + goto exit;
53164 +
53165 + if ((curr->ip_type & (1 << type)) &&
53166 + (curr->ip_proto[protocol / 32] & (1 << (protocol % 32))))
53167 + goto exit;
53168 +
53169 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
53170 + /* we don't place acls on raw sockets , and sometimes
53171 + dgram/ip sockets are opened for ioctl and not
53172 + bind/connect, so we'll fake a bind learn log */
53173 + if (type == SOCK_RAW || type == SOCK_PACKET) {
53174 + __u32 fakeip = 0;
53175 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
53176 + current->role->roletype, cred->uid,
53177 + cred->gid, current->exec_file ?
53178 + gr_to_filename(current->exec_file->f_path.dentry,
53179 + current->exec_file->f_path.mnt) :
53180 + curr->filename, curr->filename,
53181 + &fakeip, 0, type,
53182 + protocol, GR_CONNECT, &current->signal->saved_ip);
53183 + } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
53184 + __u32 fakeip = 0;
53185 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
53186 + current->role->roletype, cred->uid,
53187 + cred->gid, current->exec_file ?
53188 + gr_to_filename(current->exec_file->f_path.dentry,
53189 + current->exec_file->f_path.mnt) :
53190 + curr->filename, curr->filename,
53191 + &fakeip, 0, type,
53192 + protocol, GR_BIND, &current->signal->saved_ip);
53193 + }
53194 + /* we'll log when they use connect or bind */
53195 + goto exit;
53196 + }
53197 +
53198 +exit_fail:
53199 + if (domain == PF_INET)
53200 + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
53201 + gr_socktype_to_name(type), gr_proto_to_name(protocol));
53202 + else
53203 + gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
53204 + gr_socktype_to_name(type), protocol);
53205 +
53206 + return 0;
53207 +exit:
53208 + return 1;
53209 +}
53210 +
53211 +int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
53212 +{
53213 + if ((ip->mode & mode) &&
53214 + (ip_port >= ip->low) &&
53215 + (ip_port <= ip->high) &&
53216 + ((ntohl(ip_addr) & our_netmask) ==
53217 + (ntohl(our_addr) & our_netmask))
53218 + && (ip->proto[protocol / 32] & (1 << (protocol % 32)))
53219 + && (ip->type & (1 << type))) {
53220 + if (ip->mode & GR_INVERT)
53221 + return 2; // specifically denied
53222 + else
53223 + return 1; // allowed
53224 + }
53225 +
53226 + return 0; // not specifically allowed, may continue parsing
53227 +}
53228 +
53229 +static int
53230 +gr_search_connectbind(const int full_mode, struct sock *sk,
53231 + struct sockaddr_in *addr, const int type)
53232 +{
53233 + char iface[IFNAMSIZ] = {0};
53234 + struct acl_subject_label *curr;
53235 + struct acl_ip_label *ip;
53236 + struct inet_sock *isk;
53237 + struct net_device *dev;
53238 + struct in_device *idev;
53239 + unsigned long i;
53240 + int ret;
53241 + int mode = full_mode & (GR_BIND | GR_CONNECT);
53242 + __u32 ip_addr = 0;
53243 + __u32 our_addr;
53244 + __u32 our_netmask;
53245 + char *p;
53246 + __u16 ip_port = 0;
53247 + const struct cred *cred = current_cred();
53248 +
53249 + if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
53250 + return 0;
53251 +
53252 + curr = current->acl;
53253 + isk = inet_sk(sk);
53254 +
53255 + /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
53256 + if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
53257 + addr->sin_addr.s_addr = curr->inaddr_any_override;
53258 + if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
53259 + struct sockaddr_in saddr;
53260 + int err;
53261 +
53262 + saddr.sin_family = AF_INET;
53263 + saddr.sin_addr.s_addr = curr->inaddr_any_override;
53264 + saddr.sin_port = isk->inet_sport;
53265 +
53266 + err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
53267 + if (err)
53268 + return err;
53269 +
53270 + err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
53271 + if (err)
53272 + return err;
53273 + }
53274 +
53275 + if (!curr->ips)
53276 + return 0;
53277 +
53278 + ip_addr = addr->sin_addr.s_addr;
53279 + ip_port = ntohs(addr->sin_port);
53280 +
53281 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
53282 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
53283 + current->role->roletype, cred->uid,
53284 + cred->gid, current->exec_file ?
53285 + gr_to_filename(current->exec_file->f_path.dentry,
53286 + current->exec_file->f_path.mnt) :
53287 + curr->filename, curr->filename,
53288 + &ip_addr, ip_port, type,
53289 + sk->sk_protocol, mode, &current->signal->saved_ip);
53290 + return 0;
53291 + }
53292 +
53293 + for (i = 0; i < curr->ip_num; i++) {
53294 + ip = *(curr->ips + i);
53295 + if (ip->iface != NULL) {
53296 + strncpy(iface, ip->iface, IFNAMSIZ - 1);
53297 + p = strchr(iface, ':');
53298 + if (p != NULL)
53299 + *p = '\0';
53300 + dev = dev_get_by_name(sock_net(sk), iface);
53301 + if (dev == NULL)
53302 + continue;
53303 + idev = in_dev_get(dev);
53304 + if (idev == NULL) {
53305 + dev_put(dev);
53306 + continue;
53307 + }
53308 + rcu_read_lock();
53309 + for_ifa(idev) {
53310 + if (!strcmp(ip->iface, ifa->ifa_label)) {
53311 + our_addr = ifa->ifa_address;
53312 + our_netmask = 0xffffffff;
53313 + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
53314 + if (ret == 1) {
53315 + rcu_read_unlock();
53316 + in_dev_put(idev);
53317 + dev_put(dev);
53318 + return 0;
53319 + } else if (ret == 2) {
53320 + rcu_read_unlock();
53321 + in_dev_put(idev);
53322 + dev_put(dev);
53323 + goto denied;
53324 + }
53325 + }
53326 + } endfor_ifa(idev);
53327 + rcu_read_unlock();
53328 + in_dev_put(idev);
53329 + dev_put(dev);
53330 + } else {
53331 + our_addr = ip->addr;
53332 + our_netmask = ip->netmask;
53333 + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
53334 + if (ret == 1)
53335 + return 0;
53336 + else if (ret == 2)
53337 + goto denied;
53338 + }
53339 + }
53340 +
53341 +denied:
53342 + if (mode == GR_BIND)
53343 + gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
53344 + else if (mode == GR_CONNECT)
53345 + gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
53346 +
53347 + return -EACCES;
53348 +}
53349 +
53350 +int
53351 +gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
53352 +{
53353 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
53354 +}
53355 +
53356 +int
53357 +gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
53358 +{
53359 + return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
53360 +}
53361 +
53362 +int gr_search_listen(struct socket *sock)
53363 +{
53364 + struct sock *sk = sock->sk;
53365 + struct sockaddr_in addr;
53366 +
53367 + addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
53368 + addr.sin_port = inet_sk(sk)->inet_sport;
53369 +
53370 + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
53371 +}
53372 +
53373 +int gr_search_accept(struct socket *sock)
53374 +{
53375 + struct sock *sk = sock->sk;
53376 + struct sockaddr_in addr;
53377 +
53378 + addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
53379 + addr.sin_port = inet_sk(sk)->inet_sport;
53380 +
53381 + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
53382 +}
53383 +
53384 +int
53385 +gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
53386 +{
53387 + if (addr)
53388 + return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
53389 + else {
53390 + struct sockaddr_in sin;
53391 + const struct inet_sock *inet = inet_sk(sk);
53392 +
53393 + sin.sin_addr.s_addr = inet->inet_daddr;
53394 + sin.sin_port = inet->inet_dport;
53395 +
53396 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
53397 + }
53398 +}
53399 +
53400 +int
53401 +gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
53402 +{
53403 + struct sockaddr_in sin;
53404 +
53405 + if (unlikely(skb->len < sizeof (struct udphdr)))
53406 + return 0; // skip this packet
53407 +
53408 + sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
53409 + sin.sin_port = udp_hdr(skb)->source;
53410 +
53411 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
53412 +}
53413 diff --git a/grsecurity/gracl_learn.c b/grsecurity/gracl_learn.c
53414 new file mode 100644
53415 index 0000000..25f54ef
53416 --- /dev/null
53417 +++ b/grsecurity/gracl_learn.c
53418 @@ -0,0 +1,207 @@
53419 +#include <linux/kernel.h>
53420 +#include <linux/mm.h>
53421 +#include <linux/sched.h>
53422 +#include <linux/poll.h>
53423 +#include <linux/string.h>
53424 +#include <linux/file.h>
53425 +#include <linux/types.h>
53426 +#include <linux/vmalloc.h>
53427 +#include <linux/grinternal.h>
53428 +
53429 +extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
53430 + size_t count, loff_t *ppos);
53431 +extern int gr_acl_is_enabled(void);
53432 +
53433 +static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
53434 +static int gr_learn_attached;
53435 +
53436 +/* use a 512k buffer */
53437 +#define LEARN_BUFFER_SIZE (512 * 1024)
53438 +
53439 +static DEFINE_SPINLOCK(gr_learn_lock);
53440 +static DEFINE_MUTEX(gr_learn_user_mutex);
53441 +
53442 +/* we need to maintain two buffers, so that the kernel context of grlearn
53443 + uses a semaphore around the userspace copying, and the other kernel contexts
53444 + use a spinlock when copying into the buffer, since they cannot sleep
53445 +*/
53446 +static char *learn_buffer;
53447 +static char *learn_buffer_user;
53448 +static int learn_buffer_len;
53449 +static int learn_buffer_user_len;
53450 +
53451 +static ssize_t
53452 +read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
53453 +{
53454 + DECLARE_WAITQUEUE(wait, current);
53455 + ssize_t retval = 0;
53456 +
53457 + add_wait_queue(&learn_wait, &wait);
53458 + set_current_state(TASK_INTERRUPTIBLE);
53459 + do {
53460 + mutex_lock(&gr_learn_user_mutex);
53461 + spin_lock(&gr_learn_lock);
53462 + if (learn_buffer_len)
53463 + break;
53464 + spin_unlock(&gr_learn_lock);
53465 + mutex_unlock(&gr_learn_user_mutex);
53466 + if (file->f_flags & O_NONBLOCK) {
53467 + retval = -EAGAIN;
53468 + goto out;
53469 + }
53470 + if (signal_pending(current)) {
53471 + retval = -ERESTARTSYS;
53472 + goto out;
53473 + }
53474 +
53475 + schedule();
53476 + } while (1);
53477 +
53478 + memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
53479 + learn_buffer_user_len = learn_buffer_len;
53480 + retval = learn_buffer_len;
53481 + learn_buffer_len = 0;
53482 +
53483 + spin_unlock(&gr_learn_lock);
53484 +
53485 + if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
53486 + retval = -EFAULT;
53487 +
53488 + mutex_unlock(&gr_learn_user_mutex);
53489 +out:
53490 + set_current_state(TASK_RUNNING);
53491 + remove_wait_queue(&learn_wait, &wait);
53492 + return retval;
53493 +}
53494 +
53495 +static unsigned int
53496 +poll_learn(struct file * file, poll_table * wait)
53497 +{
53498 + poll_wait(file, &learn_wait, wait);
53499 +
53500 + if (learn_buffer_len)
53501 + return (POLLIN | POLLRDNORM);
53502 +
53503 + return 0;
53504 +}
53505 +
53506 +void
53507 +gr_clear_learn_entries(void)
53508 +{
53509 + char *tmp;
53510 +
53511 + mutex_lock(&gr_learn_user_mutex);
53512 + spin_lock(&gr_learn_lock);
53513 + tmp = learn_buffer;
53514 + learn_buffer = NULL;
53515 + spin_unlock(&gr_learn_lock);
53516 + if (tmp)
53517 + vfree(tmp);
53518 + if (learn_buffer_user != NULL) {
53519 + vfree(learn_buffer_user);
53520 + learn_buffer_user = NULL;
53521 + }
53522 + learn_buffer_len = 0;
53523 + mutex_unlock(&gr_learn_user_mutex);
53524 +
53525 + return;
53526 +}
53527 +
53528 +void
53529 +gr_add_learn_entry(const char *fmt, ...)
53530 +{
53531 + va_list args;
53532 + unsigned int len;
53533 +
53534 + if (!gr_learn_attached)
53535 + return;
53536 +
53537 + spin_lock(&gr_learn_lock);
53538 +
53539 + /* leave a gap at the end so we know when it's "full" but don't have to
53540 + compute the exact length of the string we're trying to append
53541 + */
53542 + if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
53543 + spin_unlock(&gr_learn_lock);
53544 + wake_up_interruptible(&learn_wait);
53545 + return;
53546 + }
53547 + if (learn_buffer == NULL) {
53548 + spin_unlock(&gr_learn_lock);
53549 + return;
53550 + }
53551 +
53552 + va_start(args, fmt);
53553 + len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
53554 + va_end(args);
53555 +
53556 + learn_buffer_len += len + 1;
53557 +
53558 + spin_unlock(&gr_learn_lock);
53559 + wake_up_interruptible(&learn_wait);
53560 +
53561 + return;
53562 +}
53563 +
53564 +static int
53565 +open_learn(struct inode *inode, struct file *file)
53566 +{
53567 + if (file->f_mode & FMODE_READ && gr_learn_attached)
53568 + return -EBUSY;
53569 + if (file->f_mode & FMODE_READ) {
53570 + int retval = 0;
53571 + mutex_lock(&gr_learn_user_mutex);
53572 + if (learn_buffer == NULL)
53573 + learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
53574 + if (learn_buffer_user == NULL)
53575 + learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
53576 + if (learn_buffer == NULL) {
53577 + retval = -ENOMEM;
53578 + goto out_error;
53579 + }
53580 + if (learn_buffer_user == NULL) {
53581 + retval = -ENOMEM;
53582 + goto out_error;
53583 + }
53584 + learn_buffer_len = 0;
53585 + learn_buffer_user_len = 0;
53586 + gr_learn_attached = 1;
53587 +out_error:
53588 + mutex_unlock(&gr_learn_user_mutex);
53589 + return retval;
53590 + }
53591 + return 0;
53592 +}
53593 +
53594 +static int
53595 +close_learn(struct inode *inode, struct file *file)
53596 +{
53597 + if (file->f_mode & FMODE_READ) {
53598 + char *tmp = NULL;
53599 + mutex_lock(&gr_learn_user_mutex);
53600 + spin_lock(&gr_learn_lock);
53601 + tmp = learn_buffer;
53602 + learn_buffer = NULL;
53603 + spin_unlock(&gr_learn_lock);
53604 + if (tmp)
53605 + vfree(tmp);
53606 + if (learn_buffer_user != NULL) {
53607 + vfree(learn_buffer_user);
53608 + learn_buffer_user = NULL;
53609 + }
53610 + learn_buffer_len = 0;
53611 + learn_buffer_user_len = 0;
53612 + gr_learn_attached = 0;
53613 + mutex_unlock(&gr_learn_user_mutex);
53614 + }
53615 +
53616 + return 0;
53617 +}
53618 +
53619 +const struct file_operations grsec_fops = {
53620 + .read = read_learn,
53621 + .write = write_grsec_handler,
53622 + .open = open_learn,
53623 + .release = close_learn,
53624 + .poll = poll_learn,
53625 +};
53626 diff --git a/grsecurity/gracl_res.c b/grsecurity/gracl_res.c
53627 new file mode 100644
53628 index 0000000..39645c9
53629 --- /dev/null
53630 +++ b/grsecurity/gracl_res.c
53631 @@ -0,0 +1,68 @@
53632 +#include <linux/kernel.h>
53633 +#include <linux/sched.h>
53634 +#include <linux/gracl.h>
53635 +#include <linux/grinternal.h>
53636 +
53637 +static const char *restab_log[] = {
53638 + [RLIMIT_CPU] = "RLIMIT_CPU",
53639 + [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
53640 + [RLIMIT_DATA] = "RLIMIT_DATA",
53641 + [RLIMIT_STACK] = "RLIMIT_STACK",
53642 + [RLIMIT_CORE] = "RLIMIT_CORE",
53643 + [RLIMIT_RSS] = "RLIMIT_RSS",
53644 + [RLIMIT_NPROC] = "RLIMIT_NPROC",
53645 + [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
53646 + [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
53647 + [RLIMIT_AS] = "RLIMIT_AS",
53648 + [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
53649 + [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
53650 + [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
53651 + [RLIMIT_NICE] = "RLIMIT_NICE",
53652 + [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
53653 + [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
53654 + [GR_CRASH_RES] = "RLIMIT_CRASH"
53655 +};
53656 +
53657 +void
53658 +gr_log_resource(const struct task_struct *task,
53659 + const int res, const unsigned long wanted, const int gt)
53660 +{
53661 + const struct cred *cred;
53662 + unsigned long rlim;
53663 +
53664 + if (!gr_acl_is_enabled() && !grsec_resource_logging)
53665 + return;
53666 +
53667 + // not yet supported resource
53668 + if (unlikely(!restab_log[res]))
53669 + return;
53670 +
53671 + if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
53672 + rlim = task_rlimit_max(task, res);
53673 + else
53674 + rlim = task_rlimit(task, res);
53675 +
53676 + if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
53677 + return;
53678 +
53679 + rcu_read_lock();
53680 + cred = __task_cred(task);
53681 +
53682 + if (res == RLIMIT_NPROC &&
53683 + (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
53684 + cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
53685 + goto out_rcu_unlock;
53686 + else if (res == RLIMIT_MEMLOCK &&
53687 + cap_raised(cred->cap_effective, CAP_IPC_LOCK))
53688 + goto out_rcu_unlock;
53689 + else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
53690 + goto out_rcu_unlock;
53691 + rcu_read_unlock();
53692 +
53693 + gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
53694 +
53695 + return;
53696 +out_rcu_unlock:
53697 + rcu_read_unlock();
53698 + return;
53699 +}
53700 diff --git a/grsecurity/gracl_segv.c b/grsecurity/gracl_segv.c
53701 new file mode 100644
53702 index 0000000..5556be3
53703 --- /dev/null
53704 +++ b/grsecurity/gracl_segv.c
53705 @@ -0,0 +1,299 @@
53706 +#include <linux/kernel.h>
53707 +#include <linux/mm.h>
53708 +#include <asm/uaccess.h>
53709 +#include <asm/errno.h>
53710 +#include <asm/mman.h>
53711 +#include <net/sock.h>
53712 +#include <linux/file.h>
53713 +#include <linux/fs.h>
53714 +#include <linux/net.h>
53715 +#include <linux/in.h>
53716 +#include <linux/slab.h>
53717 +#include <linux/types.h>
53718 +#include <linux/sched.h>
53719 +#include <linux/timer.h>
53720 +#include <linux/gracl.h>
53721 +#include <linux/grsecurity.h>
53722 +#include <linux/grinternal.h>
53723 +
53724 +static struct crash_uid *uid_set;
53725 +static unsigned short uid_used;
53726 +static DEFINE_SPINLOCK(gr_uid_lock);
53727 +extern rwlock_t gr_inode_lock;
53728 +extern struct acl_subject_label *
53729 + lookup_acl_subj_label(const ino_t inode, const dev_t dev,
53730 + struct acl_role_label *role);
53731 +
53732 +#ifdef CONFIG_BTRFS_FS
53733 +extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
53734 +extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
53735 +#endif
53736 +
53737 +static inline dev_t __get_dev(const struct dentry *dentry)
53738 +{
53739 +#ifdef CONFIG_BTRFS_FS
53740 + if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
53741 + return get_btrfs_dev_from_inode(dentry->d_inode);
53742 + else
53743 +#endif
53744 + return dentry->d_inode->i_sb->s_dev;
53745 +}
53746 +
53747 +int
53748 +gr_init_uidset(void)
53749 +{
53750 + uid_set =
53751 + kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
53752 + uid_used = 0;
53753 +
53754 + return uid_set ? 1 : 0;
53755 +}
53756 +
53757 +void
53758 +gr_free_uidset(void)
53759 +{
53760 + if (uid_set)
53761 + kfree(uid_set);
53762 +
53763 + return;
53764 +}
53765 +
53766 +int
53767 +gr_find_uid(const uid_t uid)
53768 +{
53769 + struct crash_uid *tmp = uid_set;
53770 + uid_t buid;
53771 + int low = 0, high = uid_used - 1, mid;
53772 +
53773 + while (high >= low) {
53774 + mid = (low + high) >> 1;
53775 + buid = tmp[mid].uid;
53776 + if (buid == uid)
53777 + return mid;
53778 + if (buid > uid)
53779 + high = mid - 1;
53780 + if (buid < uid)
53781 + low = mid + 1;
53782 + }
53783 +
53784 + return -1;
53785 +}
53786 +
53787 +static __inline__ void
53788 +gr_insertsort(void)
53789 +{
53790 + unsigned short i, j;
53791 + struct crash_uid index;
53792 +
53793 + for (i = 1; i < uid_used; i++) {
53794 + index = uid_set[i];
53795 + j = i;
53796 + while ((j > 0) && uid_set[j - 1].uid > index.uid) {
53797 + uid_set[j] = uid_set[j - 1];
53798 + j--;
53799 + }
53800 + uid_set[j] = index;
53801 + }
53802 +
53803 + return;
53804 +}
53805 +
53806 +static __inline__ void
53807 +gr_insert_uid(const uid_t uid, const unsigned long expires)
53808 +{
53809 + int loc;
53810 +
53811 + if (uid_used == GR_UIDTABLE_MAX)
53812 + return;
53813 +
53814 + loc = gr_find_uid(uid);
53815 +
53816 + if (loc >= 0) {
53817 + uid_set[loc].expires = expires;
53818 + return;
53819 + }
53820 +
53821 + uid_set[uid_used].uid = uid;
53822 + uid_set[uid_used].expires = expires;
53823 + uid_used++;
53824 +
53825 + gr_insertsort();
53826 +
53827 + return;
53828 +}
53829 +
53830 +void
53831 +gr_remove_uid(const unsigned short loc)
53832 +{
53833 + unsigned short i;
53834 +
53835 + for (i = loc + 1; i < uid_used; i++)
53836 + uid_set[i - 1] = uid_set[i];
53837 +
53838 + uid_used--;
53839 +
53840 + return;
53841 +}
53842 +
53843 +int
53844 +gr_check_crash_uid(const uid_t uid)
53845 +{
53846 + int loc;
53847 + int ret = 0;
53848 +
53849 + if (unlikely(!gr_acl_is_enabled()))
53850 + return 0;
53851 +
53852 + spin_lock(&gr_uid_lock);
53853 + loc = gr_find_uid(uid);
53854 +
53855 + if (loc < 0)
53856 + goto out_unlock;
53857 +
53858 + if (time_before_eq(uid_set[loc].expires, get_seconds()))
53859 + gr_remove_uid(loc);
53860 + else
53861 + ret = 1;
53862 +
53863 +out_unlock:
53864 + spin_unlock(&gr_uid_lock);
53865 + return ret;
53866 +}
53867 +
53868 +static __inline__ int
53869 +proc_is_setxid(const struct cred *cred)
53870 +{
53871 + if (cred->uid != cred->euid || cred->uid != cred->suid ||
53872 + cred->uid != cred->fsuid)
53873 + return 1;
53874 + if (cred->gid != cred->egid || cred->gid != cred->sgid ||
53875 + cred->gid != cred->fsgid)
53876 + return 1;
53877 +
53878 + return 0;
53879 +}
53880 +
53881 +extern int gr_fake_force_sig(int sig, struct task_struct *t);
53882 +
53883 +void
53884 +gr_handle_crash(struct task_struct *task, const int sig)
53885 +{
53886 + struct acl_subject_label *curr;
53887 + struct task_struct *tsk, *tsk2;
53888 + const struct cred *cred;
53889 + const struct cred *cred2;
53890 +
53891 + if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
53892 + return;
53893 +
53894 + if (unlikely(!gr_acl_is_enabled()))
53895 + return;
53896 +
53897 + curr = task->acl;
53898 +
53899 + if (!(curr->resmask & (1 << GR_CRASH_RES)))
53900 + return;
53901 +
53902 + if (time_before_eq(curr->expires, get_seconds())) {
53903 + curr->expires = 0;
53904 + curr->crashes = 0;
53905 + }
53906 +
53907 + curr->crashes++;
53908 +
53909 + if (!curr->expires)
53910 + curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
53911 +
53912 + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
53913 + time_after(curr->expires, get_seconds())) {
53914 + rcu_read_lock();
53915 + cred = __task_cred(task);
53916 + if (cred->uid && proc_is_setxid(cred)) {
53917 + gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
53918 + spin_lock(&gr_uid_lock);
53919 + gr_insert_uid(cred->uid, curr->expires);
53920 + spin_unlock(&gr_uid_lock);
53921 + curr->expires = 0;
53922 + curr->crashes = 0;
53923 + read_lock(&tasklist_lock);
53924 + do_each_thread(tsk2, tsk) {
53925 + cred2 = __task_cred(tsk);
53926 + if (tsk != task && cred2->uid == cred->uid)
53927 + gr_fake_force_sig(SIGKILL, tsk);
53928 + } while_each_thread(tsk2, tsk);
53929 + read_unlock(&tasklist_lock);
53930 + } else {
53931 + gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
53932 + read_lock(&tasklist_lock);
53933 + read_lock(&grsec_exec_file_lock);
53934 + do_each_thread(tsk2, tsk) {
53935 + if (likely(tsk != task)) {
53936 + // if this thread has the same subject as the one that triggered
53937 + // RES_CRASH and it's the same binary, kill it
53938 + if (tsk->acl == task->acl && tsk->exec_file == task->exec_file)
53939 + gr_fake_force_sig(SIGKILL, tsk);
53940 + }
53941 + } while_each_thread(tsk2, tsk);
53942 + read_unlock(&grsec_exec_file_lock);
53943 + read_unlock(&tasklist_lock);
53944 + }
53945 + rcu_read_unlock();
53946 + }
53947 +
53948 + return;
53949 +}
53950 +
53951 +int
53952 +gr_check_crash_exec(const struct file *filp)
53953 +{
53954 + struct acl_subject_label *curr;
53955 +
53956 + if (unlikely(!gr_acl_is_enabled()))
53957 + return 0;
53958 +
53959 + read_lock(&gr_inode_lock);
53960 + curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
53961 + __get_dev(filp->f_path.dentry),
53962 + current->role);
53963 + read_unlock(&gr_inode_lock);
53964 +
53965 + if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) ||
53966 + (!curr->crashes && !curr->expires))
53967 + return 0;
53968 +
53969 + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
53970 + time_after(curr->expires, get_seconds()))
53971 + return 1;
53972 + else if (time_before_eq(curr->expires, get_seconds())) {
53973 + curr->crashes = 0;
53974 + curr->expires = 0;
53975 + }
53976 +
53977 + return 0;
53978 +}
53979 +
53980 +void
53981 +gr_handle_alertkill(struct task_struct *task)
53982 +{
53983 + struct acl_subject_label *curracl;
53984 + __u32 curr_ip;
53985 + struct task_struct *p, *p2;
53986 +
53987 + if (unlikely(!gr_acl_is_enabled()))
53988 + return;
53989 +
53990 + curracl = task->acl;
53991 + curr_ip = task->signal->curr_ip;
53992 +
53993 + if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
53994 + read_lock(&tasklist_lock);
53995 + do_each_thread(p2, p) {
53996 + if (p->signal->curr_ip == curr_ip)
53997 + gr_fake_force_sig(SIGKILL, p);
53998 + } while_each_thread(p2, p);
53999 + read_unlock(&tasklist_lock);
54000 + } else if (curracl->mode & GR_KILLPROC)
54001 + gr_fake_force_sig(SIGKILL, task);
54002 +
54003 + return;
54004 +}
54005 diff --git a/grsecurity/gracl_shm.c b/grsecurity/gracl_shm.c
54006 new file mode 100644
54007 index 0000000..9d83a69
54008 --- /dev/null
54009 +++ b/grsecurity/gracl_shm.c
54010 @@ -0,0 +1,40 @@
54011 +#include <linux/kernel.h>
54012 +#include <linux/mm.h>
54013 +#include <linux/sched.h>
54014 +#include <linux/file.h>
54015 +#include <linux/ipc.h>
54016 +#include <linux/gracl.h>
54017 +#include <linux/grsecurity.h>
54018 +#include <linux/grinternal.h>
54019 +
54020 +int
54021 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
54022 + const time_t shm_createtime, const uid_t cuid, const int shmid)
54023 +{
54024 + struct task_struct *task;
54025 +
54026 + if (!gr_acl_is_enabled())
54027 + return 1;
54028 +
54029 + rcu_read_lock();
54030 + read_lock(&tasklist_lock);
54031 +
54032 + task = find_task_by_vpid(shm_cprid);
54033 +
54034 + if (unlikely(!task))
54035 + task = find_task_by_vpid(shm_lapid);
54036 +
54037 + if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
54038 + (task->pid == shm_lapid)) &&
54039 + (task->acl->mode & GR_PROTSHM) &&
54040 + (task->acl != current->acl))) {
54041 + read_unlock(&tasklist_lock);
54042 + rcu_read_unlock();
54043 + gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid);
54044 + return 0;
54045 + }
54046 + read_unlock(&tasklist_lock);
54047 + rcu_read_unlock();
54048 +
54049 + return 1;
54050 +}
54051 diff --git a/grsecurity/grsec_chdir.c b/grsecurity/grsec_chdir.c
54052 new file mode 100644
54053 index 0000000..bc0be01
54054 --- /dev/null
54055 +++ b/grsecurity/grsec_chdir.c
54056 @@ -0,0 +1,19 @@
54057 +#include <linux/kernel.h>
54058 +#include <linux/sched.h>
54059 +#include <linux/fs.h>
54060 +#include <linux/file.h>
54061 +#include <linux/grsecurity.h>
54062 +#include <linux/grinternal.h>
54063 +
54064 +void
54065 +gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
54066 +{
54067 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
54068 + if ((grsec_enable_chdir && grsec_enable_group &&
54069 + in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
54070 + !grsec_enable_group)) {
54071 + gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
54072 + }
54073 +#endif
54074 + return;
54075 +}
54076 diff --git a/grsecurity/grsec_chroot.c b/grsecurity/grsec_chroot.c
54077 new file mode 100644
54078 index 0000000..a2dc675
54079 --- /dev/null
54080 +++ b/grsecurity/grsec_chroot.c
54081 @@ -0,0 +1,351 @@
54082 +#include <linux/kernel.h>
54083 +#include <linux/module.h>
54084 +#include <linux/sched.h>
54085 +#include <linux/file.h>
54086 +#include <linux/fs.h>
54087 +#include <linux/mount.h>
54088 +#include <linux/types.h>
54089 +#include <linux/pid_namespace.h>
54090 +#include <linux/grsecurity.h>
54091 +#include <linux/grinternal.h>
54092 +
54093 +void gr_set_chroot_entries(struct task_struct *task, struct path *path)
54094 +{
54095 +#ifdef CONFIG_GRKERNSEC
54096 + if (task->pid > 1 && path->dentry != init_task.fs->root.dentry &&
54097 + path->dentry != task->nsproxy->mnt_ns->root->mnt_root)
54098 + task->gr_is_chrooted = 1;
54099 + else
54100 + task->gr_is_chrooted = 0;
54101 +
54102 + task->gr_chroot_dentry = path->dentry;
54103 +#endif
54104 + return;
54105 +}
54106 +
54107 +void gr_clear_chroot_entries(struct task_struct *task)
54108 +{
54109 +#ifdef CONFIG_GRKERNSEC
54110 + task->gr_is_chrooted = 0;
54111 + task->gr_chroot_dentry = NULL;
54112 +#endif
54113 + return;
54114 +}
54115 +
54116 +int
54117 +gr_handle_chroot_unix(const pid_t pid)
54118 +{
54119 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
54120 + struct task_struct *p;
54121 +
54122 + if (unlikely(!grsec_enable_chroot_unix))
54123 + return 1;
54124 +
54125 + if (likely(!proc_is_chrooted(current)))
54126 + return 1;
54127 +
54128 + rcu_read_lock();
54129 + read_lock(&tasklist_lock);
54130 + p = find_task_by_vpid_unrestricted(pid);
54131 + if (unlikely(p && !have_same_root(current, p))) {
54132 + read_unlock(&tasklist_lock);
54133 + rcu_read_unlock();
54134 + gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
54135 + return 0;
54136 + }
54137 + read_unlock(&tasklist_lock);
54138 + rcu_read_unlock();
54139 +#endif
54140 + return 1;
54141 +}
54142 +
54143 +int
54144 +gr_handle_chroot_nice(void)
54145 +{
54146 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
54147 + if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
54148 + gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
54149 + return -EPERM;
54150 + }
54151 +#endif
54152 + return 0;
54153 +}
54154 +
54155 +int
54156 +gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
54157 +{
54158 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
54159 + if (grsec_enable_chroot_nice && (niceval < task_nice(p))
54160 + && proc_is_chrooted(current)) {
54161 + gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, p->pid);
54162 + return -EACCES;
54163 + }
54164 +#endif
54165 + return 0;
54166 +}
54167 +
54168 +int
54169 +gr_handle_chroot_rawio(const struct inode *inode)
54170 +{
54171 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
54172 + if (grsec_enable_chroot_caps && proc_is_chrooted(current) &&
54173 + inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO))
54174 + return 1;
54175 +#endif
54176 + return 0;
54177 +}
54178 +
54179 +int
54180 +gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
54181 +{
54182 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
54183 + struct task_struct *p;
54184 + int ret = 0;
54185 + if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
54186 + return ret;
54187 +
54188 + read_lock(&tasklist_lock);
54189 + do_each_pid_task(pid, type, p) {
54190 + if (!have_same_root(current, p)) {
54191 + ret = 1;
54192 + goto out;
54193 + }
54194 + } while_each_pid_task(pid, type, p);
54195 +out:
54196 + read_unlock(&tasklist_lock);
54197 + return ret;
54198 +#endif
54199 + return 0;
54200 +}
54201 +
54202 +int
54203 +gr_pid_is_chrooted(struct task_struct *p)
54204 +{
54205 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
54206 + if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
54207 + return 0;
54208 +
54209 + if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
54210 + !have_same_root(current, p)) {
54211 + return 1;
54212 + }
54213 +#endif
54214 + return 0;
54215 +}
54216 +
54217 +EXPORT_SYMBOL(gr_pid_is_chrooted);
54218 +
54219 +#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
54220 +int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
54221 +{
54222 + struct path path, currentroot;
54223 + int ret = 0;
54224 +
54225 + path.dentry = (struct dentry *)u_dentry;
54226 + path.mnt = (struct vfsmount *)u_mnt;
54227 + get_fs_root(current->fs, &currentroot);
54228 + if (path_is_under(&path, &currentroot))
54229 + ret = 1;
54230 + path_put(&currentroot);
54231 +
54232 + return ret;
54233 +}
54234 +#endif
54235 +
54236 +int
54237 +gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
54238 +{
54239 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
54240 + if (!grsec_enable_chroot_fchdir)
54241 + return 1;
54242 +
54243 + if (!proc_is_chrooted(current))
54244 + return 1;
54245 + else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
54246 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
54247 + return 0;
54248 + }
54249 +#endif
54250 + return 1;
54251 +}
54252 +
54253 +int
54254 +gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
54255 + const time_t shm_createtime)
54256 +{
54257 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
54258 + struct task_struct *p;
54259 + time_t starttime;
54260 +
54261 + if (unlikely(!grsec_enable_chroot_shmat))
54262 + return 1;
54263 +
54264 + if (likely(!proc_is_chrooted(current)))
54265 + return 1;
54266 +
54267 + rcu_read_lock();
54268 + read_lock(&tasklist_lock);
54269 +
54270 + if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
54271 + starttime = p->start_time.tv_sec;
54272 + if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
54273 + if (have_same_root(current, p)) {
54274 + goto allow;
54275 + } else {
54276 + read_unlock(&tasklist_lock);
54277 + rcu_read_unlock();
54278 + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
54279 + return 0;
54280 + }
54281 + }
54282 + /* creator exited, pid reuse, fall through to next check */
54283 + }
54284 + if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
54285 + if (unlikely(!have_same_root(current, p))) {
54286 + read_unlock(&tasklist_lock);
54287 + rcu_read_unlock();
54288 + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
54289 + return 0;
54290 + }
54291 + }
54292 +
54293 +allow:
54294 + read_unlock(&tasklist_lock);
54295 + rcu_read_unlock();
54296 +#endif
54297 + return 1;
54298 +}
54299 +
54300 +void
54301 +gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
54302 +{
54303 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
54304 + if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
54305 + gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
54306 +#endif
54307 + return;
54308 +}
54309 +
54310 +int
54311 +gr_handle_chroot_mknod(const struct dentry *dentry,
54312 + const struct vfsmount *mnt, const int mode)
54313 +{
54314 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
54315 + if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
54316 + proc_is_chrooted(current)) {
54317 + gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
54318 + return -EPERM;
54319 + }
54320 +#endif
54321 + return 0;
54322 +}
54323 +
54324 +int
54325 +gr_handle_chroot_mount(const struct dentry *dentry,
54326 + const struct vfsmount *mnt, const char *dev_name)
54327 +{
54328 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
54329 + if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
54330 + gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none", dentry, mnt);
54331 + return -EPERM;
54332 + }
54333 +#endif
54334 + return 0;
54335 +}
54336 +
54337 +int
54338 +gr_handle_chroot_pivot(void)
54339 +{
54340 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
54341 + if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
54342 + gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
54343 + return -EPERM;
54344 + }
54345 +#endif
54346 + return 0;
54347 +}
54348 +
54349 +int
54350 +gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
54351 +{
54352 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
54353 + if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
54354 + !gr_is_outside_chroot(dentry, mnt)) {
54355 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
54356 + return -EPERM;
54357 + }
54358 +#endif
54359 + return 0;
54360 +}
54361 +
54362 +extern const char *captab_log[];
54363 +extern int captab_log_entries;
54364 +
54365 +int
54366 +gr_chroot_is_capable(const int cap)
54367 +{
54368 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
54369 + if (grsec_enable_chroot_caps && proc_is_chrooted(current)) {
54370 + kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
54371 + if (cap_raised(chroot_caps, cap)) {
54372 + const struct cred *creds = current_cred();
54373 + if (cap_raised(creds->cap_effective, cap) && cap < captab_log_entries) {
54374 + gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, current, captab_log[cap]);
54375 + }
54376 + return 0;
54377 + }
54378 + }
54379 +#endif
54380 + return 1;
54381 +}
54382 +
54383 +int
54384 +gr_chroot_is_capable_nolog(const int cap)
54385 +{
54386 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
54387 + if (grsec_enable_chroot_caps && proc_is_chrooted(current)) {
54388 + kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
54389 + if (cap_raised(chroot_caps, cap)) {
54390 + return 0;
54391 + }
54392 + }
54393 +#endif
54394 + return 1;
54395 +}
54396 +
54397 +int
54398 +gr_handle_chroot_sysctl(const int op)
54399 +{
54400 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
54401 + if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) &&
54402 + proc_is_chrooted(current))
54403 + return -EACCES;
54404 +#endif
54405 + return 0;
54406 +}
54407 +
54408 +void
54409 +gr_handle_chroot_chdir(struct path *path)
54410 +{
54411 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
54412 + if (grsec_enable_chroot_chdir)
54413 + set_fs_pwd(current->fs, path);
54414 +#endif
54415 + return;
54416 +}
54417 +
54418 +int
54419 +gr_handle_chroot_chmod(const struct dentry *dentry,
54420 + const struct vfsmount *mnt, const int mode)
54421 +{
54422 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
54423 + /* allow chmod +s on directories, but not files */
54424 + if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
54425 + ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
54426 + proc_is_chrooted(current)) {
54427 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
54428 + return -EPERM;
54429 + }
54430 +#endif
54431 + return 0;
54432 +}
54433 diff --git a/grsecurity/grsec_disabled.c b/grsecurity/grsec_disabled.c
54434 new file mode 100644
54435 index 0000000..213ad8b
54436 --- /dev/null
54437 +++ b/grsecurity/grsec_disabled.c
54438 @@ -0,0 +1,437 @@
54439 +#include <linux/kernel.h>
54440 +#include <linux/module.h>
54441 +#include <linux/sched.h>
54442 +#include <linux/file.h>
54443 +#include <linux/fs.h>
54444 +#include <linux/kdev_t.h>
54445 +#include <linux/net.h>
54446 +#include <linux/in.h>
54447 +#include <linux/ip.h>
54448 +#include <linux/skbuff.h>
54449 +#include <linux/sysctl.h>
54450 +
54451 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
54452 +void
54453 +pax_set_initial_flags(struct linux_binprm *bprm)
54454 +{
54455 + return;
54456 +}
54457 +#endif
54458 +
54459 +#ifdef CONFIG_SYSCTL
54460 +__u32
54461 +gr_handle_sysctl(const struct ctl_table * table, const int op)
54462 +{
54463 + return 0;
54464 +}
54465 +#endif
54466 +
54467 +#ifdef CONFIG_TASKSTATS
54468 +int gr_is_taskstats_denied(int pid)
54469 +{
54470 + return 0;
54471 +}
54472 +#endif
54473 +
54474 +int
54475 +gr_acl_is_enabled(void)
54476 +{
54477 + return 0;
54478 +}
54479 +
54480 +void
54481 +gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
54482 +{
54483 + return;
54484 +}
54485 +
54486 +int
54487 +gr_handle_rawio(const struct inode *inode)
54488 +{
54489 + return 0;
54490 +}
54491 +
54492 +void
54493 +gr_acl_handle_psacct(struct task_struct *task, const long code)
54494 +{
54495 + return;
54496 +}
54497 +
54498 +int
54499 +gr_handle_ptrace(struct task_struct *task, const long request)
54500 +{
54501 + return 0;
54502 +}
54503 +
54504 +int
54505 +gr_handle_proc_ptrace(struct task_struct *task)
54506 +{
54507 + return 0;
54508 +}
54509 +
54510 +void
54511 +gr_learn_resource(const struct task_struct *task,
54512 + const int res, const unsigned long wanted, const int gt)
54513 +{
54514 + return;
54515 +}
54516 +
54517 +int
54518 +gr_set_acls(const int type)
54519 +{
54520 + return 0;
54521 +}
54522 +
54523 +int
54524 +gr_check_hidden_task(const struct task_struct *tsk)
54525 +{
54526 + return 0;
54527 +}
54528 +
54529 +int
54530 +gr_check_protected_task(const struct task_struct *task)
54531 +{
54532 + return 0;
54533 +}
54534 +
54535 +int
54536 +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
54537 +{
54538 + return 0;
54539 +}
54540 +
54541 +void
54542 +gr_copy_label(struct task_struct *tsk)
54543 +{
54544 + return;
54545 +}
54546 +
54547 +void
54548 +gr_set_pax_flags(struct task_struct *task)
54549 +{
54550 + return;
54551 +}
54552 +
54553 +int
54554 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
54555 + const int unsafe_share)
54556 +{
54557 + return 0;
54558 +}
54559 +
54560 +void
54561 +gr_handle_delete(const ino_t ino, const dev_t dev)
54562 +{
54563 + return;
54564 +}
54565 +
54566 +void
54567 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
54568 +{
54569 + return;
54570 +}
54571 +
54572 +void
54573 +gr_handle_crash(struct task_struct *task, const int sig)
54574 +{
54575 + return;
54576 +}
54577 +
54578 +int
54579 +gr_check_crash_exec(const struct file *filp)
54580 +{
54581 + return 0;
54582 +}
54583 +
54584 +int
54585 +gr_check_crash_uid(const uid_t uid)
54586 +{
54587 + return 0;
54588 +}
54589 +
54590 +void
54591 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
54592 + struct dentry *old_dentry,
54593 + struct dentry *new_dentry,
54594 + struct vfsmount *mnt, const __u8 replace)
54595 +{
54596 + return;
54597 +}
54598 +
54599 +int
54600 +gr_search_socket(const int family, const int type, const int protocol)
54601 +{
54602 + return 1;
54603 +}
54604 +
54605 +int
54606 +gr_search_connectbind(const int mode, const struct socket *sock,
54607 + const struct sockaddr_in *addr)
54608 +{
54609 + return 0;
54610 +}
54611 +
54612 +void
54613 +gr_handle_alertkill(struct task_struct *task)
54614 +{
54615 + return;
54616 +}
54617 +
54618 +__u32
54619 +gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
54620 +{
54621 + return 1;
54622 +}
54623 +
54624 +__u32
54625 +gr_acl_handle_hidden_file(const struct dentry * dentry,
54626 + const struct vfsmount * mnt)
54627 +{
54628 + return 1;
54629 +}
54630 +
54631 +__u32
54632 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
54633 + int acc_mode)
54634 +{
54635 + return 1;
54636 +}
54637 +
54638 +__u32
54639 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
54640 +{
54641 + return 1;
54642 +}
54643 +
54644 +__u32
54645 +gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
54646 +{
54647 + return 1;
54648 +}
54649 +
54650 +int
54651 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
54652 + unsigned int *vm_flags)
54653 +{
54654 + return 1;
54655 +}
54656 +
54657 +__u32
54658 +gr_acl_handle_truncate(const struct dentry * dentry,
54659 + const struct vfsmount * mnt)
54660 +{
54661 + return 1;
54662 +}
54663 +
54664 +__u32
54665 +gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
54666 +{
54667 + return 1;
54668 +}
54669 +
54670 +__u32
54671 +gr_acl_handle_access(const struct dentry * dentry,
54672 + const struct vfsmount * mnt, const int fmode)
54673 +{
54674 + return 1;
54675 +}
54676 +
54677 +__u32
54678 +gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
54679 + umode_t *mode)
54680 +{
54681 + return 1;
54682 +}
54683 +
54684 +__u32
54685 +gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
54686 +{
54687 + return 1;
54688 +}
54689 +
54690 +__u32
54691 +gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
54692 +{
54693 + return 1;
54694 +}
54695 +
54696 +void
54697 +grsecurity_init(void)
54698 +{
54699 + return;
54700 +}
54701 +
54702 +umode_t gr_acl_umask(void)
54703 +{
54704 + return 0;
54705 +}
54706 +
54707 +__u32
54708 +gr_acl_handle_mknod(const struct dentry * new_dentry,
54709 + const struct dentry * parent_dentry,
54710 + const struct vfsmount * parent_mnt,
54711 + const int mode)
54712 +{
54713 + return 1;
54714 +}
54715 +
54716 +__u32
54717 +gr_acl_handle_mkdir(const struct dentry * new_dentry,
54718 + const struct dentry * parent_dentry,
54719 + const struct vfsmount * parent_mnt)
54720 +{
54721 + return 1;
54722 +}
54723 +
54724 +__u32
54725 +gr_acl_handle_symlink(const struct dentry * new_dentry,
54726 + const struct dentry * parent_dentry,
54727 + const struct vfsmount * parent_mnt, const char *from)
54728 +{
54729 + return 1;
54730 +}
54731 +
54732 +__u32
54733 +gr_acl_handle_link(const struct dentry * new_dentry,
54734 + const struct dentry * parent_dentry,
54735 + const struct vfsmount * parent_mnt,
54736 + const struct dentry * old_dentry,
54737 + const struct vfsmount * old_mnt, const char *to)
54738 +{
54739 + return 1;
54740 +}
54741 +
54742 +int
54743 +gr_acl_handle_rename(const struct dentry *new_dentry,
54744 + const struct dentry *parent_dentry,
54745 + const struct vfsmount *parent_mnt,
54746 + const struct dentry *old_dentry,
54747 + const struct inode *old_parent_inode,
54748 + const struct vfsmount *old_mnt, const char *newname)
54749 +{
54750 + return 0;
54751 +}
54752 +
54753 +int
54754 +gr_acl_handle_filldir(const struct file *file, const char *name,
54755 + const int namelen, const ino_t ino)
54756 +{
54757 + return 1;
54758 +}
54759 +
54760 +int
54761 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
54762 + const time_t shm_createtime, const uid_t cuid, const int shmid)
54763 +{
54764 + return 1;
54765 +}
54766 +
54767 +int
54768 +gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
54769 +{
54770 + return 0;
54771 +}
54772 +
54773 +int
54774 +gr_search_accept(const struct socket *sock)
54775 +{
54776 + return 0;
54777 +}
54778 +
54779 +int
54780 +gr_search_listen(const struct socket *sock)
54781 +{
54782 + return 0;
54783 +}
54784 +
54785 +int
54786 +gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
54787 +{
54788 + return 0;
54789 +}
54790 +
54791 +__u32
54792 +gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
54793 +{
54794 + return 1;
54795 +}
54796 +
54797 +__u32
54798 +gr_acl_handle_creat(const struct dentry * dentry,
54799 + const struct dentry * p_dentry,
54800 + const struct vfsmount * p_mnt, int open_flags, int acc_mode,
54801 + const int imode)
54802 +{
54803 + return 1;
54804 +}
54805 +
54806 +void
54807 +gr_acl_handle_exit(void)
54808 +{
54809 + return;
54810 +}
54811 +
54812 +int
54813 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
54814 +{
54815 + return 1;
54816 +}
54817 +
54818 +void
54819 +gr_set_role_label(const uid_t uid, const gid_t gid)
54820 +{
54821 + return;
54822 +}
54823 +
54824 +int
54825 +gr_acl_handle_procpidmem(const struct task_struct *task)
54826 +{
54827 + return 0;
54828 +}
54829 +
54830 +int
54831 +gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
54832 +{
54833 + return 0;
54834 +}
54835 +
54836 +int
54837 +gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
54838 +{
54839 + return 0;
54840 +}
54841 +
54842 +void
54843 +gr_set_kernel_label(struct task_struct *task)
54844 +{
54845 + return;
54846 +}
54847 +
54848 +int
54849 +gr_check_user_change(int real, int effective, int fs)
54850 +{
54851 + return 0;
54852 +}
54853 +
54854 +int
54855 +gr_check_group_change(int real, int effective, int fs)
54856 +{
54857 + return 0;
54858 +}
54859 +
54860 +int gr_acl_enable_at_secure(void)
54861 +{
54862 + return 0;
54863 +}
54864 +
54865 +dev_t gr_get_dev_from_dentry(struct dentry *dentry)
54866 +{
54867 + return dentry->d_inode->i_sb->s_dev;
54868 +}
54869 +
54870 +EXPORT_SYMBOL(gr_learn_resource);
54871 +EXPORT_SYMBOL(gr_set_kernel_label);
54872 +#ifdef CONFIG_SECURITY
54873 +EXPORT_SYMBOL(gr_check_user_change);
54874 +EXPORT_SYMBOL(gr_check_group_change);
54875 +#endif
54876 diff --git a/grsecurity/grsec_exec.c b/grsecurity/grsec_exec.c
54877 new file mode 100644
54878 index 0000000..2b05ada
54879 --- /dev/null
54880 +++ b/grsecurity/grsec_exec.c
54881 @@ -0,0 +1,146 @@
54882 +#include <linux/kernel.h>
54883 +#include <linux/sched.h>
54884 +#include <linux/file.h>
54885 +#include <linux/binfmts.h>
54886 +#include <linux/fs.h>
54887 +#include <linux/types.h>
54888 +#include <linux/grdefs.h>
54889 +#include <linux/grsecurity.h>
54890 +#include <linux/grinternal.h>
54891 +#include <linux/capability.h>
54892 +#include <linux/module.h>
54893 +
54894 +#include <asm/uaccess.h>
54895 +
54896 +#ifdef CONFIG_GRKERNSEC_EXECLOG
54897 +static char gr_exec_arg_buf[132];
54898 +static DEFINE_MUTEX(gr_exec_arg_mutex);
54899 +#endif
54900 +
54901 +extern const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr);
54902 +
54903 +void
54904 +gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv)
54905 +{
54906 +#ifdef CONFIG_GRKERNSEC_EXECLOG
54907 + char *grarg = gr_exec_arg_buf;
54908 + unsigned int i, x, execlen = 0;
54909 + char c;
54910 +
54911 + if (!((grsec_enable_execlog && grsec_enable_group &&
54912 + in_group_p(grsec_audit_gid))
54913 + || (grsec_enable_execlog && !grsec_enable_group)))
54914 + return;
54915 +
54916 + mutex_lock(&gr_exec_arg_mutex);
54917 + memset(grarg, 0, sizeof(gr_exec_arg_buf));
54918 +
54919 + for (i = 0; i < bprm->argc && execlen < 128; i++) {
54920 + const char __user *p;
54921 + unsigned int len;
54922 +
54923 + p = get_user_arg_ptr(argv, i);
54924 + if (IS_ERR(p))
54925 + goto log;
54926 +
54927 + len = strnlen_user(p, 128 - execlen);
54928 + if (len > 128 - execlen)
54929 + len = 128 - execlen;
54930 + else if (len > 0)
54931 + len--;
54932 + if (copy_from_user(grarg + execlen, p, len))
54933 + goto log;
54934 +
54935 + /* rewrite unprintable characters */
54936 + for (x = 0; x < len; x++) {
54937 + c = *(grarg + execlen + x);
54938 + if (c < 32 || c > 126)
54939 + *(grarg + execlen + x) = ' ';
54940 + }
54941 +
54942 + execlen += len;
54943 + *(grarg + execlen) = ' ';
54944 + *(grarg + execlen + 1) = '\0';
54945 + execlen++;
54946 + }
54947 +
54948 + log:
54949 + gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
54950 + bprm->file->f_path.mnt, grarg);
54951 + mutex_unlock(&gr_exec_arg_mutex);
54952 +#endif
54953 + return;
54954 +}
54955 +
54956 +#ifdef CONFIG_GRKERNSEC
54957 +extern int gr_acl_is_capable(const int cap);
54958 +extern int gr_acl_is_capable_nolog(const int cap);
54959 +extern int gr_chroot_is_capable(const int cap);
54960 +extern int gr_chroot_is_capable_nolog(const int cap);
54961 +#endif
54962 +
54963 +const char *captab_log[] = {
54964 + "CAP_CHOWN",
54965 + "CAP_DAC_OVERRIDE",
54966 + "CAP_DAC_READ_SEARCH",
54967 + "CAP_FOWNER",
54968 + "CAP_FSETID",
54969 + "CAP_KILL",
54970 + "CAP_SETGID",
54971 + "CAP_SETUID",
54972 + "CAP_SETPCAP",
54973 + "CAP_LINUX_IMMUTABLE",
54974 + "CAP_NET_BIND_SERVICE",
54975 + "CAP_NET_BROADCAST",
54976 + "CAP_NET_ADMIN",
54977 + "CAP_NET_RAW",
54978 + "CAP_IPC_LOCK",
54979 + "CAP_IPC_OWNER",
54980 + "CAP_SYS_MODULE",
54981 + "CAP_SYS_RAWIO",
54982 + "CAP_SYS_CHROOT",
54983 + "CAP_SYS_PTRACE",
54984 + "CAP_SYS_PACCT",
54985 + "CAP_SYS_ADMIN",
54986 + "CAP_SYS_BOOT",
54987 + "CAP_SYS_NICE",
54988 + "CAP_SYS_RESOURCE",
54989 + "CAP_SYS_TIME",
54990 + "CAP_SYS_TTY_CONFIG",
54991 + "CAP_MKNOD",
54992 + "CAP_LEASE",
54993 + "CAP_AUDIT_WRITE",
54994 + "CAP_AUDIT_CONTROL",
54995 + "CAP_SETFCAP",
54996 + "CAP_MAC_OVERRIDE",
54997 + "CAP_MAC_ADMIN",
54998 + "CAP_SYSLOG",
54999 + "CAP_WAKE_ALARM"
55000 +};
55001 +
55002 +int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]);
55003 +
55004 +int gr_is_capable(const int cap)
55005 +{
55006 +#ifdef CONFIG_GRKERNSEC
55007 + if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap))
55008 + return 1;
55009 + return 0;
55010 +#else
55011 + return 1;
55012 +#endif
55013 +}
55014 +
55015 +int gr_is_capable_nolog(const int cap)
55016 +{
55017 +#ifdef CONFIG_GRKERNSEC
55018 + if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap))
55019 + return 1;
55020 + return 0;
55021 +#else
55022 + return 1;
55023 +#endif
55024 +}
55025 +
55026 +EXPORT_SYMBOL(gr_is_capable);
55027 +EXPORT_SYMBOL(gr_is_capable_nolog);
55028 diff --git a/grsecurity/grsec_fifo.c b/grsecurity/grsec_fifo.c
55029 new file mode 100644
55030 index 0000000..d3ee748
55031 --- /dev/null
55032 +++ b/grsecurity/grsec_fifo.c
55033 @@ -0,0 +1,24 @@
55034 +#include <linux/kernel.h>
55035 +#include <linux/sched.h>
55036 +#include <linux/fs.h>
55037 +#include <linux/file.h>
55038 +#include <linux/grinternal.h>
55039 +
55040 +int
55041 +gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
55042 + const struct dentry *dir, const int flag, const int acc_mode)
55043 +{
55044 +#ifdef CONFIG_GRKERNSEC_FIFO
55045 + const struct cred *cred = current_cred();
55046 +
55047 + if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
55048 + !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
55049 + (dentry->d_inode->i_uid != dir->d_inode->i_uid) &&
55050 + (cred->fsuid != dentry->d_inode->i_uid)) {
55051 + if (!inode_permission(dentry->d_inode, acc_mode))
55052 + gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, dentry->d_inode->i_uid, dentry->d_inode->i_gid);
55053 + return -EACCES;
55054 + }
55055 +#endif
55056 + return 0;
55057 +}
55058 diff --git a/grsecurity/grsec_fork.c b/grsecurity/grsec_fork.c
55059 new file mode 100644
55060 index 0000000..8ca18bf
55061 --- /dev/null
55062 +++ b/grsecurity/grsec_fork.c
55063 @@ -0,0 +1,23 @@
55064 +#include <linux/kernel.h>
55065 +#include <linux/sched.h>
55066 +#include <linux/grsecurity.h>
55067 +#include <linux/grinternal.h>
55068 +#include <linux/errno.h>
55069 +
55070 +void
55071 +gr_log_forkfail(const int retval)
55072 +{
55073 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
55074 + if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
55075 + switch (retval) {
55076 + case -EAGAIN:
55077 + gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
55078 + break;
55079 + case -ENOMEM:
55080 + gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
55081 + break;
55082 + }
55083 + }
55084 +#endif
55085 + return;
55086 +}
55087 diff --git a/grsecurity/grsec_init.c b/grsecurity/grsec_init.c
55088 new file mode 100644
55089 index 0000000..01ddde4
55090 --- /dev/null
55091 +++ b/grsecurity/grsec_init.c
55092 @@ -0,0 +1,277 @@
55093 +#include <linux/kernel.h>
55094 +#include <linux/sched.h>
55095 +#include <linux/mm.h>
55096 +#include <linux/gracl.h>
55097 +#include <linux/slab.h>
55098 +#include <linux/vmalloc.h>
55099 +#include <linux/percpu.h>
55100 +#include <linux/module.h>
55101 +
55102 +int grsec_enable_ptrace_readexec;
55103 +int grsec_enable_setxid;
55104 +int grsec_enable_brute;
55105 +int grsec_enable_link;
55106 +int grsec_enable_dmesg;
55107 +int grsec_enable_harden_ptrace;
55108 +int grsec_enable_fifo;
55109 +int grsec_enable_execlog;
55110 +int grsec_enable_signal;
55111 +int grsec_enable_forkfail;
55112 +int grsec_enable_audit_ptrace;
55113 +int grsec_enable_time;
55114 +int grsec_enable_audit_textrel;
55115 +int grsec_enable_group;
55116 +int grsec_audit_gid;
55117 +int grsec_enable_chdir;
55118 +int grsec_enable_mount;
55119 +int grsec_enable_rofs;
55120 +int grsec_enable_chroot_findtask;
55121 +int grsec_enable_chroot_mount;
55122 +int grsec_enable_chroot_shmat;
55123 +int grsec_enable_chroot_fchdir;
55124 +int grsec_enable_chroot_double;
55125 +int grsec_enable_chroot_pivot;
55126 +int grsec_enable_chroot_chdir;
55127 +int grsec_enable_chroot_chmod;
55128 +int grsec_enable_chroot_mknod;
55129 +int grsec_enable_chroot_nice;
55130 +int grsec_enable_chroot_execlog;
55131 +int grsec_enable_chroot_caps;
55132 +int grsec_enable_chroot_sysctl;
55133 +int grsec_enable_chroot_unix;
55134 +int grsec_enable_tpe;
55135 +int grsec_tpe_gid;
55136 +int grsec_enable_blackhole;
55137 +#ifdef CONFIG_IPV6_MODULE
55138 +EXPORT_SYMBOL(grsec_enable_blackhole);
55139 +#endif
55140 +int grsec_lastack_retries;
55141 +int grsec_enable_tpe_all;
55142 +int grsec_enable_tpe_invert;
55143 +int grsec_enable_socket_all;
55144 +int grsec_socket_all_gid;
55145 +int grsec_enable_socket_client;
55146 +int grsec_socket_client_gid;
55147 +int grsec_enable_socket_server;
55148 +int grsec_socket_server_gid;
55149 +int grsec_resource_logging;
55150 +int grsec_disable_privio;
55151 +int grsec_enable_log_rwxmaps;
55152 +int grsec_lock;
55153 +
55154 +DEFINE_SPINLOCK(grsec_alert_lock);
55155 +unsigned long grsec_alert_wtime = 0;
55156 +unsigned long grsec_alert_fyet = 0;
55157 +
55158 +DEFINE_SPINLOCK(grsec_audit_lock);
55159 +
55160 +DEFINE_RWLOCK(grsec_exec_file_lock);
55161 +
55162 +char *gr_shared_page[4];
55163 +
55164 +char *gr_alert_log_fmt;
55165 +char *gr_audit_log_fmt;
55166 +char *gr_alert_log_buf;
55167 +char *gr_audit_log_buf;
55168 +
55169 +extern struct gr_arg *gr_usermode;
55170 +extern unsigned char *gr_system_salt;
55171 +extern unsigned char *gr_system_sum;
55172 +
55173 +void __init
55174 +grsecurity_init(void)
55175 +{
55176 + int j;
55177 + /* create the per-cpu shared pages */
55178 +
55179 +#ifdef CONFIG_X86
55180 + memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
55181 +#endif
55182 +
55183 + for (j = 0; j < 4; j++) {
55184 + gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
55185 + if (gr_shared_page[j] == NULL) {
55186 + panic("Unable to allocate grsecurity shared page");
55187 + return;
55188 + }
55189 + }
55190 +
55191 + /* allocate log buffers */
55192 + gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
55193 + if (!gr_alert_log_fmt) {
55194 + panic("Unable to allocate grsecurity alert log format buffer");
55195 + return;
55196 + }
55197 + gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
55198 + if (!gr_audit_log_fmt) {
55199 + panic("Unable to allocate grsecurity audit log format buffer");
55200 + return;
55201 + }
55202 + gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
55203 + if (!gr_alert_log_buf) {
55204 + panic("Unable to allocate grsecurity alert log buffer");
55205 + return;
55206 + }
55207 + gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
55208 + if (!gr_audit_log_buf) {
55209 + panic("Unable to allocate grsecurity audit log buffer");
55210 + return;
55211 + }
55212 +
55213 + /* allocate memory for authentication structure */
55214 + gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
55215 + gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
55216 + gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
55217 +
55218 + if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
55219 + panic("Unable to allocate grsecurity authentication structure");
55220 + return;
55221 + }
55222 +
55223 +
55224 +#ifdef CONFIG_GRKERNSEC_IO
55225 +#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
55226 + grsec_disable_privio = 1;
55227 +#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
55228 + grsec_disable_privio = 1;
55229 +#else
55230 + grsec_disable_privio = 0;
55231 +#endif
55232 +#endif
55233 +
55234 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
55235 + /* for backward compatibility, tpe_invert always defaults to on if
55236 + enabled in the kernel
55237 + */
55238 + grsec_enable_tpe_invert = 1;
55239 +#endif
55240 +
55241 +#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
55242 +#ifndef CONFIG_GRKERNSEC_SYSCTL
55243 + grsec_lock = 1;
55244 +#endif
55245 +
55246 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
55247 + grsec_enable_audit_textrel = 1;
55248 +#endif
55249 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
55250 + grsec_enable_log_rwxmaps = 1;
55251 +#endif
55252 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
55253 + grsec_enable_group = 1;
55254 + grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID;
55255 +#endif
55256 +#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
55257 + grsec_enable_ptrace_readexec = 1;
55258 +#endif
55259 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
55260 + grsec_enable_chdir = 1;
55261 +#endif
55262 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
55263 + grsec_enable_harden_ptrace = 1;
55264 +#endif
55265 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
55266 + grsec_enable_mount = 1;
55267 +#endif
55268 +#ifdef CONFIG_GRKERNSEC_LINK
55269 + grsec_enable_link = 1;
55270 +#endif
55271 +#ifdef CONFIG_GRKERNSEC_BRUTE
55272 + grsec_enable_brute = 1;
55273 +#endif
55274 +#ifdef CONFIG_GRKERNSEC_DMESG
55275 + grsec_enable_dmesg = 1;
55276 +#endif
55277 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
55278 + grsec_enable_blackhole = 1;
55279 + grsec_lastack_retries = 4;
55280 +#endif
55281 +#ifdef CONFIG_GRKERNSEC_FIFO
55282 + grsec_enable_fifo = 1;
55283 +#endif
55284 +#ifdef CONFIG_GRKERNSEC_EXECLOG
55285 + grsec_enable_execlog = 1;
55286 +#endif
55287 +#ifdef CONFIG_GRKERNSEC_SETXID
55288 + grsec_enable_setxid = 1;
55289 +#endif
55290 +#ifdef CONFIG_GRKERNSEC_SIGNAL
55291 + grsec_enable_signal = 1;
55292 +#endif
55293 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
55294 + grsec_enable_forkfail = 1;
55295 +#endif
55296 +#ifdef CONFIG_GRKERNSEC_TIME
55297 + grsec_enable_time = 1;
55298 +#endif
55299 +#ifdef CONFIG_GRKERNSEC_RESLOG
55300 + grsec_resource_logging = 1;
55301 +#endif
55302 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
55303 + grsec_enable_chroot_findtask = 1;
55304 +#endif
55305 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
55306 + grsec_enable_chroot_unix = 1;
55307 +#endif
55308 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
55309 + grsec_enable_chroot_mount = 1;
55310 +#endif
55311 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
55312 + grsec_enable_chroot_fchdir = 1;
55313 +#endif
55314 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
55315 + grsec_enable_chroot_shmat = 1;
55316 +#endif
55317 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
55318 + grsec_enable_audit_ptrace = 1;
55319 +#endif
55320 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
55321 + grsec_enable_chroot_double = 1;
55322 +#endif
55323 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
55324 + grsec_enable_chroot_pivot = 1;
55325 +#endif
55326 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
55327 + grsec_enable_chroot_chdir = 1;
55328 +#endif
55329 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
55330 + grsec_enable_chroot_chmod = 1;
55331 +#endif
55332 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
55333 + grsec_enable_chroot_mknod = 1;
55334 +#endif
55335 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
55336 + grsec_enable_chroot_nice = 1;
55337 +#endif
55338 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
55339 + grsec_enable_chroot_execlog = 1;
55340 +#endif
55341 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
55342 + grsec_enable_chroot_caps = 1;
55343 +#endif
55344 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
55345 + grsec_enable_chroot_sysctl = 1;
55346 +#endif
55347 +#ifdef CONFIG_GRKERNSEC_TPE
55348 + grsec_enable_tpe = 1;
55349 + grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID;
55350 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
55351 + grsec_enable_tpe_all = 1;
55352 +#endif
55353 +#endif
55354 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
55355 + grsec_enable_socket_all = 1;
55356 + grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID;
55357 +#endif
55358 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
55359 + grsec_enable_socket_client = 1;
55360 + grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID;
55361 +#endif
55362 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
55363 + grsec_enable_socket_server = 1;
55364 + grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID;
55365 +#endif
55366 +#endif
55367 +
55368 + return;
55369 +}
55370 diff --git a/grsecurity/grsec_link.c b/grsecurity/grsec_link.c
55371 new file mode 100644
55372 index 0000000..3efe141
55373 --- /dev/null
55374 +++ b/grsecurity/grsec_link.c
55375 @@ -0,0 +1,43 @@
55376 +#include <linux/kernel.h>
55377 +#include <linux/sched.h>
55378 +#include <linux/fs.h>
55379 +#include <linux/file.h>
55380 +#include <linux/grinternal.h>
55381 +
55382 +int
55383 +gr_handle_follow_link(const struct inode *parent,
55384 + const struct inode *inode,
55385 + const struct dentry *dentry, const struct vfsmount *mnt)
55386 +{
55387 +#ifdef CONFIG_GRKERNSEC_LINK
55388 + const struct cred *cred = current_cred();
55389 +
55390 + if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
55391 + (parent->i_mode & S_ISVTX) && (parent->i_uid != inode->i_uid) &&
55392 + (parent->i_mode & S_IWOTH) && (cred->fsuid != inode->i_uid)) {
55393 + gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
55394 + return -EACCES;
55395 + }
55396 +#endif
55397 + return 0;
55398 +}
55399 +
55400 +int
55401 +gr_handle_hardlink(const struct dentry *dentry,
55402 + const struct vfsmount *mnt,
55403 + struct inode *inode, const int mode, const char *to)
55404 +{
55405 +#ifdef CONFIG_GRKERNSEC_LINK
55406 + const struct cred *cred = current_cred();
55407 +
55408 + if (grsec_enable_link && cred->fsuid != inode->i_uid &&
55409 + (!S_ISREG(mode) || (mode & S_ISUID) ||
55410 + ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) ||
55411 + (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
55412 + !capable(CAP_FOWNER) && cred->uid) {
55413 + gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to);
55414 + return -EPERM;
55415 + }
55416 +#endif
55417 + return 0;
55418 +}
55419 diff --git a/grsecurity/grsec_log.c b/grsecurity/grsec_log.c
55420 new file mode 100644
55421 index 0000000..a45d2e9
55422 --- /dev/null
55423 +++ b/grsecurity/grsec_log.c
55424 @@ -0,0 +1,322 @@
55425 +#include <linux/kernel.h>
55426 +#include <linux/sched.h>
55427 +#include <linux/file.h>
55428 +#include <linux/tty.h>
55429 +#include <linux/fs.h>
55430 +#include <linux/grinternal.h>
55431 +
55432 +#ifdef CONFIG_TREE_PREEMPT_RCU
55433 +#define DISABLE_PREEMPT() preempt_disable()
55434 +#define ENABLE_PREEMPT() preempt_enable()
55435 +#else
55436 +#define DISABLE_PREEMPT()
55437 +#define ENABLE_PREEMPT()
55438 +#endif
55439 +
55440 +#define BEGIN_LOCKS(x) \
55441 + DISABLE_PREEMPT(); \
55442 + rcu_read_lock(); \
55443 + read_lock(&tasklist_lock); \
55444 + read_lock(&grsec_exec_file_lock); \
55445 + if (x != GR_DO_AUDIT) \
55446 + spin_lock(&grsec_alert_lock); \
55447 + else \
55448 + spin_lock(&grsec_audit_lock)
55449 +
55450 +#define END_LOCKS(x) \
55451 + if (x != GR_DO_AUDIT) \
55452 + spin_unlock(&grsec_alert_lock); \
55453 + else \
55454 + spin_unlock(&grsec_audit_lock); \
55455 + read_unlock(&grsec_exec_file_lock); \
55456 + read_unlock(&tasklist_lock); \
55457 + rcu_read_unlock(); \
55458 + ENABLE_PREEMPT(); \
55459 + if (x == GR_DONT_AUDIT) \
55460 + gr_handle_alertkill(current)
55461 +
55462 +enum {
55463 + FLOODING,
55464 + NO_FLOODING
55465 +};
55466 +
55467 +extern char *gr_alert_log_fmt;
55468 +extern char *gr_audit_log_fmt;
55469 +extern char *gr_alert_log_buf;
55470 +extern char *gr_audit_log_buf;
55471 +
55472 +static int gr_log_start(int audit)
55473 +{
55474 + char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
55475 + char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
55476 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
55477 +#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0)
55478 + unsigned long curr_secs = get_seconds();
55479 +
55480 + if (audit == GR_DO_AUDIT)
55481 + goto set_fmt;
55482 +
55483 + if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
55484 + grsec_alert_wtime = curr_secs;
55485 + grsec_alert_fyet = 0;
55486 + } else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)
55487 + && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
55488 + grsec_alert_fyet++;
55489 + } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
55490 + grsec_alert_wtime = curr_secs;
55491 + grsec_alert_fyet++;
55492 + printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
55493 + return FLOODING;
55494 + }
55495 + else return FLOODING;
55496 +
55497 +set_fmt:
55498 +#endif
55499 + memset(buf, 0, PAGE_SIZE);
55500 + if (current->signal->curr_ip && gr_acl_is_enabled()) {
55501 + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
55502 + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
55503 + } else if (current->signal->curr_ip) {
55504 + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
55505 + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
55506 + } else if (gr_acl_is_enabled()) {
55507 + sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
55508 + snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
55509 + } else {
55510 + sprintf(fmt, "%s%s", loglevel, "grsec: ");
55511 + strcpy(buf, fmt);
55512 + }
55513 +
55514 + return NO_FLOODING;
55515 +}
55516 +
55517 +static void gr_log_middle(int audit, const char *msg, va_list ap)
55518 + __attribute__ ((format (printf, 2, 0)));
55519 +
55520 +static void gr_log_middle(int audit, const char *msg, va_list ap)
55521 +{
55522 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
55523 + unsigned int len = strlen(buf);
55524 +
55525 + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
55526 +
55527 + return;
55528 +}
55529 +
55530 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
55531 + __attribute__ ((format (printf, 2, 3)));
55532 +
55533 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
55534 +{
55535 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
55536 + unsigned int len = strlen(buf);
55537 + va_list ap;
55538 +
55539 + va_start(ap, msg);
55540 + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
55541 + va_end(ap);
55542 +
55543 + return;
55544 +}
55545 +
55546 +static void gr_log_end(int audit, int append_default)
55547 +{
55548 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
55549 +
55550 + if (append_default) {
55551 + unsigned int len = strlen(buf);
55552 + snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, DEFAULTSECARGS(current, current_cred(), __task_cred(current->real_parent)));
55553 + }
55554 +
55555 + printk("%s\n", buf);
55556 +
55557 + return;
55558 +}
55559 +
55560 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
55561 +{
55562 + int logtype;
55563 + char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
55564 + char *str1 = NULL, *str2 = NULL, *str3 = NULL;
55565 + void *voidptr = NULL;
55566 + int num1 = 0, num2 = 0;
55567 + unsigned long ulong1 = 0, ulong2 = 0;
55568 + struct dentry *dentry = NULL;
55569 + struct vfsmount *mnt = NULL;
55570 + struct file *file = NULL;
55571 + struct task_struct *task = NULL;
55572 + const struct cred *cred, *pcred;
55573 + va_list ap;
55574 +
55575 + BEGIN_LOCKS(audit);
55576 + logtype = gr_log_start(audit);
55577 + if (logtype == FLOODING) {
55578 + END_LOCKS(audit);
55579 + return;
55580 + }
55581 + va_start(ap, argtypes);
55582 + switch (argtypes) {
55583 + case GR_TTYSNIFF:
55584 + task = va_arg(ap, struct task_struct *);
55585 + gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task->pid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid);
55586 + break;
55587 + case GR_SYSCTL_HIDDEN:
55588 + str1 = va_arg(ap, char *);
55589 + gr_log_middle_varargs(audit, msg, result, str1);
55590 + break;
55591 + case GR_RBAC:
55592 + dentry = va_arg(ap, struct dentry *);
55593 + mnt = va_arg(ap, struct vfsmount *);
55594 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
55595 + break;
55596 + case GR_RBAC_STR:
55597 + dentry = va_arg(ap, struct dentry *);
55598 + mnt = va_arg(ap, struct vfsmount *);
55599 + str1 = va_arg(ap, char *);
55600 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
55601 + break;
55602 + case GR_STR_RBAC:
55603 + str1 = va_arg(ap, char *);
55604 + dentry = va_arg(ap, struct dentry *);
55605 + mnt = va_arg(ap, struct vfsmount *);
55606 + gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
55607 + break;
55608 + case GR_RBAC_MODE2:
55609 + dentry = va_arg(ap, struct dentry *);
55610 + mnt = va_arg(ap, struct vfsmount *);
55611 + str1 = va_arg(ap, char *);
55612 + str2 = va_arg(ap, char *);
55613 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
55614 + break;
55615 + case GR_RBAC_MODE3:
55616 + dentry = va_arg(ap, struct dentry *);
55617 + mnt = va_arg(ap, struct vfsmount *);
55618 + str1 = va_arg(ap, char *);
55619 + str2 = va_arg(ap, char *);
55620 + str3 = va_arg(ap, char *);
55621 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
55622 + break;
55623 + case GR_FILENAME:
55624 + dentry = va_arg(ap, struct dentry *);
55625 + mnt = va_arg(ap, struct vfsmount *);
55626 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
55627 + break;
55628 + case GR_STR_FILENAME:
55629 + str1 = va_arg(ap, char *);
55630 + dentry = va_arg(ap, struct dentry *);
55631 + mnt = va_arg(ap, struct vfsmount *);
55632 + gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
55633 + break;
55634 + case GR_FILENAME_STR:
55635 + dentry = va_arg(ap, struct dentry *);
55636 + mnt = va_arg(ap, struct vfsmount *);
55637 + str1 = va_arg(ap, char *);
55638 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
55639 + break;
55640 + case GR_FILENAME_TWO_INT:
55641 + dentry = va_arg(ap, struct dentry *);
55642 + mnt = va_arg(ap, struct vfsmount *);
55643 + num1 = va_arg(ap, int);
55644 + num2 = va_arg(ap, int);
55645 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
55646 + break;
55647 + case GR_FILENAME_TWO_INT_STR:
55648 + dentry = va_arg(ap, struct dentry *);
55649 + mnt = va_arg(ap, struct vfsmount *);
55650 + num1 = va_arg(ap, int);
55651 + num2 = va_arg(ap, int);
55652 + str1 = va_arg(ap, char *);
55653 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
55654 + break;
55655 + case GR_TEXTREL:
55656 + file = va_arg(ap, struct file *);
55657 + ulong1 = va_arg(ap, unsigned long);
55658 + ulong2 = va_arg(ap, unsigned long);
55659 + gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
55660 + break;
55661 + case GR_PTRACE:
55662 + task = va_arg(ap, struct task_struct *);
55663 + gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task->pid);
55664 + break;
55665 + case GR_RESOURCE:
55666 + task = va_arg(ap, struct task_struct *);
55667 + cred = __task_cred(task);
55668 + pcred = __task_cred(task->real_parent);
55669 + ulong1 = va_arg(ap, unsigned long);
55670 + str1 = va_arg(ap, char *);
55671 + ulong2 = va_arg(ap, unsigned long);
55672 + gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
55673 + break;
55674 + case GR_CAP:
55675 + task = va_arg(ap, struct task_struct *);
55676 + cred = __task_cred(task);
55677 + pcred = __task_cred(task->real_parent);
55678 + str1 = va_arg(ap, char *);
55679 + gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
55680 + break;
55681 + case GR_SIG:
55682 + str1 = va_arg(ap, char *);
55683 + voidptr = va_arg(ap, void *);
55684 + gr_log_middle_varargs(audit, msg, str1, voidptr);
55685 + break;
55686 + case GR_SIG2:
55687 + task = va_arg(ap, struct task_struct *);
55688 + cred = __task_cred(task);
55689 + pcred = __task_cred(task->real_parent);
55690 + num1 = va_arg(ap, int);
55691 + gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
55692 + break;
55693 + case GR_CRASH1:
55694 + task = va_arg(ap, struct task_struct *);
55695 + cred = __task_cred(task);
55696 + pcred = __task_cred(task->real_parent);
55697 + ulong1 = va_arg(ap, unsigned long);
55698 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, cred->uid, ulong1);
55699 + break;
55700 + case GR_CRASH2:
55701 + task = va_arg(ap, struct task_struct *);
55702 + cred = __task_cred(task);
55703 + pcred = __task_cred(task->real_parent);
55704 + ulong1 = va_arg(ap, unsigned long);
55705 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, ulong1);
55706 + break;
55707 + case GR_RWXMAP:
55708 + file = va_arg(ap, struct file *);
55709 + gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
55710 + break;
55711 + case GR_PSACCT:
55712 + {
55713 + unsigned int wday, cday;
55714 + __u8 whr, chr;
55715 + __u8 wmin, cmin;
55716 + __u8 wsec, csec;
55717 + char cur_tty[64] = { 0 };
55718 + char parent_tty[64] = { 0 };
55719 +
55720 + task = va_arg(ap, struct task_struct *);
55721 + wday = va_arg(ap, unsigned int);
55722 + cday = va_arg(ap, unsigned int);
55723 + whr = va_arg(ap, int);
55724 + chr = va_arg(ap, int);
55725 + wmin = va_arg(ap, int);
55726 + cmin = va_arg(ap, int);
55727 + wsec = va_arg(ap, int);
55728 + csec = va_arg(ap, int);
55729 + ulong1 = va_arg(ap, unsigned long);
55730 + cred = __task_cred(task);
55731 + pcred = __task_cred(task->real_parent);
55732 +
55733 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), cred->uid, cred->euid, cred->gid, cred->egid, wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), pcred->uid, pcred->euid, pcred->gid, pcred->egid);
55734 + }
55735 + break;
55736 + default:
55737 + gr_log_middle(audit, msg, ap);
55738 + }
55739 + va_end(ap);
55740 + // these don't need DEFAULTSECARGS printed on the end
55741 + if (argtypes == GR_CRASH1 || argtypes == GR_CRASH2)
55742 + gr_log_end(audit, 0);
55743 + else
55744 + gr_log_end(audit, 1);
55745 + END_LOCKS(audit);
55746 +}
55747 diff --git a/grsecurity/grsec_mem.c b/grsecurity/grsec_mem.c
55748 new file mode 100644
55749 index 0000000..f536303
55750 --- /dev/null
55751 +++ b/grsecurity/grsec_mem.c
55752 @@ -0,0 +1,40 @@
55753 +#include <linux/kernel.h>
55754 +#include <linux/sched.h>
55755 +#include <linux/mm.h>
55756 +#include <linux/mman.h>
55757 +#include <linux/grinternal.h>
55758 +
55759 +void
55760 +gr_handle_ioperm(void)
55761 +{
55762 + gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
55763 + return;
55764 +}
55765 +
55766 +void
55767 +gr_handle_iopl(void)
55768 +{
55769 + gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
55770 + return;
55771 +}
55772 +
55773 +void
55774 +gr_handle_mem_readwrite(u64 from, u64 to)
55775 +{
55776 + gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
55777 + return;
55778 +}
55779 +
55780 +void
55781 +gr_handle_vm86(void)
55782 +{
55783 + gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
55784 + return;
55785 +}
55786 +
55787 +void
55788 +gr_log_badprocpid(const char *entry)
55789 +{
55790 + gr_log_str(GR_DONT_AUDIT, GR_BADPROCPID_MSG, entry);
55791 + return;
55792 +}
55793 diff --git a/grsecurity/grsec_mount.c b/grsecurity/grsec_mount.c
55794 new file mode 100644
55795 index 0000000..2131422
55796 --- /dev/null
55797 +++ b/grsecurity/grsec_mount.c
55798 @@ -0,0 +1,62 @@
55799 +#include <linux/kernel.h>
55800 +#include <linux/sched.h>
55801 +#include <linux/mount.h>
55802 +#include <linux/grsecurity.h>
55803 +#include <linux/grinternal.h>
55804 +
55805 +void
55806 +gr_log_remount(const char *devname, const int retval)
55807 +{
55808 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
55809 + if (grsec_enable_mount && (retval >= 0))
55810 + gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
55811 +#endif
55812 + return;
55813 +}
55814 +
55815 +void
55816 +gr_log_unmount(const char *devname, const int retval)
55817 +{
55818 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
55819 + if (grsec_enable_mount && (retval >= 0))
55820 + gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
55821 +#endif
55822 + return;
55823 +}
55824 +
55825 +void
55826 +gr_log_mount(const char *from, const char *to, const int retval)
55827 +{
55828 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
55829 + if (grsec_enable_mount && (retval >= 0))
55830 + gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
55831 +#endif
55832 + return;
55833 +}
55834 +
55835 +int
55836 +gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
55837 +{
55838 +#ifdef CONFIG_GRKERNSEC_ROFS
55839 + if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
55840 + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
55841 + return -EPERM;
55842 + } else
55843 + return 0;
55844 +#endif
55845 + return 0;
55846 +}
55847 +
55848 +int
55849 +gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
55850 +{
55851 +#ifdef CONFIG_GRKERNSEC_ROFS
55852 + if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
55853 + dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
55854 + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
55855 + return -EPERM;
55856 + } else
55857 + return 0;
55858 +#endif
55859 + return 0;
55860 +}
55861 diff --git a/grsecurity/grsec_pax.c b/grsecurity/grsec_pax.c
55862 new file mode 100644
55863 index 0000000..a3b12a0
55864 --- /dev/null
55865 +++ b/grsecurity/grsec_pax.c
55866 @@ -0,0 +1,36 @@
55867 +#include <linux/kernel.h>
55868 +#include <linux/sched.h>
55869 +#include <linux/mm.h>
55870 +#include <linux/file.h>
55871 +#include <linux/grinternal.h>
55872 +#include <linux/grsecurity.h>
55873 +
55874 +void
55875 +gr_log_textrel(struct vm_area_struct * vma)
55876 +{
55877 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
55878 + if (grsec_enable_audit_textrel)
55879 + gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
55880 +#endif
55881 + return;
55882 +}
55883 +
55884 +void
55885 +gr_log_rwxmmap(struct file *file)
55886 +{
55887 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
55888 + if (grsec_enable_log_rwxmaps)
55889 + gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
55890 +#endif
55891 + return;
55892 +}
55893 +
55894 +void
55895 +gr_log_rwxmprotect(struct file *file)
55896 +{
55897 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
55898 + if (grsec_enable_log_rwxmaps)
55899 + gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
55900 +#endif
55901 + return;
55902 +}
55903 diff --git a/grsecurity/grsec_ptrace.c b/grsecurity/grsec_ptrace.c
55904 new file mode 100644
55905 index 0000000..f7f29aa
55906 --- /dev/null
55907 +++ b/grsecurity/grsec_ptrace.c
55908 @@ -0,0 +1,30 @@
55909 +#include <linux/kernel.h>
55910 +#include <linux/sched.h>
55911 +#include <linux/grinternal.h>
55912 +#include <linux/security.h>
55913 +
55914 +void
55915 +gr_audit_ptrace(struct task_struct *task)
55916 +{
55917 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
55918 + if (grsec_enable_audit_ptrace)
55919 + gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
55920 +#endif
55921 + return;
55922 +}
55923 +
55924 +int
55925 +gr_ptrace_readexec(struct file *file, int unsafe_flags)
55926 +{
55927 +#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
55928 + const struct dentry *dentry = file->f_path.dentry;
55929 + const struct vfsmount *mnt = file->f_path.mnt;
55930 +
55931 + if (grsec_enable_ptrace_readexec && (unsafe_flags & LSM_UNSAFE_PTRACE) &&
55932 + (inode_permission(dentry->d_inode, MAY_READ) || !gr_acl_handle_open(dentry, mnt, MAY_READ))) {
55933 + gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_READEXEC_MSG, dentry, mnt);
55934 + return -EACCES;
55935 + }
55936 +#endif
55937 + return 0;
55938 +}
55939 diff --git a/grsecurity/grsec_sig.c b/grsecurity/grsec_sig.c
55940 new file mode 100644
55941 index 0000000..7a5b2de
55942 --- /dev/null
55943 +++ b/grsecurity/grsec_sig.c
55944 @@ -0,0 +1,207 @@
55945 +#include <linux/kernel.h>
55946 +#include <linux/sched.h>
55947 +#include <linux/delay.h>
55948 +#include <linux/grsecurity.h>
55949 +#include <linux/grinternal.h>
55950 +#include <linux/hardirq.h>
55951 +
55952 +char *signames[] = {
55953 + [SIGSEGV] = "Segmentation fault",
55954 + [SIGILL] = "Illegal instruction",
55955 + [SIGABRT] = "Abort",
55956 + [SIGBUS] = "Invalid alignment/Bus error"
55957 +};
55958 +
55959 +void
55960 +gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
55961 +{
55962 +#ifdef CONFIG_GRKERNSEC_SIGNAL
55963 + if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
55964 + (sig == SIGABRT) || (sig == SIGBUS))) {
55965 + if (t->pid == current->pid) {
55966 + gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
55967 + } else {
55968 + gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
55969 + }
55970 + }
55971 +#endif
55972 + return;
55973 +}
55974 +
55975 +int
55976 +gr_handle_signal(const struct task_struct *p, const int sig)
55977 +{
55978 +#ifdef CONFIG_GRKERNSEC
55979 + /* ignore the 0 signal for protected task checks */
55980 + if (current->pid > 1 && sig && gr_check_protected_task(p)) {
55981 + gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
55982 + return -EPERM;
55983 + } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
55984 + return -EPERM;
55985 + }
55986 +#endif
55987 + return 0;
55988 +}
55989 +
55990 +#ifdef CONFIG_GRKERNSEC
55991 +extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
55992 +
55993 +int gr_fake_force_sig(int sig, struct task_struct *t)
55994 +{
55995 + unsigned long int flags;
55996 + int ret, blocked, ignored;
55997 + struct k_sigaction *action;
55998 +
55999 + spin_lock_irqsave(&t->sighand->siglock, flags);
56000 + action = &t->sighand->action[sig-1];
56001 + ignored = action->sa.sa_handler == SIG_IGN;
56002 + blocked = sigismember(&t->blocked, sig);
56003 + if (blocked || ignored) {
56004 + action->sa.sa_handler = SIG_DFL;
56005 + if (blocked) {
56006 + sigdelset(&t->blocked, sig);
56007 + recalc_sigpending_and_wake(t);
56008 + }
56009 + }
56010 + if (action->sa.sa_handler == SIG_DFL)
56011 + t->signal->flags &= ~SIGNAL_UNKILLABLE;
56012 + ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
56013 +
56014 + spin_unlock_irqrestore(&t->sighand->siglock, flags);
56015 +
56016 + return ret;
56017 +}
56018 +#endif
56019 +
56020 +#ifdef CONFIG_GRKERNSEC_BRUTE
56021 +#define GR_USER_BAN_TIME (15 * 60)
56022 +
56023 +static int __get_dumpable(unsigned long mm_flags)
56024 +{
56025 + int ret;
56026 +
56027 + ret = mm_flags & MMF_DUMPABLE_MASK;
56028 + return (ret >= 2) ? 2 : ret;
56029 +}
56030 +#endif
56031 +
56032 +void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags)
56033 +{
56034 +#ifdef CONFIG_GRKERNSEC_BRUTE
56035 + uid_t uid = 0;
56036 +
56037 + if (!grsec_enable_brute)
56038 + return;
56039 +
56040 + rcu_read_lock();
56041 + read_lock(&tasklist_lock);
56042 + read_lock(&grsec_exec_file_lock);
56043 + if (p->real_parent && p->real_parent->exec_file == p->exec_file)
56044 + p->real_parent->brute = 1;
56045 + else {
56046 + const struct cred *cred = __task_cred(p), *cred2;
56047 + struct task_struct *tsk, *tsk2;
56048 +
56049 + if (!__get_dumpable(mm_flags) && cred->uid) {
56050 + struct user_struct *user;
56051 +
56052 + uid = cred->uid;
56053 +
56054 + /* this is put upon execution past expiration */
56055 + user = find_user(uid);
56056 + if (user == NULL)
56057 + goto unlock;
56058 + user->banned = 1;
56059 + user->ban_expires = get_seconds() + GR_USER_BAN_TIME;
56060 + if (user->ban_expires == ~0UL)
56061 + user->ban_expires--;
56062 +
56063 + do_each_thread(tsk2, tsk) {
56064 + cred2 = __task_cred(tsk);
56065 + if (tsk != p && cred2->uid == uid)
56066 + gr_fake_force_sig(SIGKILL, tsk);
56067 + } while_each_thread(tsk2, tsk);
56068 + }
56069 + }
56070 +unlock:
56071 + read_unlock(&grsec_exec_file_lock);
56072 + read_unlock(&tasklist_lock);
56073 + rcu_read_unlock();
56074 +
56075 + if (uid)
56076 + printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n", uid, GR_USER_BAN_TIME / 60);
56077 +
56078 +#endif
56079 + return;
56080 +}
56081 +
56082 +void gr_handle_brute_check(void)
56083 +{
56084 +#ifdef CONFIG_GRKERNSEC_BRUTE
56085 + if (current->brute)
56086 + msleep(30 * 1000);
56087 +#endif
56088 + return;
56089 +}
56090 +
56091 +void gr_handle_kernel_exploit(void)
56092 +{
56093 +#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
56094 + const struct cred *cred;
56095 + struct task_struct *tsk, *tsk2;
56096 + struct user_struct *user;
56097 + uid_t uid;
56098 +
56099 + if (in_irq() || in_serving_softirq() || in_nmi())
56100 + panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
56101 +
56102 + uid = current_uid();
56103 +
56104 + if (uid == 0)
56105 + panic("grsec: halting the system due to suspicious kernel crash caused by root");
56106 + else {
56107 + /* kill all the processes of this user, hold a reference
56108 + to their creds struct, and prevent them from creating
56109 + another process until system reset
56110 + */
56111 + printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n", uid);
56112 + /* we intentionally leak this ref */
56113 + user = get_uid(current->cred->user);
56114 + if (user) {
56115 + user->banned = 1;
56116 + user->ban_expires = ~0UL;
56117 + }
56118 +
56119 + read_lock(&tasklist_lock);
56120 + do_each_thread(tsk2, tsk) {
56121 + cred = __task_cred(tsk);
56122 + if (cred->uid == uid)
56123 + gr_fake_force_sig(SIGKILL, tsk);
56124 + } while_each_thread(tsk2, tsk);
56125 + read_unlock(&tasklist_lock);
56126 + }
56127 +#endif
56128 +}
56129 +
56130 +int __gr_process_user_ban(struct user_struct *user)
56131 +{
56132 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
56133 + if (unlikely(user->banned)) {
56134 + if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) {
56135 + user->banned = 0;
56136 + user->ban_expires = 0;
56137 + free_uid(user);
56138 + } else
56139 + return -EPERM;
56140 + }
56141 +#endif
56142 + return 0;
56143 +}
56144 +
56145 +int gr_process_user_ban(void)
56146 +{
56147 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
56148 + return __gr_process_user_ban(current->cred->user);
56149 +#endif
56150 + return 0;
56151 +}
56152 diff --git a/grsecurity/grsec_sock.c b/grsecurity/grsec_sock.c
56153 new file mode 100644
56154 index 0000000..4030d57
56155 --- /dev/null
56156 +++ b/grsecurity/grsec_sock.c
56157 @@ -0,0 +1,244 @@
56158 +#include <linux/kernel.h>
56159 +#include <linux/module.h>
56160 +#include <linux/sched.h>
56161 +#include <linux/file.h>
56162 +#include <linux/net.h>
56163 +#include <linux/in.h>
56164 +#include <linux/ip.h>
56165 +#include <net/sock.h>
56166 +#include <net/inet_sock.h>
56167 +#include <linux/grsecurity.h>
56168 +#include <linux/grinternal.h>
56169 +#include <linux/gracl.h>
56170 +
56171 +extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
56172 +extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
56173 +
56174 +EXPORT_SYMBOL(gr_search_udp_recvmsg);
56175 +EXPORT_SYMBOL(gr_search_udp_sendmsg);
56176 +
56177 +#ifdef CONFIG_UNIX_MODULE
56178 +EXPORT_SYMBOL(gr_acl_handle_unix);
56179 +EXPORT_SYMBOL(gr_acl_handle_mknod);
56180 +EXPORT_SYMBOL(gr_handle_chroot_unix);
56181 +EXPORT_SYMBOL(gr_handle_create);
56182 +#endif
56183 +
56184 +#ifdef CONFIG_GRKERNSEC
56185 +#define gr_conn_table_size 32749
56186 +struct conn_table_entry {
56187 + struct conn_table_entry *next;
56188 + struct signal_struct *sig;
56189 +};
56190 +
56191 +struct conn_table_entry *gr_conn_table[gr_conn_table_size];
56192 +DEFINE_SPINLOCK(gr_conn_table_lock);
56193 +
56194 +extern const char * gr_socktype_to_name(unsigned char type);
56195 +extern const char * gr_proto_to_name(unsigned char proto);
56196 +extern const char * gr_sockfamily_to_name(unsigned char family);
56197 +
56198 +static __inline__ int
56199 +conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
56200 +{
56201 + return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
56202 +}
56203 +
56204 +static __inline__ int
56205 +conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
56206 + __u16 sport, __u16 dport)
56207 +{
56208 + if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
56209 + sig->gr_sport == sport && sig->gr_dport == dport))
56210 + return 1;
56211 + else
56212 + return 0;
56213 +}
56214 +
56215 +static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
56216 +{
56217 + struct conn_table_entry **match;
56218 + unsigned int index;
56219 +
56220 + index = conn_hash(sig->gr_saddr, sig->gr_daddr,
56221 + sig->gr_sport, sig->gr_dport,
56222 + gr_conn_table_size);
56223 +
56224 + newent->sig = sig;
56225 +
56226 + match = &gr_conn_table[index];
56227 + newent->next = *match;
56228 + *match = newent;
56229 +
56230 + return;
56231 +}
56232 +
56233 +static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
56234 +{
56235 + struct conn_table_entry *match, *last = NULL;
56236 + unsigned int index;
56237 +
56238 + index = conn_hash(sig->gr_saddr, sig->gr_daddr,
56239 + sig->gr_sport, sig->gr_dport,
56240 + gr_conn_table_size);
56241 +
56242 + match = gr_conn_table[index];
56243 + while (match && !conn_match(match->sig,
56244 + sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
56245 + sig->gr_dport)) {
56246 + last = match;
56247 + match = match->next;
56248 + }
56249 +
56250 + if (match) {
56251 + if (last)
56252 + last->next = match->next;
56253 + else
56254 + gr_conn_table[index] = NULL;
56255 + kfree(match);
56256 + }
56257 +
56258 + return;
56259 +}
56260 +
56261 +static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
56262 + __u16 sport, __u16 dport)
56263 +{
56264 + struct conn_table_entry *match;
56265 + unsigned int index;
56266 +
56267 + index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
56268 +
56269 + match = gr_conn_table[index];
56270 + while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
56271 + match = match->next;
56272 +
56273 + if (match)
56274 + return match->sig;
56275 + else
56276 + return NULL;
56277 +}
56278 +
56279 +#endif
56280 +
56281 +void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
56282 +{
56283 +#ifdef CONFIG_GRKERNSEC
56284 + struct signal_struct *sig = task->signal;
56285 + struct conn_table_entry *newent;
56286 +
56287 + newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
56288 + if (newent == NULL)
56289 + return;
56290 + /* no bh lock needed since we are called with bh disabled */
56291 + spin_lock(&gr_conn_table_lock);
56292 + gr_del_task_from_ip_table_nolock(sig);
56293 + sig->gr_saddr = inet->inet_rcv_saddr;
56294 + sig->gr_daddr = inet->inet_daddr;
56295 + sig->gr_sport = inet->inet_sport;
56296 + sig->gr_dport = inet->inet_dport;
56297 + gr_add_to_task_ip_table_nolock(sig, newent);
56298 + spin_unlock(&gr_conn_table_lock);
56299 +#endif
56300 + return;
56301 +}
56302 +
56303 +void gr_del_task_from_ip_table(struct task_struct *task)
56304 +{
56305 +#ifdef CONFIG_GRKERNSEC
56306 + spin_lock_bh(&gr_conn_table_lock);
56307 + gr_del_task_from_ip_table_nolock(task->signal);
56308 + spin_unlock_bh(&gr_conn_table_lock);
56309 +#endif
56310 + return;
56311 +}
56312 +
56313 +void
56314 +gr_attach_curr_ip(const struct sock *sk)
56315 +{
56316 +#ifdef CONFIG_GRKERNSEC
56317 + struct signal_struct *p, *set;
56318 + const struct inet_sock *inet = inet_sk(sk);
56319 +
56320 + if (unlikely(sk->sk_protocol != IPPROTO_TCP))
56321 + return;
56322 +
56323 + set = current->signal;
56324 +
56325 + spin_lock_bh(&gr_conn_table_lock);
56326 + p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr,
56327 + inet->inet_dport, inet->inet_sport);
56328 + if (unlikely(p != NULL)) {
56329 + set->curr_ip = p->curr_ip;
56330 + set->used_accept = 1;
56331 + gr_del_task_from_ip_table_nolock(p);
56332 + spin_unlock_bh(&gr_conn_table_lock);
56333 + return;
56334 + }
56335 + spin_unlock_bh(&gr_conn_table_lock);
56336 +
56337 + set->curr_ip = inet->inet_daddr;
56338 + set->used_accept = 1;
56339 +#endif
56340 + return;
56341 +}
56342 +
56343 +int
56344 +gr_handle_sock_all(const int family, const int type, const int protocol)
56345 +{
56346 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
56347 + if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
56348 + (family != AF_UNIX)) {
56349 + if (family == AF_INET)
56350 + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
56351 + else
56352 + gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
56353 + return -EACCES;
56354 + }
56355 +#endif
56356 + return 0;
56357 +}
56358 +
56359 +int
56360 +gr_handle_sock_server(const struct sockaddr *sck)
56361 +{
56362 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
56363 + if (grsec_enable_socket_server &&
56364 + in_group_p(grsec_socket_server_gid) &&
56365 + sck && (sck->sa_family != AF_UNIX) &&
56366 + (sck->sa_family != AF_LOCAL)) {
56367 + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
56368 + return -EACCES;
56369 + }
56370 +#endif
56371 + return 0;
56372 +}
56373 +
56374 +int
56375 +gr_handle_sock_server_other(const struct sock *sck)
56376 +{
56377 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
56378 + if (grsec_enable_socket_server &&
56379 + in_group_p(grsec_socket_server_gid) &&
56380 + sck && (sck->sk_family != AF_UNIX) &&
56381 + (sck->sk_family != AF_LOCAL)) {
56382 + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
56383 + return -EACCES;
56384 + }
56385 +#endif
56386 + return 0;
56387 +}
56388 +
56389 +int
56390 +gr_handle_sock_client(const struct sockaddr *sck)
56391 +{
56392 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
56393 + if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
56394 + sck && (sck->sa_family != AF_UNIX) &&
56395 + (sck->sa_family != AF_LOCAL)) {
56396 + gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
56397 + return -EACCES;
56398 + }
56399 +#endif
56400 + return 0;
56401 +}
56402 diff --git a/grsecurity/grsec_sysctl.c b/grsecurity/grsec_sysctl.c
56403 new file mode 100644
56404 index 0000000..a1aedd7
56405 --- /dev/null
56406 +++ b/grsecurity/grsec_sysctl.c
56407 @@ -0,0 +1,451 @@
56408 +#include <linux/kernel.h>
56409 +#include <linux/sched.h>
56410 +#include <linux/sysctl.h>
56411 +#include <linux/grsecurity.h>
56412 +#include <linux/grinternal.h>
56413 +
56414 +int
56415 +gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
56416 +{
56417 +#ifdef CONFIG_GRKERNSEC_SYSCTL
56418 + if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
56419 + gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
56420 + return -EACCES;
56421 + }
56422 +#endif
56423 + return 0;
56424 +}
56425 +
56426 +#ifdef CONFIG_GRKERNSEC_ROFS
56427 +static int __maybe_unused one = 1;
56428 +#endif
56429 +
56430 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
56431 +struct ctl_table grsecurity_table[] = {
56432 +#ifdef CONFIG_GRKERNSEC_SYSCTL
56433 +#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
56434 +#ifdef CONFIG_GRKERNSEC_IO
56435 + {
56436 + .procname = "disable_priv_io",
56437 + .data = &grsec_disable_privio,
56438 + .maxlen = sizeof(int),
56439 + .mode = 0600,
56440 + .proc_handler = &proc_dointvec,
56441 + },
56442 +#endif
56443 +#endif
56444 +#ifdef CONFIG_GRKERNSEC_LINK
56445 + {
56446 + .procname = "linking_restrictions",
56447 + .data = &grsec_enable_link,
56448 + .maxlen = sizeof(int),
56449 + .mode = 0600,
56450 + .proc_handler = &proc_dointvec,
56451 + },
56452 +#endif
56453 +#ifdef CONFIG_GRKERNSEC_BRUTE
56454 + {
56455 + .procname = "deter_bruteforce",
56456 + .data = &grsec_enable_brute,
56457 + .maxlen = sizeof(int),
56458 + .mode = 0600,
56459 + .proc_handler = &proc_dointvec,
56460 + },
56461 +#endif
56462 +#ifdef CONFIG_GRKERNSEC_FIFO
56463 + {
56464 + .procname = "fifo_restrictions",
56465 + .data = &grsec_enable_fifo,
56466 + .maxlen = sizeof(int),
56467 + .mode = 0600,
56468 + .proc_handler = &proc_dointvec,
56469 + },
56470 +#endif
56471 +#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
56472 + {
56473 + .procname = "ptrace_readexec",
56474 + .data = &grsec_enable_ptrace_readexec,
56475 + .maxlen = sizeof(int),
56476 + .mode = 0600,
56477 + .proc_handler = &proc_dointvec,
56478 + },
56479 +#endif
56480 +#ifdef CONFIG_GRKERNSEC_SETXID
56481 + {
56482 + .procname = "consistent_setxid",
56483 + .data = &grsec_enable_setxid,
56484 + .maxlen = sizeof(int),
56485 + .mode = 0600,
56486 + .proc_handler = &proc_dointvec,
56487 + },
56488 +#endif
56489 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
56490 + {
56491 + .procname = "ip_blackhole",
56492 + .data = &grsec_enable_blackhole,
56493 + .maxlen = sizeof(int),
56494 + .mode = 0600,
56495 + .proc_handler = &proc_dointvec,
56496 + },
56497 + {
56498 + .procname = "lastack_retries",
56499 + .data = &grsec_lastack_retries,
56500 + .maxlen = sizeof(int),
56501 + .mode = 0600,
56502 + .proc_handler = &proc_dointvec,
56503 + },
56504 +#endif
56505 +#ifdef CONFIG_GRKERNSEC_EXECLOG
56506 + {
56507 + .procname = "exec_logging",
56508 + .data = &grsec_enable_execlog,
56509 + .maxlen = sizeof(int),
56510 + .mode = 0600,
56511 + .proc_handler = &proc_dointvec,
56512 + },
56513 +#endif
56514 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
56515 + {
56516 + .procname = "rwxmap_logging",
56517 + .data = &grsec_enable_log_rwxmaps,
56518 + .maxlen = sizeof(int),
56519 + .mode = 0600,
56520 + .proc_handler = &proc_dointvec,
56521 + },
56522 +#endif
56523 +#ifdef CONFIG_GRKERNSEC_SIGNAL
56524 + {
56525 + .procname = "signal_logging",
56526 + .data = &grsec_enable_signal,
56527 + .maxlen = sizeof(int),
56528 + .mode = 0600,
56529 + .proc_handler = &proc_dointvec,
56530 + },
56531 +#endif
56532 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
56533 + {
56534 + .procname = "forkfail_logging",
56535 + .data = &grsec_enable_forkfail,
56536 + .maxlen = sizeof(int),
56537 + .mode = 0600,
56538 + .proc_handler = &proc_dointvec,
56539 + },
56540 +#endif
56541 +#ifdef CONFIG_GRKERNSEC_TIME
56542 + {
56543 + .procname = "timechange_logging",
56544 + .data = &grsec_enable_time,
56545 + .maxlen = sizeof(int),
56546 + .mode = 0600,
56547 + .proc_handler = &proc_dointvec,
56548 + },
56549 +#endif
56550 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
56551 + {
56552 + .procname = "chroot_deny_shmat",
56553 + .data = &grsec_enable_chroot_shmat,
56554 + .maxlen = sizeof(int),
56555 + .mode = 0600,
56556 + .proc_handler = &proc_dointvec,
56557 + },
56558 +#endif
56559 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
56560 + {
56561 + .procname = "chroot_deny_unix",
56562 + .data = &grsec_enable_chroot_unix,
56563 + .maxlen = sizeof(int),
56564 + .mode = 0600,
56565 + .proc_handler = &proc_dointvec,
56566 + },
56567 +#endif
56568 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
56569 + {
56570 + .procname = "chroot_deny_mount",
56571 + .data = &grsec_enable_chroot_mount,
56572 + .maxlen = sizeof(int),
56573 + .mode = 0600,
56574 + .proc_handler = &proc_dointvec,
56575 + },
56576 +#endif
56577 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
56578 + {
56579 + .procname = "chroot_deny_fchdir",
56580 + .data = &grsec_enable_chroot_fchdir,
56581 + .maxlen = sizeof(int),
56582 + .mode = 0600,
56583 + .proc_handler = &proc_dointvec,
56584 + },
56585 +#endif
56586 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
56587 + {
56588 + .procname = "chroot_deny_chroot",
56589 + .data = &grsec_enable_chroot_double,
56590 + .maxlen = sizeof(int),
56591 + .mode = 0600,
56592 + .proc_handler = &proc_dointvec,
56593 + },
56594 +#endif
56595 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
56596 + {
56597 + .procname = "chroot_deny_pivot",
56598 + .data = &grsec_enable_chroot_pivot,
56599 + .maxlen = sizeof(int),
56600 + .mode = 0600,
56601 + .proc_handler = &proc_dointvec,
56602 + },
56603 +#endif
56604 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
56605 + {
56606 + .procname = "chroot_enforce_chdir",
56607 + .data = &grsec_enable_chroot_chdir,
56608 + .maxlen = sizeof(int),
56609 + .mode = 0600,
56610 + .proc_handler = &proc_dointvec,
56611 + },
56612 +#endif
56613 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
56614 + {
56615 + .procname = "chroot_deny_chmod",
56616 + .data = &grsec_enable_chroot_chmod,
56617 + .maxlen = sizeof(int),
56618 + .mode = 0600,
56619 + .proc_handler = &proc_dointvec,
56620 + },
56621 +#endif
56622 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
56623 + {
56624 + .procname = "chroot_deny_mknod",
56625 + .data = &grsec_enable_chroot_mknod,
56626 + .maxlen = sizeof(int),
56627 + .mode = 0600,
56628 + .proc_handler = &proc_dointvec,
56629 + },
56630 +#endif
56631 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
56632 + {
56633 + .procname = "chroot_restrict_nice",
56634 + .data = &grsec_enable_chroot_nice,
56635 + .maxlen = sizeof(int),
56636 + .mode = 0600,
56637 + .proc_handler = &proc_dointvec,
56638 + },
56639 +#endif
56640 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
56641 + {
56642 + .procname = "chroot_execlog",
56643 + .data = &grsec_enable_chroot_execlog,
56644 + .maxlen = sizeof(int),
56645 + .mode = 0600,
56646 + .proc_handler = &proc_dointvec,
56647 + },
56648 +#endif
56649 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
56650 + {
56651 + .procname = "chroot_caps",
56652 + .data = &grsec_enable_chroot_caps,
56653 + .maxlen = sizeof(int),
56654 + .mode = 0600,
56655 + .proc_handler = &proc_dointvec,
56656 + },
56657 +#endif
56658 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
56659 + {
56660 + .procname = "chroot_deny_sysctl",
56661 + .data = &grsec_enable_chroot_sysctl,
56662 + .maxlen = sizeof(int),
56663 + .mode = 0600,
56664 + .proc_handler = &proc_dointvec,
56665 + },
56666 +#endif
56667 +#ifdef CONFIG_GRKERNSEC_TPE
56668 + {
56669 + .procname = "tpe",
56670 + .data = &grsec_enable_tpe,
56671 + .maxlen = sizeof(int),
56672 + .mode = 0600,
56673 + .proc_handler = &proc_dointvec,
56674 + },
56675 + {
56676 + .procname = "tpe_gid",
56677 + .data = &grsec_tpe_gid,
56678 + .maxlen = sizeof(int),
56679 + .mode = 0600,
56680 + .proc_handler = &proc_dointvec,
56681 + },
56682 +#endif
56683 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
56684 + {
56685 + .procname = "tpe_invert",
56686 + .data = &grsec_enable_tpe_invert,
56687 + .maxlen = sizeof(int),
56688 + .mode = 0600,
56689 + .proc_handler = &proc_dointvec,
56690 + },
56691 +#endif
56692 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
56693 + {
56694 + .procname = "tpe_restrict_all",
56695 + .data = &grsec_enable_tpe_all,
56696 + .maxlen = sizeof(int),
56697 + .mode = 0600,
56698 + .proc_handler = &proc_dointvec,
56699 + },
56700 +#endif
56701 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
56702 + {
56703 + .procname = "socket_all",
56704 + .data = &grsec_enable_socket_all,
56705 + .maxlen = sizeof(int),
56706 + .mode = 0600,
56707 + .proc_handler = &proc_dointvec,
56708 + },
56709 + {
56710 + .procname = "socket_all_gid",
56711 + .data = &grsec_socket_all_gid,
56712 + .maxlen = sizeof(int),
56713 + .mode = 0600,
56714 + .proc_handler = &proc_dointvec,
56715 + },
56716 +#endif
56717 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
56718 + {
56719 + .procname = "socket_client",
56720 + .data = &grsec_enable_socket_client,
56721 + .maxlen = sizeof(int),
56722 + .mode = 0600,
56723 + .proc_handler = &proc_dointvec,
56724 + },
56725 + {
56726 + .procname = "socket_client_gid",
56727 + .data = &grsec_socket_client_gid,
56728 + .maxlen = sizeof(int),
56729 + .mode = 0600,
56730 + .proc_handler = &proc_dointvec,
56731 + },
56732 +#endif
56733 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
56734 + {
56735 + .procname = "socket_server",
56736 + .data = &grsec_enable_socket_server,
56737 + .maxlen = sizeof(int),
56738 + .mode = 0600,
56739 + .proc_handler = &proc_dointvec,
56740 + },
56741 + {
56742 + .procname = "socket_server_gid",
56743 + .data = &grsec_socket_server_gid,
56744 + .maxlen = sizeof(int),
56745 + .mode = 0600,
56746 + .proc_handler = &proc_dointvec,
56747 + },
56748 +#endif
56749 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
56750 + {
56751 + .procname = "audit_group",
56752 + .data = &grsec_enable_group,
56753 + .maxlen = sizeof(int),
56754 + .mode = 0600,
56755 + .proc_handler = &proc_dointvec,
56756 + },
56757 + {
56758 + .procname = "audit_gid",
56759 + .data = &grsec_audit_gid,
56760 + .maxlen = sizeof(int),
56761 + .mode = 0600,
56762 + .proc_handler = &proc_dointvec,
56763 + },
56764 +#endif
56765 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
56766 + {
56767 + .procname = "audit_chdir",
56768 + .data = &grsec_enable_chdir,
56769 + .maxlen = sizeof(int),
56770 + .mode = 0600,
56771 + .proc_handler = &proc_dointvec,
56772 + },
56773 +#endif
56774 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
56775 + {
56776 + .procname = "audit_mount",
56777 + .data = &grsec_enable_mount,
56778 + .maxlen = sizeof(int),
56779 + .mode = 0600,
56780 + .proc_handler = &proc_dointvec,
56781 + },
56782 +#endif
56783 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
56784 + {
56785 + .procname = "audit_textrel",
56786 + .data = &grsec_enable_audit_textrel,
56787 + .maxlen = sizeof(int),
56788 + .mode = 0600,
56789 + .proc_handler = &proc_dointvec,
56790 + },
56791 +#endif
56792 +#ifdef CONFIG_GRKERNSEC_DMESG
56793 + {
56794 + .procname = "dmesg",
56795 + .data = &grsec_enable_dmesg,
56796 + .maxlen = sizeof(int),
56797 + .mode = 0600,
56798 + .proc_handler = &proc_dointvec,
56799 + },
56800 +#endif
56801 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
56802 + {
56803 + .procname = "chroot_findtask",
56804 + .data = &grsec_enable_chroot_findtask,
56805 + .maxlen = sizeof(int),
56806 + .mode = 0600,
56807 + .proc_handler = &proc_dointvec,
56808 + },
56809 +#endif
56810 +#ifdef CONFIG_GRKERNSEC_RESLOG
56811 + {
56812 + .procname = "resource_logging",
56813 + .data = &grsec_resource_logging,
56814 + .maxlen = sizeof(int),
56815 + .mode = 0600,
56816 + .proc_handler = &proc_dointvec,
56817 + },
56818 +#endif
56819 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
56820 + {
56821 + .procname = "audit_ptrace",
56822 + .data = &grsec_enable_audit_ptrace,
56823 + .maxlen = sizeof(int),
56824 + .mode = 0600,
56825 + .proc_handler = &proc_dointvec,
56826 + },
56827 +#endif
56828 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
56829 + {
56830 + .procname = "harden_ptrace",
56831 + .data = &grsec_enable_harden_ptrace,
56832 + .maxlen = sizeof(int),
56833 + .mode = 0600,
56834 + .proc_handler = &proc_dointvec,
56835 + },
56836 +#endif
56837 + {
56838 + .procname = "grsec_lock",
56839 + .data = &grsec_lock,
56840 + .maxlen = sizeof(int),
56841 + .mode = 0600,
56842 + .proc_handler = &proc_dointvec,
56843 + },
56844 +#endif
56845 +#ifdef CONFIG_GRKERNSEC_ROFS
56846 + {
56847 + .procname = "romount_protect",
56848 + .data = &grsec_enable_rofs,
56849 + .maxlen = sizeof(int),
56850 + .mode = 0600,
56851 + .proc_handler = &proc_dointvec_minmax,
56852 + .extra1 = &one,
56853 + .extra2 = &one,
56854 + },
56855 +#endif
56856 + { }
56857 +};
56858 +#endif
56859 diff --git a/grsecurity/grsec_time.c b/grsecurity/grsec_time.c
56860 new file mode 100644
56861 index 0000000..0dc13c3
56862 --- /dev/null
56863 +++ b/grsecurity/grsec_time.c
56864 @@ -0,0 +1,16 @@
56865 +#include <linux/kernel.h>
56866 +#include <linux/sched.h>
56867 +#include <linux/grinternal.h>
56868 +#include <linux/module.h>
56869 +
56870 +void
56871 +gr_log_timechange(void)
56872 +{
56873 +#ifdef CONFIG_GRKERNSEC_TIME
56874 + if (grsec_enable_time)
56875 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
56876 +#endif
56877 + return;
56878 +}
56879 +
56880 +EXPORT_SYMBOL(gr_log_timechange);
56881 diff --git a/grsecurity/grsec_tpe.c b/grsecurity/grsec_tpe.c
56882 new file mode 100644
56883 index 0000000..07e0dc0
56884 --- /dev/null
56885 +++ b/grsecurity/grsec_tpe.c
56886 @@ -0,0 +1,73 @@
56887 +#include <linux/kernel.h>
56888 +#include <linux/sched.h>
56889 +#include <linux/file.h>
56890 +#include <linux/fs.h>
56891 +#include <linux/grinternal.h>
56892 +
56893 +extern int gr_acl_tpe_check(void);
56894 +
56895 +int
56896 +gr_tpe_allow(const struct file *file)
56897 +{
56898 +#ifdef CONFIG_GRKERNSEC
56899 + struct inode *inode = file->f_path.dentry->d_parent->d_inode;
56900 + const struct cred *cred = current_cred();
56901 + char *msg = NULL;
56902 + char *msg2 = NULL;
56903 +
56904 + // never restrict root
56905 + if (!cred->uid)
56906 + return 1;
56907 +
56908 + if (grsec_enable_tpe) {
56909 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
56910 + if (grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid))
56911 + msg = "not being in trusted group";
56912 + else if (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid))
56913 + msg = "being in untrusted group";
56914 +#else
56915 + if (in_group_p(grsec_tpe_gid))
56916 + msg = "being in untrusted group";
56917 +#endif
56918 + }
56919 + if (!msg && gr_acl_tpe_check())
56920 + msg = "being in untrusted role";
56921 +
56922 + // not in any affected group/role
56923 + if (!msg)
56924 + goto next_check;
56925 +
56926 + if (inode->i_uid)
56927 + msg2 = "file in non-root-owned directory";
56928 + else if (inode->i_mode & S_IWOTH)
56929 + msg2 = "file in world-writable directory";
56930 + else if (inode->i_mode & S_IWGRP)
56931 + msg2 = "file in group-writable directory";
56932 +
56933 + if (msg && msg2) {
56934 + char fullmsg[70] = {0};
56935 + snprintf(fullmsg, sizeof(fullmsg)-1, "%s and %s", msg, msg2);
56936 + gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, fullmsg, file->f_path.dentry, file->f_path.mnt);
56937 + return 0;
56938 + }
56939 + msg = NULL;
56940 +next_check:
56941 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
56942 + if (!grsec_enable_tpe || !grsec_enable_tpe_all)
56943 + return 1;
56944 +
56945 + if (inode->i_uid && (inode->i_uid != cred->uid))
56946 + msg = "directory not owned by user";
56947 + else if (inode->i_mode & S_IWOTH)
56948 + msg = "file in world-writable directory";
56949 + else if (inode->i_mode & S_IWGRP)
56950 + msg = "file in group-writable directory";
56951 +
56952 + if (msg) {
56953 + gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, msg, file->f_path.dentry, file->f_path.mnt);
56954 + return 0;
56955 + }
56956 +#endif
56957 +#endif
56958 + return 1;
56959 +}
56960 diff --git a/grsecurity/grsum.c b/grsecurity/grsum.c
56961 new file mode 100644
56962 index 0000000..9f7b1ac
56963 --- /dev/null
56964 +++ b/grsecurity/grsum.c
56965 @@ -0,0 +1,61 @@
56966 +#include <linux/err.h>
56967 +#include <linux/kernel.h>
56968 +#include <linux/sched.h>
56969 +#include <linux/mm.h>
56970 +#include <linux/scatterlist.h>
56971 +#include <linux/crypto.h>
56972 +#include <linux/gracl.h>
56973 +
56974 +
56975 +#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
56976 +#error "crypto and sha256 must be built into the kernel"
56977 +#endif
56978 +
56979 +int
56980 +chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
56981 +{
56982 + char *p;
56983 + struct crypto_hash *tfm;
56984 + struct hash_desc desc;
56985 + struct scatterlist sg;
56986 + unsigned char temp_sum[GR_SHA_LEN];
56987 + volatile int retval = 0;
56988 + volatile int dummy = 0;
56989 + unsigned int i;
56990 +
56991 + sg_init_table(&sg, 1);
56992 +
56993 + tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
56994 + if (IS_ERR(tfm)) {
56995 + /* should never happen, since sha256 should be built in */
56996 + return 1;
56997 + }
56998 +
56999 + desc.tfm = tfm;
57000 + desc.flags = 0;
57001 +
57002 + crypto_hash_init(&desc);
57003 +
57004 + p = salt;
57005 + sg_set_buf(&sg, p, GR_SALT_LEN);
57006 + crypto_hash_update(&desc, &sg, sg.length);
57007 +
57008 + p = entry->pw;
57009 + sg_set_buf(&sg, p, strlen(p));
57010 +
57011 + crypto_hash_update(&desc, &sg, sg.length);
57012 +
57013 + crypto_hash_final(&desc, temp_sum);
57014 +
57015 + memset(entry->pw, 0, GR_PW_LEN);
57016 +
57017 + for (i = 0; i < GR_SHA_LEN; i++)
57018 + if (sum[i] != temp_sum[i])
57019 + retval = 1;
57020 + else
57021 + dummy = 1; // waste a cycle
57022 +
57023 + crypto_free_hash(tfm);
57024 +
57025 + return retval;
57026 +}
57027 diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
57028 index 6cd5b64..f620d2d 100644
57029 --- a/include/acpi/acpi_bus.h
57030 +++ b/include/acpi/acpi_bus.h
57031 @@ -107,7 +107,7 @@ struct acpi_device_ops {
57032 acpi_op_bind bind;
57033 acpi_op_unbind unbind;
57034 acpi_op_notify notify;
57035 -};
57036 +} __no_const;
57037
57038 #define ACPI_DRIVER_ALL_NOTIFY_EVENTS 0x1 /* system AND device events */
57039
57040 diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
57041 index b7babf0..71e4e74 100644
57042 --- a/include/asm-generic/atomic-long.h
57043 +++ b/include/asm-generic/atomic-long.h
57044 @@ -22,6 +22,12 @@
57045
57046 typedef atomic64_t atomic_long_t;
57047
57048 +#ifdef CONFIG_PAX_REFCOUNT
57049 +typedef atomic64_unchecked_t atomic_long_unchecked_t;
57050 +#else
57051 +typedef atomic64_t atomic_long_unchecked_t;
57052 +#endif
57053 +
57054 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
57055
57056 static inline long atomic_long_read(atomic_long_t *l)
57057 @@ -31,6 +37,15 @@ static inline long atomic_long_read(atomic_long_t *l)
57058 return (long)atomic64_read(v);
57059 }
57060
57061 +#ifdef CONFIG_PAX_REFCOUNT
57062 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
57063 +{
57064 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
57065 +
57066 + return (long)atomic64_read_unchecked(v);
57067 +}
57068 +#endif
57069 +
57070 static inline void atomic_long_set(atomic_long_t *l, long i)
57071 {
57072 atomic64_t *v = (atomic64_t *)l;
57073 @@ -38,6 +53,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
57074 atomic64_set(v, i);
57075 }
57076
57077 +#ifdef CONFIG_PAX_REFCOUNT
57078 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
57079 +{
57080 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
57081 +
57082 + atomic64_set_unchecked(v, i);
57083 +}
57084 +#endif
57085 +
57086 static inline void atomic_long_inc(atomic_long_t *l)
57087 {
57088 atomic64_t *v = (atomic64_t *)l;
57089 @@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
57090 atomic64_inc(v);
57091 }
57092
57093 +#ifdef CONFIG_PAX_REFCOUNT
57094 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
57095 +{
57096 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
57097 +
57098 + atomic64_inc_unchecked(v);
57099 +}
57100 +#endif
57101 +
57102 static inline void atomic_long_dec(atomic_long_t *l)
57103 {
57104 atomic64_t *v = (atomic64_t *)l;
57105 @@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
57106 atomic64_dec(v);
57107 }
57108
57109 +#ifdef CONFIG_PAX_REFCOUNT
57110 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
57111 +{
57112 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
57113 +
57114 + atomic64_dec_unchecked(v);
57115 +}
57116 +#endif
57117 +
57118 static inline void atomic_long_add(long i, atomic_long_t *l)
57119 {
57120 atomic64_t *v = (atomic64_t *)l;
57121 @@ -59,6 +101,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
57122 atomic64_add(i, v);
57123 }
57124
57125 +#ifdef CONFIG_PAX_REFCOUNT
57126 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
57127 +{
57128 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
57129 +
57130 + atomic64_add_unchecked(i, v);
57131 +}
57132 +#endif
57133 +
57134 static inline void atomic_long_sub(long i, atomic_long_t *l)
57135 {
57136 atomic64_t *v = (atomic64_t *)l;
57137 @@ -66,6 +117,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
57138 atomic64_sub(i, v);
57139 }
57140
57141 +#ifdef CONFIG_PAX_REFCOUNT
57142 +static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
57143 +{
57144 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
57145 +
57146 + atomic64_sub_unchecked(i, v);
57147 +}
57148 +#endif
57149 +
57150 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
57151 {
57152 atomic64_t *v = (atomic64_t *)l;
57153 @@ -115,6 +175,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
57154 return (long)atomic64_inc_return(v);
57155 }
57156
57157 +#ifdef CONFIG_PAX_REFCOUNT
57158 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
57159 +{
57160 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
57161 +
57162 + return (long)atomic64_inc_return_unchecked(v);
57163 +}
57164 +#endif
57165 +
57166 static inline long atomic_long_dec_return(atomic_long_t *l)
57167 {
57168 atomic64_t *v = (atomic64_t *)l;
57169 @@ -140,6 +209,12 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
57170
57171 typedef atomic_t atomic_long_t;
57172
57173 +#ifdef CONFIG_PAX_REFCOUNT
57174 +typedef atomic_unchecked_t atomic_long_unchecked_t;
57175 +#else
57176 +typedef atomic_t atomic_long_unchecked_t;
57177 +#endif
57178 +
57179 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
57180 static inline long atomic_long_read(atomic_long_t *l)
57181 {
57182 @@ -148,6 +223,15 @@ static inline long atomic_long_read(atomic_long_t *l)
57183 return (long)atomic_read(v);
57184 }
57185
57186 +#ifdef CONFIG_PAX_REFCOUNT
57187 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
57188 +{
57189 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57190 +
57191 + return (long)atomic_read_unchecked(v);
57192 +}
57193 +#endif
57194 +
57195 static inline void atomic_long_set(atomic_long_t *l, long i)
57196 {
57197 atomic_t *v = (atomic_t *)l;
57198 @@ -155,6 +239,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
57199 atomic_set(v, i);
57200 }
57201
57202 +#ifdef CONFIG_PAX_REFCOUNT
57203 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
57204 +{
57205 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57206 +
57207 + atomic_set_unchecked(v, i);
57208 +}
57209 +#endif
57210 +
57211 static inline void atomic_long_inc(atomic_long_t *l)
57212 {
57213 atomic_t *v = (atomic_t *)l;
57214 @@ -162,6 +255,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
57215 atomic_inc(v);
57216 }
57217
57218 +#ifdef CONFIG_PAX_REFCOUNT
57219 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
57220 +{
57221 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57222 +
57223 + atomic_inc_unchecked(v);
57224 +}
57225 +#endif
57226 +
57227 static inline void atomic_long_dec(atomic_long_t *l)
57228 {
57229 atomic_t *v = (atomic_t *)l;
57230 @@ -169,6 +271,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
57231 atomic_dec(v);
57232 }
57233
57234 +#ifdef CONFIG_PAX_REFCOUNT
57235 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
57236 +{
57237 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57238 +
57239 + atomic_dec_unchecked(v);
57240 +}
57241 +#endif
57242 +
57243 static inline void atomic_long_add(long i, atomic_long_t *l)
57244 {
57245 atomic_t *v = (atomic_t *)l;
57246 @@ -176,6 +287,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
57247 atomic_add(i, v);
57248 }
57249
57250 +#ifdef CONFIG_PAX_REFCOUNT
57251 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
57252 +{
57253 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57254 +
57255 + atomic_add_unchecked(i, v);
57256 +}
57257 +#endif
57258 +
57259 static inline void atomic_long_sub(long i, atomic_long_t *l)
57260 {
57261 atomic_t *v = (atomic_t *)l;
57262 @@ -183,6 +303,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
57263 atomic_sub(i, v);
57264 }
57265
57266 +#ifdef CONFIG_PAX_REFCOUNT
57267 +static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
57268 +{
57269 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57270 +
57271 + atomic_sub_unchecked(i, v);
57272 +}
57273 +#endif
57274 +
57275 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
57276 {
57277 atomic_t *v = (atomic_t *)l;
57278 @@ -232,6 +361,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
57279 return (long)atomic_inc_return(v);
57280 }
57281
57282 +#ifdef CONFIG_PAX_REFCOUNT
57283 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
57284 +{
57285 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57286 +
57287 + return (long)atomic_inc_return_unchecked(v);
57288 +}
57289 +#endif
57290 +
57291 static inline long atomic_long_dec_return(atomic_long_t *l)
57292 {
57293 atomic_t *v = (atomic_t *)l;
57294 @@ -255,4 +393,49 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
57295
57296 #endif /* BITS_PER_LONG == 64 */
57297
57298 +#ifdef CONFIG_PAX_REFCOUNT
57299 +static inline void pax_refcount_needs_these_functions(void)
57300 +{
57301 + atomic_read_unchecked((atomic_unchecked_t *)NULL);
57302 + atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
57303 + atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
57304 + atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
57305 + atomic_inc_unchecked((atomic_unchecked_t *)NULL);
57306 + (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
57307 + atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
57308 + atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
57309 + atomic_dec_unchecked((atomic_unchecked_t *)NULL);
57310 + atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
57311 + (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
57312 +
57313 + atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
57314 + atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
57315 + atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
57316 + atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL);
57317 + atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
57318 + atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
57319 + atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
57320 +}
57321 +#else
57322 +#define atomic_read_unchecked(v) atomic_read(v)
57323 +#define atomic_set_unchecked(v, i) atomic_set((v), (i))
57324 +#define atomic_add_unchecked(i, v) atomic_add((i), (v))
57325 +#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
57326 +#define atomic_inc_unchecked(v) atomic_inc(v)
57327 +#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
57328 +#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
57329 +#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
57330 +#define atomic_dec_unchecked(v) atomic_dec(v)
57331 +#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
57332 +#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
57333 +
57334 +#define atomic_long_read_unchecked(v) atomic_long_read(v)
57335 +#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
57336 +#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
57337 +#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v))
57338 +#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
57339 +#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
57340 +#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
57341 +#endif
57342 +
57343 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
57344 diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
57345 index b18ce4f..2ee2843 100644
57346 --- a/include/asm-generic/atomic64.h
57347 +++ b/include/asm-generic/atomic64.h
57348 @@ -16,6 +16,8 @@ typedef struct {
57349 long long counter;
57350 } atomic64_t;
57351
57352 +typedef atomic64_t atomic64_unchecked_t;
57353 +
57354 #define ATOMIC64_INIT(i) { (i) }
57355
57356 extern long long atomic64_read(const atomic64_t *v);
57357 @@ -39,4 +41,14 @@ extern int atomic64_add_unless(atomic64_t *v, long long a, long long u);
57358 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
57359 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
57360
57361 +#define atomic64_read_unchecked(v) atomic64_read(v)
57362 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
57363 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
57364 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
57365 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
57366 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
57367 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
57368 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
57369 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
57370 +
57371 #endif /* _ASM_GENERIC_ATOMIC64_H */
57372 diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h
57373 index 1bfcfe5..e04c5c9 100644
57374 --- a/include/asm-generic/cache.h
57375 +++ b/include/asm-generic/cache.h
57376 @@ -6,7 +6,7 @@
57377 * cache lines need to provide their own cache.h.
57378 */
57379
57380 -#define L1_CACHE_SHIFT 5
57381 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
57382 +#define L1_CACHE_SHIFT 5UL
57383 +#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
57384
57385 #endif /* __ASM_GENERIC_CACHE_H */
57386 diff --git a/include/asm-generic/int-l64.h b/include/asm-generic/int-l64.h
57387 index 1ca3efc..e3dc852 100644
57388 --- a/include/asm-generic/int-l64.h
57389 +++ b/include/asm-generic/int-l64.h
57390 @@ -46,6 +46,8 @@ typedef unsigned int u32;
57391 typedef signed long s64;
57392 typedef unsigned long u64;
57393
57394 +typedef unsigned int intoverflow_t __attribute__ ((mode(TI)));
57395 +
57396 #define S8_C(x) x
57397 #define U8_C(x) x ## U
57398 #define S16_C(x) x
57399 diff --git a/include/asm-generic/int-ll64.h b/include/asm-generic/int-ll64.h
57400 index f394147..b6152b9 100644
57401 --- a/include/asm-generic/int-ll64.h
57402 +++ b/include/asm-generic/int-ll64.h
57403 @@ -51,6 +51,8 @@ typedef unsigned int u32;
57404 typedef signed long long s64;
57405 typedef unsigned long long u64;
57406
57407 +typedef unsigned long long intoverflow_t;
57408 +
57409 #define S8_C(x) x
57410 #define U8_C(x) x ## U
57411 #define S16_C(x) x
57412 diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
57413 index 0232ccb..13d9165 100644
57414 --- a/include/asm-generic/kmap_types.h
57415 +++ b/include/asm-generic/kmap_types.h
57416 @@ -29,10 +29,11 @@ KMAP_D(16) KM_IRQ_PTE,
57417 KMAP_D(17) KM_NMI,
57418 KMAP_D(18) KM_NMI_PTE,
57419 KMAP_D(19) KM_KDB,
57420 +KMAP_D(20) KM_CLEARPAGE,
57421 /*
57422 * Remember to update debug_kmap_atomic() when adding new kmap types!
57423 */
57424 -KMAP_D(20) KM_TYPE_NR
57425 +KMAP_D(21) KM_TYPE_NR
57426 };
57427
57428 #undef KMAP_D
57429 diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
57430 index 725612b..9cc513a 100644
57431 --- a/include/asm-generic/pgtable-nopmd.h
57432 +++ b/include/asm-generic/pgtable-nopmd.h
57433 @@ -1,14 +1,19 @@
57434 #ifndef _PGTABLE_NOPMD_H
57435 #define _PGTABLE_NOPMD_H
57436
57437 -#ifndef __ASSEMBLY__
57438 -
57439 #include <asm-generic/pgtable-nopud.h>
57440
57441 -struct mm_struct;
57442 -
57443 #define __PAGETABLE_PMD_FOLDED
57444
57445 +#define PMD_SHIFT PUD_SHIFT
57446 +#define PTRS_PER_PMD 1
57447 +#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
57448 +#define PMD_MASK (~(PMD_SIZE-1))
57449 +
57450 +#ifndef __ASSEMBLY__
57451 +
57452 +struct mm_struct;
57453 +
57454 /*
57455 * Having the pmd type consist of a pud gets the size right, and allows
57456 * us to conceptually access the pud entry that this pmd is folded into
57457 @@ -16,11 +21,6 @@ struct mm_struct;
57458 */
57459 typedef struct { pud_t pud; } pmd_t;
57460
57461 -#define PMD_SHIFT PUD_SHIFT
57462 -#define PTRS_PER_PMD 1
57463 -#define PMD_SIZE (1UL << PMD_SHIFT)
57464 -#define PMD_MASK (~(PMD_SIZE-1))
57465 -
57466 /*
57467 * The "pud_xxx()" functions here are trivial for a folded two-level
57468 * setup: the pmd is never bad, and a pmd always exists (as it's folded
57469 diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
57470 index 810431d..ccc3638 100644
57471 --- a/include/asm-generic/pgtable-nopud.h
57472 +++ b/include/asm-generic/pgtable-nopud.h
57473 @@ -1,10 +1,15 @@
57474 #ifndef _PGTABLE_NOPUD_H
57475 #define _PGTABLE_NOPUD_H
57476
57477 -#ifndef __ASSEMBLY__
57478 -
57479 #define __PAGETABLE_PUD_FOLDED
57480
57481 +#define PUD_SHIFT PGDIR_SHIFT
57482 +#define PTRS_PER_PUD 1
57483 +#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
57484 +#define PUD_MASK (~(PUD_SIZE-1))
57485 +
57486 +#ifndef __ASSEMBLY__
57487 +
57488 /*
57489 * Having the pud type consist of a pgd gets the size right, and allows
57490 * us to conceptually access the pgd entry that this pud is folded into
57491 @@ -12,11 +17,6 @@
57492 */
57493 typedef struct { pgd_t pgd; } pud_t;
57494
57495 -#define PUD_SHIFT PGDIR_SHIFT
57496 -#define PTRS_PER_PUD 1
57497 -#define PUD_SIZE (1UL << PUD_SHIFT)
57498 -#define PUD_MASK (~(PUD_SIZE-1))
57499 -
57500 /*
57501 * The "pgd_xxx()" functions here are trivial for a folded two-level
57502 * setup: the pud is never bad, and a pud always exists (as it's folded
57503 diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
57504 index 76bff2b..c7a14e2 100644
57505 --- a/include/asm-generic/pgtable.h
57506 +++ b/include/asm-generic/pgtable.h
57507 @@ -443,6 +443,14 @@ static inline int pmd_write(pmd_t pmd)
57508 #endif /* __HAVE_ARCH_PMD_WRITE */
57509 #endif
57510
57511 +#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
57512 +static inline unsigned long pax_open_kernel(void) { return 0; }
57513 +#endif
57514 +
57515 +#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
57516 +static inline unsigned long pax_close_kernel(void) { return 0; }
57517 +#endif
57518 +
57519 #endif /* !__ASSEMBLY__ */
57520
57521 #endif /* _ASM_GENERIC_PGTABLE_H */
57522 diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
57523 index b5e2e4c..6a5373e 100644
57524 --- a/include/asm-generic/vmlinux.lds.h
57525 +++ b/include/asm-generic/vmlinux.lds.h
57526 @@ -217,6 +217,7 @@
57527 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
57528 VMLINUX_SYMBOL(__start_rodata) = .; \
57529 *(.rodata) *(.rodata.*) \
57530 + *(.data..read_only) \
57531 *(__vermagic) /* Kernel version magic */ \
57532 . = ALIGN(8); \
57533 VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
57534 @@ -722,17 +723,18 @@
57535 * section in the linker script will go there too. @phdr should have
57536 * a leading colon.
57537 *
57538 - * Note that this macros defines __per_cpu_load as an absolute symbol.
57539 + * Note that this macros defines per_cpu_load as an absolute symbol.
57540 * If there is no need to put the percpu section at a predetermined
57541 * address, use PERCPU_SECTION.
57542 */
57543 #define PERCPU_VADDR(cacheline, vaddr, phdr) \
57544 - VMLINUX_SYMBOL(__per_cpu_load) = .; \
57545 - .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
57546 + per_cpu_load = .; \
57547 + .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
57548 - LOAD_OFFSET) { \
57549 + VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
57550 PERCPU_INPUT(cacheline) \
57551 } phdr \
57552 - . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
57553 + . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu);
57554
57555 /**
57556 * PERCPU_SECTION - define output section for percpu area, simple version
57557 diff --git a/include/drm/drmP.h b/include/drm/drmP.h
57558 index bf4b2dc..2d0762f 100644
57559 --- a/include/drm/drmP.h
57560 +++ b/include/drm/drmP.h
57561 @@ -72,6 +72,7 @@
57562 #include <linux/workqueue.h>
57563 #include <linux/poll.h>
57564 #include <asm/pgalloc.h>
57565 +#include <asm/local.h>
57566 #include "drm.h"
57567
57568 #include <linux/idr.h>
57569 @@ -1038,7 +1039,7 @@ struct drm_device {
57570
57571 /** \name Usage Counters */
57572 /*@{ */
57573 - int open_count; /**< Outstanding files open */
57574 + local_t open_count; /**< Outstanding files open */
57575 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
57576 atomic_t vma_count; /**< Outstanding vma areas open */
57577 int buf_use; /**< Buffers in use -- cannot alloc */
57578 @@ -1049,7 +1050,7 @@ struct drm_device {
57579 /*@{ */
57580 unsigned long counters;
57581 enum drm_stat_type types[15];
57582 - atomic_t counts[15];
57583 + atomic_unchecked_t counts[15];
57584 /*@} */
57585
57586 struct list_head filelist;
57587 diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
57588 index 73b0712..0b7ef2f 100644
57589 --- a/include/drm/drm_crtc_helper.h
57590 +++ b/include/drm/drm_crtc_helper.h
57591 @@ -74,7 +74,7 @@ struct drm_crtc_helper_funcs {
57592
57593 /* disable crtc when not in use - more explicit than dpms off */
57594 void (*disable)(struct drm_crtc *crtc);
57595 -};
57596 +} __no_const;
57597
57598 struct drm_encoder_helper_funcs {
57599 void (*dpms)(struct drm_encoder *encoder, int mode);
57600 @@ -95,7 +95,7 @@ struct drm_encoder_helper_funcs {
57601 struct drm_connector *connector);
57602 /* disable encoder when not in use - more explicit than dpms off */
57603 void (*disable)(struct drm_encoder *encoder);
57604 -};
57605 +} __no_const;
57606
57607 struct drm_connector_helper_funcs {
57608 int (*get_modes)(struct drm_connector *connector);
57609 diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
57610 index 26c1f78..6722682 100644
57611 --- a/include/drm/ttm/ttm_memory.h
57612 +++ b/include/drm/ttm/ttm_memory.h
57613 @@ -47,7 +47,7 @@
57614
57615 struct ttm_mem_shrink {
57616 int (*do_shrink) (struct ttm_mem_shrink *);
57617 -};
57618 +} __no_const;
57619
57620 /**
57621 * struct ttm_mem_global - Global memory accounting structure.
57622 diff --git a/include/linux/a.out.h b/include/linux/a.out.h
57623 index e86dfca..40cc55f 100644
57624 --- a/include/linux/a.out.h
57625 +++ b/include/linux/a.out.h
57626 @@ -39,6 +39,14 @@ enum machine_type {
57627 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
57628 };
57629
57630 +/* Constants for the N_FLAGS field */
57631 +#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
57632 +#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
57633 +#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
57634 +#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
57635 +/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
57636 +#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
57637 +
57638 #if !defined (N_MAGIC)
57639 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
57640 #endif
57641 diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
57642 index 49a83ca..df96b54 100644
57643 --- a/include/linux/atmdev.h
57644 +++ b/include/linux/atmdev.h
57645 @@ -237,7 +237,7 @@ struct compat_atm_iobuf {
57646 #endif
57647
57648 struct k_atm_aal_stats {
57649 -#define __HANDLE_ITEM(i) atomic_t i
57650 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
57651 __AAL_STAT_ITEMS
57652 #undef __HANDLE_ITEM
57653 };
57654 diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
57655 index fd88a39..8a801b4 100644
57656 --- a/include/linux/binfmts.h
57657 +++ b/include/linux/binfmts.h
57658 @@ -18,7 +18,7 @@ struct pt_regs;
57659 #define BINPRM_BUF_SIZE 128
57660
57661 #ifdef __KERNEL__
57662 -#include <linux/list.h>
57663 +#include <linux/sched.h>
57664
57665 #define CORENAME_MAX_SIZE 128
57666
57667 @@ -58,6 +58,7 @@ struct linux_binprm {
57668 unsigned interp_flags;
57669 unsigned interp_data;
57670 unsigned long loader, exec;
57671 + char tcomm[TASK_COMM_LEN];
57672 };
57673
57674 #define BINPRM_FLAGS_ENFORCE_NONDUMP_BIT 0
57675 @@ -88,6 +89,7 @@ struct linux_binfmt {
57676 int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
57677 int (*load_shlib)(struct file *);
57678 int (*core_dump)(struct coredump_params *cprm);
57679 + void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
57680 unsigned long min_coredump; /* minimal dump size */
57681 };
57682
57683 diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
57684 index 0ed1eb0..3ab569b 100644
57685 --- a/include/linux/blkdev.h
57686 +++ b/include/linux/blkdev.h
57687 @@ -1315,7 +1315,7 @@ struct block_device_operations {
57688 /* this callback is with swap_lock and sometimes page table lock held */
57689 void (*swap_slot_free_notify) (struct block_device *, unsigned long);
57690 struct module *owner;
57691 -};
57692 +} __do_const;
57693
57694 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
57695 unsigned long);
57696 diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
57697 index 4d1a074..88f929a 100644
57698 --- a/include/linux/blktrace_api.h
57699 +++ b/include/linux/blktrace_api.h
57700 @@ -162,7 +162,7 @@ struct blk_trace {
57701 struct dentry *dir;
57702 struct dentry *dropped_file;
57703 struct dentry *msg_file;
57704 - atomic_t dropped;
57705 + atomic_unchecked_t dropped;
57706 };
57707
57708 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
57709 diff --git a/include/linux/byteorder/little_endian.h b/include/linux/byteorder/little_endian.h
57710 index 83195fb..0b0f77d 100644
57711 --- a/include/linux/byteorder/little_endian.h
57712 +++ b/include/linux/byteorder/little_endian.h
57713 @@ -42,51 +42,51 @@
57714
57715 static inline __le64 __cpu_to_le64p(const __u64 *p)
57716 {
57717 - return (__force __le64)*p;
57718 + return (__force const __le64)*p;
57719 }
57720 static inline __u64 __le64_to_cpup(const __le64 *p)
57721 {
57722 - return (__force __u64)*p;
57723 + return (__force const __u64)*p;
57724 }
57725 static inline __le32 __cpu_to_le32p(const __u32 *p)
57726 {
57727 - return (__force __le32)*p;
57728 + return (__force const __le32)*p;
57729 }
57730 static inline __u32 __le32_to_cpup(const __le32 *p)
57731 {
57732 - return (__force __u32)*p;
57733 + return (__force const __u32)*p;
57734 }
57735 static inline __le16 __cpu_to_le16p(const __u16 *p)
57736 {
57737 - return (__force __le16)*p;
57738 + return (__force const __le16)*p;
57739 }
57740 static inline __u16 __le16_to_cpup(const __le16 *p)
57741 {
57742 - return (__force __u16)*p;
57743 + return (__force const __u16)*p;
57744 }
57745 static inline __be64 __cpu_to_be64p(const __u64 *p)
57746 {
57747 - return (__force __be64)__swab64p(p);
57748 + return (__force const __be64)__swab64p(p);
57749 }
57750 static inline __u64 __be64_to_cpup(const __be64 *p)
57751 {
57752 - return __swab64p((__u64 *)p);
57753 + return __swab64p((const __u64 *)p);
57754 }
57755 static inline __be32 __cpu_to_be32p(const __u32 *p)
57756 {
57757 - return (__force __be32)__swab32p(p);
57758 + return (__force const __be32)__swab32p(p);
57759 }
57760 static inline __u32 __be32_to_cpup(const __be32 *p)
57761 {
57762 - return __swab32p((__u32 *)p);
57763 + return __swab32p((const __u32 *)p);
57764 }
57765 static inline __be16 __cpu_to_be16p(const __u16 *p)
57766 {
57767 - return (__force __be16)__swab16p(p);
57768 + return (__force const __be16)__swab16p(p);
57769 }
57770 static inline __u16 __be16_to_cpup(const __be16 *p)
57771 {
57772 - return __swab16p((__u16 *)p);
57773 + return __swab16p((const __u16 *)p);
57774 }
57775 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
57776 #define __le64_to_cpus(x) do { (void)(x); } while (0)
57777 diff --git a/include/linux/cache.h b/include/linux/cache.h
57778 index 4c57065..4307975 100644
57779 --- a/include/linux/cache.h
57780 +++ b/include/linux/cache.h
57781 @@ -16,6 +16,10 @@
57782 #define __read_mostly
57783 #endif
57784
57785 +#ifndef __read_only
57786 +#define __read_only __read_mostly
57787 +#endif
57788 +
57789 #ifndef ____cacheline_aligned
57790 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
57791 #endif
57792 diff --git a/include/linux/capability.h b/include/linux/capability.h
57793 index a63d13d..069bfd5 100644
57794 --- a/include/linux/capability.h
57795 +++ b/include/linux/capability.h
57796 @@ -548,6 +548,9 @@ extern bool capable(int cap);
57797 extern bool ns_capable(struct user_namespace *ns, int cap);
57798 extern bool task_ns_capable(struct task_struct *t, int cap);
57799 extern bool nsown_capable(int cap);
57800 +extern bool task_ns_capable_nolog(struct task_struct *t, int cap);
57801 +extern bool ns_capable_nolog(struct user_namespace *ns, int cap);
57802 +extern bool capable_nolog(int cap);
57803
57804 /* audit system wants to get cap info from files as well */
57805 extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
57806 diff --git a/include/linux/cleancache.h b/include/linux/cleancache.h
57807 index 04ffb2e..6799180 100644
57808 --- a/include/linux/cleancache.h
57809 +++ b/include/linux/cleancache.h
57810 @@ -31,7 +31,7 @@ struct cleancache_ops {
57811 void (*flush_page)(int, struct cleancache_filekey, pgoff_t);
57812 void (*flush_inode)(int, struct cleancache_filekey);
57813 void (*flush_fs)(int);
57814 -};
57815 +} __no_const;
57816
57817 extern struct cleancache_ops
57818 cleancache_register_ops(struct cleancache_ops *ops);
57819 diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
57820 index dfadc96..c0e70c1 100644
57821 --- a/include/linux/compiler-gcc4.h
57822 +++ b/include/linux/compiler-gcc4.h
57823 @@ -31,6 +31,12 @@
57824
57825
57826 #if __GNUC_MINOR__ >= 5
57827 +
57828 +#ifdef CONSTIFY_PLUGIN
57829 +#define __no_const __attribute__((no_const))
57830 +#define __do_const __attribute__((do_const))
57831 +#endif
57832 +
57833 /*
57834 * Mark a position in code as unreachable. This can be used to
57835 * suppress control flow warnings after asm blocks that transfer
57836 @@ -46,6 +52,11 @@
57837 #define __noclone __attribute__((__noclone__))
57838
57839 #endif
57840 +
57841 +#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
57842 +#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
57843 +#define __bos0(ptr) __bos((ptr), 0)
57844 +#define __bos1(ptr) __bos((ptr), 1)
57845 #endif
57846
57847 #if __GNUC_MINOR__ > 0
57848 diff --git a/include/linux/compiler.h b/include/linux/compiler.h
57849 index 320d6c9..8573a1c 100644
57850 --- a/include/linux/compiler.h
57851 +++ b/include/linux/compiler.h
57852 @@ -5,31 +5,62 @@
57853
57854 #ifdef __CHECKER__
57855 # define __user __attribute__((noderef, address_space(1)))
57856 +# define __force_user __force __user
57857 # define __kernel __attribute__((address_space(0)))
57858 +# define __force_kernel __force __kernel
57859 # define __safe __attribute__((safe))
57860 # define __force __attribute__((force))
57861 # define __nocast __attribute__((nocast))
57862 # define __iomem __attribute__((noderef, address_space(2)))
57863 +# define __force_iomem __force __iomem
57864 # define __acquires(x) __attribute__((context(x,0,1)))
57865 # define __releases(x) __attribute__((context(x,1,0)))
57866 # define __acquire(x) __context__(x,1)
57867 # define __release(x) __context__(x,-1)
57868 # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
57869 # define __percpu __attribute__((noderef, address_space(3)))
57870 +# define __force_percpu __force __percpu
57871 #ifdef CONFIG_SPARSE_RCU_POINTER
57872 # define __rcu __attribute__((noderef, address_space(4)))
57873 +# define __force_rcu __force __rcu
57874 #else
57875 # define __rcu
57876 +# define __force_rcu
57877 #endif
57878 extern void __chk_user_ptr(const volatile void __user *);
57879 extern void __chk_io_ptr(const volatile void __iomem *);
57880 +#elif defined(CHECKER_PLUGIN)
57881 +//# define __user
57882 +//# define __force_user
57883 +//# define __kernel
57884 +//# define __force_kernel
57885 +# define __safe
57886 +# define __force
57887 +# define __nocast
57888 +# define __iomem
57889 +# define __force_iomem
57890 +# define __chk_user_ptr(x) (void)0
57891 +# define __chk_io_ptr(x) (void)0
57892 +# define __builtin_warning(x, y...) (1)
57893 +# define __acquires(x)
57894 +# define __releases(x)
57895 +# define __acquire(x) (void)0
57896 +# define __release(x) (void)0
57897 +# define __cond_lock(x,c) (c)
57898 +# define __percpu
57899 +# define __force_percpu
57900 +# define __rcu
57901 +# define __force_rcu
57902 #else
57903 # define __user
57904 +# define __force_user
57905 # define __kernel
57906 +# define __force_kernel
57907 # define __safe
57908 # define __force
57909 # define __nocast
57910 # define __iomem
57911 +# define __force_iomem
57912 # define __chk_user_ptr(x) (void)0
57913 # define __chk_io_ptr(x) (void)0
57914 # define __builtin_warning(x, y...) (1)
57915 @@ -39,7 +70,9 @@ extern void __chk_io_ptr(const volatile void __iomem *);
57916 # define __release(x) (void)0
57917 # define __cond_lock(x,c) (c)
57918 # define __percpu
57919 +# define __force_percpu
57920 # define __rcu
57921 +# define __force_rcu
57922 #endif
57923
57924 #ifdef __KERNEL__
57925 @@ -264,6 +297,14 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
57926 # define __attribute_const__ /* unimplemented */
57927 #endif
57928
57929 +#ifndef __no_const
57930 +# define __no_const
57931 +#endif
57932 +
57933 +#ifndef __do_const
57934 +# define __do_const
57935 +#endif
57936 +
57937 /*
57938 * Tell gcc if a function is cold. The compiler will assume any path
57939 * directly leading to the call is unlikely.
57940 @@ -273,6 +314,22 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
57941 #define __cold
57942 #endif
57943
57944 +#ifndef __alloc_size
57945 +#define __alloc_size(...)
57946 +#endif
57947 +
57948 +#ifndef __bos
57949 +#define __bos(ptr, arg)
57950 +#endif
57951 +
57952 +#ifndef __bos0
57953 +#define __bos0(ptr)
57954 +#endif
57955 +
57956 +#ifndef __bos1
57957 +#define __bos1(ptr)
57958 +#endif
57959 +
57960 /* Simple shorthand for a section definition */
57961 #ifndef __section
57962 # define __section(S) __attribute__ ((__section__(#S)))
57963 @@ -306,6 +363,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
57964 * use is to mediate communication between process-level code and irq/NMI
57965 * handlers, all running on the same CPU.
57966 */
57967 -#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
57968 +#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
57969 +#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
57970
57971 #endif /* __LINUX_COMPILER_H */
57972 diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
57973 index e9eaec5..bfeb9bb 100644
57974 --- a/include/linux/cpuset.h
57975 +++ b/include/linux/cpuset.h
57976 @@ -118,7 +118,7 @@ static inline void put_mems_allowed(void)
57977 * nodemask.
57978 */
57979 smp_mb();
57980 - --ACCESS_ONCE(current->mems_allowed_change_disable);
57981 + --ACCESS_ONCE_RW(current->mems_allowed_change_disable);
57982 }
57983
57984 static inline void set_mems_allowed(nodemask_t nodemask)
57985 diff --git a/include/linux/cred.h b/include/linux/cred.h
57986 index 4030896..8d6f342 100644
57987 --- a/include/linux/cred.h
57988 +++ b/include/linux/cred.h
57989 @@ -207,6 +207,9 @@ static inline void validate_creds_for_do_exit(struct task_struct *tsk)
57990 static inline void validate_process_creds(void)
57991 {
57992 }
57993 +static inline void validate_task_creds(struct task_struct *task)
57994 +{
57995 +}
57996 #endif
57997
57998 /**
57999 diff --git a/include/linux/crypto.h b/include/linux/crypto.h
58000 index 8a94217..15d49e3 100644
58001 --- a/include/linux/crypto.h
58002 +++ b/include/linux/crypto.h
58003 @@ -365,7 +365,7 @@ struct cipher_tfm {
58004 const u8 *key, unsigned int keylen);
58005 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
58006 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
58007 -};
58008 +} __no_const;
58009
58010 struct hash_tfm {
58011 int (*init)(struct hash_desc *desc);
58012 @@ -386,13 +386,13 @@ struct compress_tfm {
58013 int (*cot_decompress)(struct crypto_tfm *tfm,
58014 const u8 *src, unsigned int slen,
58015 u8 *dst, unsigned int *dlen);
58016 -};
58017 +} __no_const;
58018
58019 struct rng_tfm {
58020 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
58021 unsigned int dlen);
58022 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
58023 -};
58024 +} __no_const;
58025
58026 #define crt_ablkcipher crt_u.ablkcipher
58027 #define crt_aead crt_u.aead
58028 diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
58029 index 7925bf0..d5143d2 100644
58030 --- a/include/linux/decompress/mm.h
58031 +++ b/include/linux/decompress/mm.h
58032 @@ -77,7 +77,7 @@ static void free(void *where)
58033 * warnings when not needed (indeed large_malloc / large_free are not
58034 * needed by inflate */
58035
58036 -#define malloc(a) kmalloc(a, GFP_KERNEL)
58037 +#define malloc(a) kmalloc((a), GFP_KERNEL)
58038 #define free(a) kfree(a)
58039
58040 #define large_malloc(a) vmalloc(a)
58041 diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
58042 index e13117c..e9fc938 100644
58043 --- a/include/linux/dma-mapping.h
58044 +++ b/include/linux/dma-mapping.h
58045 @@ -46,7 +46,7 @@ struct dma_map_ops {
58046 u64 (*get_required_mask)(struct device *dev);
58047 #endif
58048 int is_phys;
58049 -};
58050 +} __do_const;
58051
58052 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
58053
58054 diff --git a/include/linux/efi.h b/include/linux/efi.h
58055 index 2362a0b..cfaf8fcc 100644
58056 --- a/include/linux/efi.h
58057 +++ b/include/linux/efi.h
58058 @@ -446,7 +446,7 @@ struct efivar_operations {
58059 efi_get_variable_t *get_variable;
58060 efi_get_next_variable_t *get_next_variable;
58061 efi_set_variable_t *set_variable;
58062 -};
58063 +} __no_const;
58064
58065 struct efivars {
58066 /*
58067 diff --git a/include/linux/elf.h b/include/linux/elf.h
58068 index 31f0508..5421c01 100644
58069 --- a/include/linux/elf.h
58070 +++ b/include/linux/elf.h
58071 @@ -49,6 +49,17 @@ typedef __s64 Elf64_Sxword;
58072 #define PT_GNU_EH_FRAME 0x6474e550
58073
58074 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
58075 +#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
58076 +
58077 +#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
58078 +
58079 +/* Constants for the e_flags field */
58080 +#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
58081 +#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
58082 +#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
58083 +#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
58084 +/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
58085 +#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
58086
58087 /*
58088 * Extended Numbering
58089 @@ -106,6 +117,8 @@ typedef __s64 Elf64_Sxword;
58090 #define DT_DEBUG 21
58091 #define DT_TEXTREL 22
58092 #define DT_JMPREL 23
58093 +#define DT_FLAGS 30
58094 + #define DF_TEXTREL 0x00000004
58095 #define DT_ENCODING 32
58096 #define OLD_DT_LOOS 0x60000000
58097 #define DT_LOOS 0x6000000d
58098 @@ -252,6 +265,19 @@ typedef struct elf64_hdr {
58099 #define PF_W 0x2
58100 #define PF_X 0x1
58101
58102 +#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
58103 +#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
58104 +#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
58105 +#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
58106 +#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
58107 +#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
58108 +/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
58109 +/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
58110 +#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
58111 +#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
58112 +#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
58113 +#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
58114 +
58115 typedef struct elf32_phdr{
58116 Elf32_Word p_type;
58117 Elf32_Off p_offset;
58118 @@ -344,6 +370,8 @@ typedef struct elf64_shdr {
58119 #define EI_OSABI 7
58120 #define EI_PAD 8
58121
58122 +#define EI_PAX 14
58123 +
58124 #define ELFMAG0 0x7f /* EI_MAG */
58125 #define ELFMAG1 'E'
58126 #define ELFMAG2 'L'
58127 @@ -423,6 +451,7 @@ extern Elf32_Dyn _DYNAMIC [];
58128 #define elf_note elf32_note
58129 #define elf_addr_t Elf32_Off
58130 #define Elf_Half Elf32_Half
58131 +#define elf_dyn Elf32_Dyn
58132
58133 #else
58134
58135 @@ -433,6 +462,7 @@ extern Elf64_Dyn _DYNAMIC [];
58136 #define elf_note elf64_note
58137 #define elf_addr_t Elf64_Off
58138 #define Elf_Half Elf64_Half
58139 +#define elf_dyn Elf64_Dyn
58140
58141 #endif
58142
58143 diff --git a/include/linux/filter.h b/include/linux/filter.h
58144 index 8eeb205..d59bfa2 100644
58145 --- a/include/linux/filter.h
58146 +++ b/include/linux/filter.h
58147 @@ -134,6 +134,7 @@ struct sock_fprog { /* Required for SO_ATTACH_FILTER. */
58148
58149 struct sk_buff;
58150 struct sock;
58151 +struct bpf_jit_work;
58152
58153 struct sk_filter
58154 {
58155 @@ -141,6 +142,9 @@ struct sk_filter
58156 unsigned int len; /* Number of filter blocks */
58157 unsigned int (*bpf_func)(const struct sk_buff *skb,
58158 const struct sock_filter *filter);
58159 +#ifdef CONFIG_BPF_JIT
58160 + struct bpf_jit_work *work;
58161 +#endif
58162 struct rcu_head rcu;
58163 struct sock_filter insns[0];
58164 };
58165 diff --git a/include/linux/firewire.h b/include/linux/firewire.h
58166 index 84ccf8e..2e9b14c 100644
58167 --- a/include/linux/firewire.h
58168 +++ b/include/linux/firewire.h
58169 @@ -428,7 +428,7 @@ struct fw_iso_context {
58170 union {
58171 fw_iso_callback_t sc;
58172 fw_iso_mc_callback_t mc;
58173 - } callback;
58174 + } __no_const callback;
58175 void *callback_data;
58176 };
58177
58178 diff --git a/include/linux/fs.h b/include/linux/fs.h
58179 index e0bc4ff..d79c2fa 100644
58180 --- a/include/linux/fs.h
58181 +++ b/include/linux/fs.h
58182 @@ -1608,7 +1608,8 @@ struct file_operations {
58183 int (*setlease)(struct file *, long, struct file_lock **);
58184 long (*fallocate)(struct file *file, int mode, loff_t offset,
58185 loff_t len);
58186 -};
58187 +} __do_const;
58188 +typedef struct file_operations __no_const file_operations_no_const;
58189
58190 struct inode_operations {
58191 struct dentry * (*lookup) (struct inode *,struct dentry *, struct nameidata *);
58192 diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
58193 index 003dc0f..3c4ea97 100644
58194 --- a/include/linux/fs_struct.h
58195 +++ b/include/linux/fs_struct.h
58196 @@ -6,7 +6,7 @@
58197 #include <linux/seqlock.h>
58198
58199 struct fs_struct {
58200 - int users;
58201 + atomic_t users;
58202 spinlock_t lock;
58203 seqcount_t seq;
58204 int umask;
58205 diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
58206 index ce31408..b1ad003 100644
58207 --- a/include/linux/fscache-cache.h
58208 +++ b/include/linux/fscache-cache.h
58209 @@ -102,7 +102,7 @@ struct fscache_operation {
58210 fscache_operation_release_t release;
58211 };
58212
58213 -extern atomic_t fscache_op_debug_id;
58214 +extern atomic_unchecked_t fscache_op_debug_id;
58215 extern void fscache_op_work_func(struct work_struct *work);
58216
58217 extern void fscache_enqueue_operation(struct fscache_operation *);
58218 @@ -122,7 +122,7 @@ static inline void fscache_operation_init(struct fscache_operation *op,
58219 {
58220 INIT_WORK(&op->work, fscache_op_work_func);
58221 atomic_set(&op->usage, 1);
58222 - op->debug_id = atomic_inc_return(&fscache_op_debug_id);
58223 + op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
58224 op->processor = processor;
58225 op->release = release;
58226 INIT_LIST_HEAD(&op->pend_link);
58227 diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
58228 index 2a53f10..0187fdf 100644
58229 --- a/include/linux/fsnotify.h
58230 +++ b/include/linux/fsnotify.h
58231 @@ -314,7 +314,7 @@ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid)
58232 */
58233 static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name)
58234 {
58235 - return kstrdup(name, GFP_KERNEL);
58236 + return (const unsigned char *)kstrdup((const char *)name, GFP_KERNEL);
58237 }
58238
58239 /*
58240 diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
58241 index 91d0e0a3..035666b 100644
58242 --- a/include/linux/fsnotify_backend.h
58243 +++ b/include/linux/fsnotify_backend.h
58244 @@ -105,6 +105,7 @@ struct fsnotify_ops {
58245 void (*freeing_mark)(struct fsnotify_mark *mark, struct fsnotify_group *group);
58246 void (*free_event_priv)(struct fsnotify_event_private_data *priv);
58247 };
58248 +typedef struct fsnotify_ops __no_const fsnotify_ops_no_const;
58249
58250 /*
58251 * A group is a "thing" that wants to receive notification about filesystem
58252 diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
58253 index c3da42d..c70e0df 100644
58254 --- a/include/linux/ftrace_event.h
58255 +++ b/include/linux/ftrace_event.h
58256 @@ -97,7 +97,7 @@ struct trace_event_functions {
58257 trace_print_func raw;
58258 trace_print_func hex;
58259 trace_print_func binary;
58260 -};
58261 +} __no_const;
58262
58263 struct trace_event {
58264 struct hlist_node node;
58265 @@ -254,7 +254,7 @@ extern int trace_define_field(struct ftrace_event_call *call, const char *type,
58266 extern int trace_add_event_call(struct ftrace_event_call *call);
58267 extern void trace_remove_event_call(struct ftrace_event_call *call);
58268
58269 -#define is_signed_type(type) (((type)(-1)) < 0)
58270 +#define is_signed_type(type) (((type)(-1)) < (type)1)
58271
58272 int trace_set_clr_event(const char *system, const char *event, int set);
58273
58274 diff --git a/include/linux/genhd.h b/include/linux/genhd.h
58275 index 6d18f35..ab71e2c 100644
58276 --- a/include/linux/genhd.h
58277 +++ b/include/linux/genhd.h
58278 @@ -185,7 +185,7 @@ struct gendisk {
58279 struct kobject *slave_dir;
58280
58281 struct timer_rand_state *random;
58282 - atomic_t sync_io; /* RAID */
58283 + atomic_unchecked_t sync_io; /* RAID */
58284 struct disk_events *ev;
58285 #ifdef CONFIG_BLK_DEV_INTEGRITY
58286 struct blk_integrity *integrity;
58287 diff --git a/include/linux/gracl.h b/include/linux/gracl.h
58288 new file mode 100644
58289 index 0000000..8a130b6
58290 --- /dev/null
58291 +++ b/include/linux/gracl.h
58292 @@ -0,0 +1,319 @@
58293 +#ifndef GR_ACL_H
58294 +#define GR_ACL_H
58295 +
58296 +#include <linux/grdefs.h>
58297 +#include <linux/resource.h>
58298 +#include <linux/capability.h>
58299 +#include <linux/dcache.h>
58300 +#include <asm/resource.h>
58301 +
58302 +/* Major status information */
58303 +
58304 +#define GR_VERSION "grsecurity 2.9"
58305 +#define GRSECURITY_VERSION 0x2900
58306 +
58307 +enum {
58308 + GR_SHUTDOWN = 0,
58309 + GR_ENABLE = 1,
58310 + GR_SPROLE = 2,
58311 + GR_RELOAD = 3,
58312 + GR_SEGVMOD = 4,
58313 + GR_STATUS = 5,
58314 + GR_UNSPROLE = 6,
58315 + GR_PASSSET = 7,
58316 + GR_SPROLEPAM = 8,
58317 +};
58318 +
58319 +/* Password setup definitions
58320 + * kernel/grhash.c */
58321 +enum {
58322 + GR_PW_LEN = 128,
58323 + GR_SALT_LEN = 16,
58324 + GR_SHA_LEN = 32,
58325 +};
58326 +
58327 +enum {
58328 + GR_SPROLE_LEN = 64,
58329 +};
58330 +
58331 +enum {
58332 + GR_NO_GLOB = 0,
58333 + GR_REG_GLOB,
58334 + GR_CREATE_GLOB
58335 +};
58336 +
58337 +#define GR_NLIMITS 32
58338 +
58339 +/* Begin Data Structures */
58340 +
58341 +struct sprole_pw {
58342 + unsigned char *rolename;
58343 + unsigned char salt[GR_SALT_LEN];
58344 + unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
58345 +};
58346 +
58347 +struct name_entry {
58348 + __u32 key;
58349 + ino_t inode;
58350 + dev_t device;
58351 + char *name;
58352 + __u16 len;
58353 + __u8 deleted;
58354 + struct name_entry *prev;
58355 + struct name_entry *next;
58356 +};
58357 +
58358 +struct inodev_entry {
58359 + struct name_entry *nentry;
58360 + struct inodev_entry *prev;
58361 + struct inodev_entry *next;
58362 +};
58363 +
58364 +struct acl_role_db {
58365 + struct acl_role_label **r_hash;
58366 + __u32 r_size;
58367 +};
58368 +
58369 +struct inodev_db {
58370 + struct inodev_entry **i_hash;
58371 + __u32 i_size;
58372 +};
58373 +
58374 +struct name_db {
58375 + struct name_entry **n_hash;
58376 + __u32 n_size;
58377 +};
58378 +
58379 +struct crash_uid {
58380 + uid_t uid;
58381 + unsigned long expires;
58382 +};
58383 +
58384 +struct gr_hash_struct {
58385 + void **table;
58386 + void **nametable;
58387 + void *first;
58388 + __u32 table_size;
58389 + __u32 used_size;
58390 + int type;
58391 +};
58392 +
58393 +/* Userspace Grsecurity ACL data structures */
58394 +
58395 +struct acl_subject_label {
58396 + char *filename;
58397 + ino_t inode;
58398 + dev_t device;
58399 + __u32 mode;
58400 + kernel_cap_t cap_mask;
58401 + kernel_cap_t cap_lower;
58402 + kernel_cap_t cap_invert_audit;
58403 +
58404 + struct rlimit res[GR_NLIMITS];
58405 + __u32 resmask;
58406 +
58407 + __u8 user_trans_type;
58408 + __u8 group_trans_type;
58409 + uid_t *user_transitions;
58410 + gid_t *group_transitions;
58411 + __u16 user_trans_num;
58412 + __u16 group_trans_num;
58413 +
58414 + __u32 sock_families[2];
58415 + __u32 ip_proto[8];
58416 + __u32 ip_type;
58417 + struct acl_ip_label **ips;
58418 + __u32 ip_num;
58419 + __u32 inaddr_any_override;
58420 +
58421 + __u32 crashes;
58422 + unsigned long expires;
58423 +
58424 + struct acl_subject_label *parent_subject;
58425 + struct gr_hash_struct *hash;
58426 + struct acl_subject_label *prev;
58427 + struct acl_subject_label *next;
58428 +
58429 + struct acl_object_label **obj_hash;
58430 + __u32 obj_hash_size;
58431 + __u16 pax_flags;
58432 +};
58433 +
58434 +struct role_allowed_ip {
58435 + __u32 addr;
58436 + __u32 netmask;
58437 +
58438 + struct role_allowed_ip *prev;
58439 + struct role_allowed_ip *next;
58440 +};
58441 +
58442 +struct role_transition {
58443 + char *rolename;
58444 +
58445 + struct role_transition *prev;
58446 + struct role_transition *next;
58447 +};
58448 +
58449 +struct acl_role_label {
58450 + char *rolename;
58451 + uid_t uidgid;
58452 + __u16 roletype;
58453 +
58454 + __u16 auth_attempts;
58455 + unsigned long expires;
58456 +
58457 + struct acl_subject_label *root_label;
58458 + struct gr_hash_struct *hash;
58459 +
58460 + struct acl_role_label *prev;
58461 + struct acl_role_label *next;
58462 +
58463 + struct role_transition *transitions;
58464 + struct role_allowed_ip *allowed_ips;
58465 + uid_t *domain_children;
58466 + __u16 domain_child_num;
58467 +
58468 + umode_t umask;
58469 +
58470 + struct acl_subject_label **subj_hash;
58471 + __u32 subj_hash_size;
58472 +};
58473 +
58474 +struct user_acl_role_db {
58475 + struct acl_role_label **r_table;
58476 + __u32 num_pointers; /* Number of allocations to track */
58477 + __u32 num_roles; /* Number of roles */
58478 + __u32 num_domain_children; /* Number of domain children */
58479 + __u32 num_subjects; /* Number of subjects */
58480 + __u32 num_objects; /* Number of objects */
58481 +};
58482 +
58483 +struct acl_object_label {
58484 + char *filename;
58485 + ino_t inode;
58486 + dev_t device;
58487 + __u32 mode;
58488 +
58489 + struct acl_subject_label *nested;
58490 + struct acl_object_label *globbed;
58491 +
58492 + /* next two structures not used */
58493 +
58494 + struct acl_object_label *prev;
58495 + struct acl_object_label *next;
58496 +};
58497 +
58498 +struct acl_ip_label {
58499 + char *iface;
58500 + __u32 addr;
58501 + __u32 netmask;
58502 + __u16 low, high;
58503 + __u8 mode;
58504 + __u32 type;
58505 + __u32 proto[8];
58506 +
58507 + /* next two structures not used */
58508 +
58509 + struct acl_ip_label *prev;
58510 + struct acl_ip_label *next;
58511 +};
58512 +
58513 +struct gr_arg {
58514 + struct user_acl_role_db role_db;
58515 + unsigned char pw[GR_PW_LEN];
58516 + unsigned char salt[GR_SALT_LEN];
58517 + unsigned char sum[GR_SHA_LEN];
58518 + unsigned char sp_role[GR_SPROLE_LEN];
58519 + struct sprole_pw *sprole_pws;
58520 + dev_t segv_device;
58521 + ino_t segv_inode;
58522 + uid_t segv_uid;
58523 + __u16 num_sprole_pws;
58524 + __u16 mode;
58525 +};
58526 +
58527 +struct gr_arg_wrapper {
58528 + struct gr_arg *arg;
58529 + __u32 version;
58530 + __u32 size;
58531 +};
58532 +
58533 +struct subject_map {
58534 + struct acl_subject_label *user;
58535 + struct acl_subject_label *kernel;
58536 + struct subject_map *prev;
58537 + struct subject_map *next;
58538 +};
58539 +
58540 +struct acl_subj_map_db {
58541 + struct subject_map **s_hash;
58542 + __u32 s_size;
58543 +};
58544 +
58545 +/* End Data Structures Section */
58546 +
58547 +/* Hash functions generated by empirical testing by Brad Spengler
58548 + Makes good use of the low bits of the inode. Generally 0-1 times
58549 + in loop for successful match. 0-3 for unsuccessful match.
58550 + Shift/add algorithm with modulus of table size and an XOR*/
58551 +
58552 +static __inline__ unsigned int
58553 +rhash(const uid_t uid, const __u16 type, const unsigned int sz)
58554 +{
58555 + return ((((uid + type) << (16 + type)) ^ uid) % sz);
58556 +}
58557 +
58558 + static __inline__ unsigned int
58559 +shash(const struct acl_subject_label *userp, const unsigned int sz)
58560 +{
58561 + return ((const unsigned long)userp % sz);
58562 +}
58563 +
58564 +static __inline__ unsigned int
58565 +fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
58566 +{
58567 + return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
58568 +}
58569 +
58570 +static __inline__ unsigned int
58571 +nhash(const char *name, const __u16 len, const unsigned int sz)
58572 +{
58573 + return full_name_hash((const unsigned char *)name, len) % sz;
58574 +}
58575 +
58576 +#define FOR_EACH_ROLE_START(role) \
58577 + role = role_list; \
58578 + while (role) {
58579 +
58580 +#define FOR_EACH_ROLE_END(role) \
58581 + role = role->prev; \
58582 + }
58583 +
58584 +#define FOR_EACH_SUBJECT_START(role,subj,iter) \
58585 + subj = NULL; \
58586 + iter = 0; \
58587 + while (iter < role->subj_hash_size) { \
58588 + if (subj == NULL) \
58589 + subj = role->subj_hash[iter]; \
58590 + if (subj == NULL) { \
58591 + iter++; \
58592 + continue; \
58593 + }
58594 +
58595 +#define FOR_EACH_SUBJECT_END(subj,iter) \
58596 + subj = subj->next; \
58597 + if (subj == NULL) \
58598 + iter++; \
58599 + }
58600 +
58601 +
58602 +#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
58603 + subj = role->hash->first; \
58604 + while (subj != NULL) {
58605 +
58606 +#define FOR_EACH_NESTED_SUBJECT_END(subj) \
58607 + subj = subj->next; \
58608 + }
58609 +
58610 +#endif
58611 +
58612 diff --git a/include/linux/gralloc.h b/include/linux/gralloc.h
58613 new file mode 100644
58614 index 0000000..323ecf2
58615 --- /dev/null
58616 +++ b/include/linux/gralloc.h
58617 @@ -0,0 +1,9 @@
58618 +#ifndef __GRALLOC_H
58619 +#define __GRALLOC_H
58620 +
58621 +void acl_free_all(void);
58622 +int acl_alloc_stack_init(unsigned long size);
58623 +void *acl_alloc(unsigned long len);
58624 +void *acl_alloc_num(unsigned long num, unsigned long len);
58625 +
58626 +#endif
58627 diff --git a/include/linux/grdefs.h b/include/linux/grdefs.h
58628 new file mode 100644
58629 index 0000000..b30e9bc
58630 --- /dev/null
58631 +++ b/include/linux/grdefs.h
58632 @@ -0,0 +1,140 @@
58633 +#ifndef GRDEFS_H
58634 +#define GRDEFS_H
58635 +
58636 +/* Begin grsecurity status declarations */
58637 +
58638 +enum {
58639 + GR_READY = 0x01,
58640 + GR_STATUS_INIT = 0x00 // disabled state
58641 +};
58642 +
58643 +/* Begin ACL declarations */
58644 +
58645 +/* Role flags */
58646 +
58647 +enum {
58648 + GR_ROLE_USER = 0x0001,
58649 + GR_ROLE_GROUP = 0x0002,
58650 + GR_ROLE_DEFAULT = 0x0004,
58651 + GR_ROLE_SPECIAL = 0x0008,
58652 + GR_ROLE_AUTH = 0x0010,
58653 + GR_ROLE_NOPW = 0x0020,
58654 + GR_ROLE_GOD = 0x0040,
58655 + GR_ROLE_LEARN = 0x0080,
58656 + GR_ROLE_TPE = 0x0100,
58657 + GR_ROLE_DOMAIN = 0x0200,
58658 + GR_ROLE_PAM = 0x0400,
58659 + GR_ROLE_PERSIST = 0x0800
58660 +};
58661 +
58662 +/* ACL Subject and Object mode flags */
58663 +enum {
58664 + GR_DELETED = 0x80000000
58665 +};
58666 +
58667 +/* ACL Object-only mode flags */
58668 +enum {
58669 + GR_READ = 0x00000001,
58670 + GR_APPEND = 0x00000002,
58671 + GR_WRITE = 0x00000004,
58672 + GR_EXEC = 0x00000008,
58673 + GR_FIND = 0x00000010,
58674 + GR_INHERIT = 0x00000020,
58675 + GR_SETID = 0x00000040,
58676 + GR_CREATE = 0x00000080,
58677 + GR_DELETE = 0x00000100,
58678 + GR_LINK = 0x00000200,
58679 + GR_AUDIT_READ = 0x00000400,
58680 + GR_AUDIT_APPEND = 0x00000800,
58681 + GR_AUDIT_WRITE = 0x00001000,
58682 + GR_AUDIT_EXEC = 0x00002000,
58683 + GR_AUDIT_FIND = 0x00004000,
58684 + GR_AUDIT_INHERIT= 0x00008000,
58685 + GR_AUDIT_SETID = 0x00010000,
58686 + GR_AUDIT_CREATE = 0x00020000,
58687 + GR_AUDIT_DELETE = 0x00040000,
58688 + GR_AUDIT_LINK = 0x00080000,
58689 + GR_PTRACERD = 0x00100000,
58690 + GR_NOPTRACE = 0x00200000,
58691 + GR_SUPPRESS = 0x00400000,
58692 + GR_NOLEARN = 0x00800000,
58693 + GR_INIT_TRANSFER= 0x01000000
58694 +};
58695 +
58696 +#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
58697 + GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
58698 + GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
58699 +
58700 +/* ACL subject-only mode flags */
58701 +enum {
58702 + GR_KILL = 0x00000001,
58703 + GR_VIEW = 0x00000002,
58704 + GR_PROTECTED = 0x00000004,
58705 + GR_LEARN = 0x00000008,
58706 + GR_OVERRIDE = 0x00000010,
58707 + /* just a placeholder, this mode is only used in userspace */
58708 + GR_DUMMY = 0x00000020,
58709 + GR_PROTSHM = 0x00000040,
58710 + GR_KILLPROC = 0x00000080,
58711 + GR_KILLIPPROC = 0x00000100,
58712 + /* just a placeholder, this mode is only used in userspace */
58713 + GR_NOTROJAN = 0x00000200,
58714 + GR_PROTPROCFD = 0x00000400,
58715 + GR_PROCACCT = 0x00000800,
58716 + GR_RELAXPTRACE = 0x00001000,
58717 + GR_NESTED = 0x00002000,
58718 + GR_INHERITLEARN = 0x00004000,
58719 + GR_PROCFIND = 0x00008000,
58720 + GR_POVERRIDE = 0x00010000,
58721 + GR_KERNELAUTH = 0x00020000,
58722 + GR_ATSECURE = 0x00040000,
58723 + GR_SHMEXEC = 0x00080000
58724 +};
58725 +
58726 +enum {
58727 + GR_PAX_ENABLE_SEGMEXEC = 0x0001,
58728 + GR_PAX_ENABLE_PAGEEXEC = 0x0002,
58729 + GR_PAX_ENABLE_MPROTECT = 0x0004,
58730 + GR_PAX_ENABLE_RANDMMAP = 0x0008,
58731 + GR_PAX_ENABLE_EMUTRAMP = 0x0010,
58732 + GR_PAX_DISABLE_SEGMEXEC = 0x0100,
58733 + GR_PAX_DISABLE_PAGEEXEC = 0x0200,
58734 + GR_PAX_DISABLE_MPROTECT = 0x0400,
58735 + GR_PAX_DISABLE_RANDMMAP = 0x0800,
58736 + GR_PAX_DISABLE_EMUTRAMP = 0x1000,
58737 +};
58738 +
58739 +enum {
58740 + GR_ID_USER = 0x01,
58741 + GR_ID_GROUP = 0x02,
58742 +};
58743 +
58744 +enum {
58745 + GR_ID_ALLOW = 0x01,
58746 + GR_ID_DENY = 0x02,
58747 +};
58748 +
58749 +#define GR_CRASH_RES 31
58750 +#define GR_UIDTABLE_MAX 500
58751 +
58752 +/* begin resource learning section */
58753 +enum {
58754 + GR_RLIM_CPU_BUMP = 60,
58755 + GR_RLIM_FSIZE_BUMP = 50000,
58756 + GR_RLIM_DATA_BUMP = 10000,
58757 + GR_RLIM_STACK_BUMP = 1000,
58758 + GR_RLIM_CORE_BUMP = 10000,
58759 + GR_RLIM_RSS_BUMP = 500000,
58760 + GR_RLIM_NPROC_BUMP = 1,
58761 + GR_RLIM_NOFILE_BUMP = 5,
58762 + GR_RLIM_MEMLOCK_BUMP = 50000,
58763 + GR_RLIM_AS_BUMP = 500000,
58764 + GR_RLIM_LOCKS_BUMP = 2,
58765 + GR_RLIM_SIGPENDING_BUMP = 5,
58766 + GR_RLIM_MSGQUEUE_BUMP = 10000,
58767 + GR_RLIM_NICE_BUMP = 1,
58768 + GR_RLIM_RTPRIO_BUMP = 1,
58769 + GR_RLIM_RTTIME_BUMP = 1000000
58770 +};
58771 +
58772 +#endif
58773 diff --git a/include/linux/grinternal.h b/include/linux/grinternal.h
58774 new file mode 100644
58775 index 0000000..da390f1
58776 --- /dev/null
58777 +++ b/include/linux/grinternal.h
58778 @@ -0,0 +1,221 @@
58779 +#ifndef __GRINTERNAL_H
58780 +#define __GRINTERNAL_H
58781 +
58782 +#ifdef CONFIG_GRKERNSEC
58783 +
58784 +#include <linux/fs.h>
58785 +#include <linux/mnt_namespace.h>
58786 +#include <linux/nsproxy.h>
58787 +#include <linux/gracl.h>
58788 +#include <linux/grdefs.h>
58789 +#include <linux/grmsg.h>
58790 +
58791 +void gr_add_learn_entry(const char *fmt, ...)
58792 + __attribute__ ((format (printf, 1, 2)));
58793 +__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
58794 + const struct vfsmount *mnt);
58795 +__u32 gr_check_create(const struct dentry *new_dentry,
58796 + const struct dentry *parent,
58797 + const struct vfsmount *mnt, const __u32 mode);
58798 +int gr_check_protected_task(const struct task_struct *task);
58799 +__u32 to_gr_audit(const __u32 reqmode);
58800 +int gr_set_acls(const int type);
58801 +int gr_apply_subject_to_task(struct task_struct *task);
58802 +int gr_acl_is_enabled(void);
58803 +char gr_roletype_to_char(void);
58804 +
58805 +void gr_handle_alertkill(struct task_struct *task);
58806 +char *gr_to_filename(const struct dentry *dentry,
58807 + const struct vfsmount *mnt);
58808 +char *gr_to_filename1(const struct dentry *dentry,
58809 + const struct vfsmount *mnt);
58810 +char *gr_to_filename2(const struct dentry *dentry,
58811 + const struct vfsmount *mnt);
58812 +char *gr_to_filename3(const struct dentry *dentry,
58813 + const struct vfsmount *mnt);
58814 +
58815 +extern int grsec_enable_ptrace_readexec;
58816 +extern int grsec_enable_harden_ptrace;
58817 +extern int grsec_enable_link;
58818 +extern int grsec_enable_fifo;
58819 +extern int grsec_enable_execve;
58820 +extern int grsec_enable_shm;
58821 +extern int grsec_enable_execlog;
58822 +extern int grsec_enable_signal;
58823 +extern int grsec_enable_audit_ptrace;
58824 +extern int grsec_enable_forkfail;
58825 +extern int grsec_enable_time;
58826 +extern int grsec_enable_rofs;
58827 +extern int grsec_enable_chroot_shmat;
58828 +extern int grsec_enable_chroot_mount;
58829 +extern int grsec_enable_chroot_double;
58830 +extern int grsec_enable_chroot_pivot;
58831 +extern int grsec_enable_chroot_chdir;
58832 +extern int grsec_enable_chroot_chmod;
58833 +extern int grsec_enable_chroot_mknod;
58834 +extern int grsec_enable_chroot_fchdir;
58835 +extern int grsec_enable_chroot_nice;
58836 +extern int grsec_enable_chroot_execlog;
58837 +extern int grsec_enable_chroot_caps;
58838 +extern int grsec_enable_chroot_sysctl;
58839 +extern int grsec_enable_chroot_unix;
58840 +extern int grsec_enable_tpe;
58841 +extern int grsec_tpe_gid;
58842 +extern int grsec_enable_tpe_all;
58843 +extern int grsec_enable_tpe_invert;
58844 +extern int grsec_enable_socket_all;
58845 +extern int grsec_socket_all_gid;
58846 +extern int grsec_enable_socket_client;
58847 +extern int grsec_socket_client_gid;
58848 +extern int grsec_enable_socket_server;
58849 +extern int grsec_socket_server_gid;
58850 +extern int grsec_audit_gid;
58851 +extern int grsec_enable_group;
58852 +extern int grsec_enable_audit_textrel;
58853 +extern int grsec_enable_log_rwxmaps;
58854 +extern int grsec_enable_mount;
58855 +extern int grsec_enable_chdir;
58856 +extern int grsec_resource_logging;
58857 +extern int grsec_enable_blackhole;
58858 +extern int grsec_lastack_retries;
58859 +extern int grsec_enable_brute;
58860 +extern int grsec_lock;
58861 +
58862 +extern spinlock_t grsec_alert_lock;
58863 +extern unsigned long grsec_alert_wtime;
58864 +extern unsigned long grsec_alert_fyet;
58865 +
58866 +extern spinlock_t grsec_audit_lock;
58867 +
58868 +extern rwlock_t grsec_exec_file_lock;
58869 +
58870 +#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
58871 + gr_to_filename2((tsk)->exec_file->f_path.dentry, \
58872 + (tsk)->exec_file->f_vfsmnt) : "/")
58873 +
58874 +#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
58875 + gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
58876 + (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
58877 +
58878 +#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
58879 + gr_to_filename((tsk)->exec_file->f_path.dentry, \
58880 + (tsk)->exec_file->f_vfsmnt) : "/")
58881 +
58882 +#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
58883 + gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
58884 + (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
58885 +
58886 +#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
58887 +
58888 +#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
58889 +
58890 +#define DEFAULTSECARGS(task, cred, pcred) gr_task_fullpath(task), (task)->comm, \
58891 + (task)->pid, (cred)->uid, \
58892 + (cred)->euid, (cred)->gid, (cred)->egid, \
58893 + gr_parent_task_fullpath(task), \
58894 + (task)->real_parent->comm, (task)->real_parent->pid, \
58895 + (pcred)->uid, (pcred)->euid, \
58896 + (pcred)->gid, (pcred)->egid
58897 +
58898 +#define GR_CHROOT_CAPS {{ \
58899 + CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
58900 + CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
58901 + CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
58902 + CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
58903 + CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
58904 + CAP_TO_MASK(CAP_IPC_OWNER) | CAP_TO_MASK(CAP_SETFCAP), \
58905 + CAP_TO_MASK(CAP_SYSLOG) | CAP_TO_MASK(CAP_MAC_ADMIN) }}
58906 +
58907 +#define security_learn(normal_msg,args...) \
58908 +({ \
58909 + read_lock(&grsec_exec_file_lock); \
58910 + gr_add_learn_entry(normal_msg "\n", ## args); \
58911 + read_unlock(&grsec_exec_file_lock); \
58912 +})
58913 +
58914 +enum {
58915 + GR_DO_AUDIT,
58916 + GR_DONT_AUDIT,
58917 + /* used for non-audit messages that we shouldn't kill the task on */
58918 + GR_DONT_AUDIT_GOOD
58919 +};
58920 +
58921 +enum {
58922 + GR_TTYSNIFF,
58923 + GR_RBAC,
58924 + GR_RBAC_STR,
58925 + GR_STR_RBAC,
58926 + GR_RBAC_MODE2,
58927 + GR_RBAC_MODE3,
58928 + GR_FILENAME,
58929 + GR_SYSCTL_HIDDEN,
58930 + GR_NOARGS,
58931 + GR_ONE_INT,
58932 + GR_ONE_INT_TWO_STR,
58933 + GR_ONE_STR,
58934 + GR_STR_INT,
58935 + GR_TWO_STR_INT,
58936 + GR_TWO_INT,
58937 + GR_TWO_U64,
58938 + GR_THREE_INT,
58939 + GR_FIVE_INT_TWO_STR,
58940 + GR_TWO_STR,
58941 + GR_THREE_STR,
58942 + GR_FOUR_STR,
58943 + GR_STR_FILENAME,
58944 + GR_FILENAME_STR,
58945 + GR_FILENAME_TWO_INT,
58946 + GR_FILENAME_TWO_INT_STR,
58947 + GR_TEXTREL,
58948 + GR_PTRACE,
58949 + GR_RESOURCE,
58950 + GR_CAP,
58951 + GR_SIG,
58952 + GR_SIG2,
58953 + GR_CRASH1,
58954 + GR_CRASH2,
58955 + GR_PSACCT,
58956 + GR_RWXMAP
58957 +};
58958 +
58959 +#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
58960 +#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
58961 +#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
58962 +#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
58963 +#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
58964 +#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
58965 +#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
58966 +#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
58967 +#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
58968 +#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
58969 +#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
58970 +#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
58971 +#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
58972 +#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
58973 +#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
58974 +#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
58975 +#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
58976 +#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
58977 +#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
58978 +#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
58979 +#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
58980 +#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
58981 +#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
58982 +#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
58983 +#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
58984 +#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
58985 +#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
58986 +#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
58987 +#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
58988 +#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
58989 +#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
58990 +#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
58991 +#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
58992 +#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
58993 +#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
58994 +
58995 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
58996 +
58997 +#endif
58998 +
58999 +#endif
59000 diff --git a/include/linux/grmsg.h b/include/linux/grmsg.h
59001 new file mode 100644
59002 index 0000000..f885406
59003 --- /dev/null
59004 +++ b/include/linux/grmsg.h
59005 @@ -0,0 +1,109 @@
59006 +#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
59007 +#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
59008 +#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
59009 +#define GR_STOPMOD_MSG "denied modification of module state by "
59010 +#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
59011 +#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
59012 +#define GR_IOPERM_MSG "denied use of ioperm() by "
59013 +#define GR_IOPL_MSG "denied use of iopl() by "
59014 +#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
59015 +#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
59016 +#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
59017 +#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
59018 +#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
59019 +#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
59020 +#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
59021 +#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
59022 +#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
59023 +#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
59024 +#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
59025 +#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
59026 +#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
59027 +#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
59028 +#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
59029 +#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
59030 +#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
59031 +#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
59032 +#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
59033 +#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
59034 +#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
59035 +#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
59036 +#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
59037 +#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
59038 +#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
59039 +#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
59040 +#define GR_EXEC_TPE_MSG "denied untrusted exec (due to %.70s) of %.950s by "
59041 +#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
59042 +#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
59043 +#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
59044 +#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
59045 +#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
59046 +#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
59047 +#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
59048 +#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
59049 +#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
59050 +#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
59051 +#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
59052 +#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
59053 +#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
59054 +#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
59055 +#define GR_INITF_ACL_MSG "init_variables() failed %s by "
59056 +#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
59057 +#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbaged by "
59058 +#define GR_SHUTS_ACL_MSG "shutdown auth success for "
59059 +#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
59060 +#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
59061 +#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
59062 +#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
59063 +#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
59064 +#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
59065 +#define GR_ENABLEF_ACL_MSG "unable to load %s for "
59066 +#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
59067 +#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
59068 +#define GR_RELOADF_ACL_MSG "failed reload of %s for "
59069 +#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
59070 +#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
59071 +#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
59072 +#define GR_SPROLEF_ACL_MSG "special role %s failure for "
59073 +#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
59074 +#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
59075 +#define GR_INVMODE_ACL_MSG "invalid mode %d by "
59076 +#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
59077 +#define GR_FAILFORK_MSG "failed fork with errno %s by "
59078 +#define GR_NICE_CHROOT_MSG "denied priority change by "
59079 +#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
59080 +#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
59081 +#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
59082 +#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
59083 +#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
59084 +#define GR_TIME_MSG "time set by "
59085 +#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
59086 +#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
59087 +#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
59088 +#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
59089 +#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
59090 +#define GR_BIND_MSG "denied bind() by "
59091 +#define GR_CONNECT_MSG "denied connect() by "
59092 +#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
59093 +#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
59094 +#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
59095 +#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
59096 +#define GR_CAP_ACL_MSG "use of %s denied for "
59097 +#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for "
59098 +#define GR_CAP_ACL_MSG2 "use of %s permitted for "
59099 +#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
59100 +#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
59101 +#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
59102 +#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
59103 +#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
59104 +#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
59105 +#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
59106 +#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
59107 +#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
59108 +#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
59109 +#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
59110 +#define GR_VM86_MSG "denied use of vm86 by "
59111 +#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
59112 +#define GR_PTRACE_READEXEC_MSG "denied ptrace of unreadable binary %.950s by "
59113 +#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
59114 +#define GR_BADPROCPID_MSG "denied read of sensitive /proc/pid/%s entry via fd passed across exec by "
59115 diff --git a/include/linux/grsecurity.h b/include/linux/grsecurity.h
59116 new file mode 100644
59117 index 0000000..2ccf677
59118 --- /dev/null
59119 +++ b/include/linux/grsecurity.h
59120 @@ -0,0 +1,229 @@
59121 +#ifndef GR_SECURITY_H
59122 +#define GR_SECURITY_H
59123 +#include <linux/fs.h>
59124 +#include <linux/fs_struct.h>
59125 +#include <linux/binfmts.h>
59126 +#include <linux/gracl.h>
59127 +
59128 +/* notify of brain-dead configs */
59129 +#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
59130 +#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
59131 +#endif
59132 +#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
59133 +#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
59134 +#endif
59135 +#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
59136 +#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
59137 +#endif
59138 +#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
59139 +#error "CONFIG_PAX enabled, but no PaX options are enabled."
59140 +#endif
59141 +
59142 +#include <linux/compat.h>
59143 +
59144 +struct user_arg_ptr {
59145 +#ifdef CONFIG_COMPAT
59146 + bool is_compat;
59147 +#endif
59148 + union {
59149 + const char __user *const __user *native;
59150 +#ifdef CONFIG_COMPAT
59151 + compat_uptr_t __user *compat;
59152 +#endif
59153 + } ptr;
59154 +};
59155 +
59156 +void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags);
59157 +void gr_handle_brute_check(void);
59158 +void gr_handle_kernel_exploit(void);
59159 +int gr_process_user_ban(void);
59160 +
59161 +char gr_roletype_to_char(void);
59162 +
59163 +int gr_acl_enable_at_secure(void);
59164 +
59165 +int gr_check_user_change(int real, int effective, int fs);
59166 +int gr_check_group_change(int real, int effective, int fs);
59167 +
59168 +void gr_del_task_from_ip_table(struct task_struct *p);
59169 +
59170 +int gr_pid_is_chrooted(struct task_struct *p);
59171 +int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
59172 +int gr_handle_chroot_nice(void);
59173 +int gr_handle_chroot_sysctl(const int op);
59174 +int gr_handle_chroot_setpriority(struct task_struct *p,
59175 + const int niceval);
59176 +int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
59177 +int gr_handle_chroot_chroot(const struct dentry *dentry,
59178 + const struct vfsmount *mnt);
59179 +void gr_handle_chroot_chdir(struct path *path);
59180 +int gr_handle_chroot_chmod(const struct dentry *dentry,
59181 + const struct vfsmount *mnt, const int mode);
59182 +int gr_handle_chroot_mknod(const struct dentry *dentry,
59183 + const struct vfsmount *mnt, const int mode);
59184 +int gr_handle_chroot_mount(const struct dentry *dentry,
59185 + const struct vfsmount *mnt,
59186 + const char *dev_name);
59187 +int gr_handle_chroot_pivot(void);
59188 +int gr_handle_chroot_unix(const pid_t pid);
59189 +
59190 +int gr_handle_rawio(const struct inode *inode);
59191 +
59192 +void gr_handle_ioperm(void);
59193 +void gr_handle_iopl(void);
59194 +
59195 +umode_t gr_acl_umask(void);
59196 +
59197 +int gr_tpe_allow(const struct file *file);
59198 +
59199 +void gr_set_chroot_entries(struct task_struct *task, struct path *path);
59200 +void gr_clear_chroot_entries(struct task_struct *task);
59201 +
59202 +void gr_log_forkfail(const int retval);
59203 +void gr_log_timechange(void);
59204 +void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
59205 +void gr_log_chdir(const struct dentry *dentry,
59206 + const struct vfsmount *mnt);
59207 +void gr_log_chroot_exec(const struct dentry *dentry,
59208 + const struct vfsmount *mnt);
59209 +void gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv);
59210 +void gr_log_remount(const char *devname, const int retval);
59211 +void gr_log_unmount(const char *devname, const int retval);
59212 +void gr_log_mount(const char *from, const char *to, const int retval);
59213 +void gr_log_textrel(struct vm_area_struct *vma);
59214 +void gr_log_rwxmmap(struct file *file);
59215 +void gr_log_rwxmprotect(struct file *file);
59216 +
59217 +int gr_handle_follow_link(const struct inode *parent,
59218 + const struct inode *inode,
59219 + const struct dentry *dentry,
59220 + const struct vfsmount *mnt);
59221 +int gr_handle_fifo(const struct dentry *dentry,
59222 + const struct vfsmount *mnt,
59223 + const struct dentry *dir, const int flag,
59224 + const int acc_mode);
59225 +int gr_handle_hardlink(const struct dentry *dentry,
59226 + const struct vfsmount *mnt,
59227 + struct inode *inode,
59228 + const int mode, const char *to);
59229 +
59230 +int gr_is_capable(const int cap);
59231 +int gr_is_capable_nolog(const int cap);
59232 +void gr_learn_resource(const struct task_struct *task, const int limit,
59233 + const unsigned long wanted, const int gt);
59234 +void gr_copy_label(struct task_struct *tsk);
59235 +void gr_handle_crash(struct task_struct *task, const int sig);
59236 +int gr_handle_signal(const struct task_struct *p, const int sig);
59237 +int gr_check_crash_uid(const uid_t uid);
59238 +int gr_check_protected_task(const struct task_struct *task);
59239 +int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
59240 +int gr_acl_handle_mmap(const struct file *file,
59241 + const unsigned long prot);
59242 +int gr_acl_handle_mprotect(const struct file *file,
59243 + const unsigned long prot);
59244 +int gr_check_hidden_task(const struct task_struct *tsk);
59245 +__u32 gr_acl_handle_truncate(const struct dentry *dentry,
59246 + const struct vfsmount *mnt);
59247 +__u32 gr_acl_handle_utime(const struct dentry *dentry,
59248 + const struct vfsmount *mnt);
59249 +__u32 gr_acl_handle_access(const struct dentry *dentry,
59250 + const struct vfsmount *mnt, const int fmode);
59251 +__u32 gr_acl_handle_chmod(const struct dentry *dentry,
59252 + const struct vfsmount *mnt, umode_t *mode);
59253 +__u32 gr_acl_handle_chown(const struct dentry *dentry,
59254 + const struct vfsmount *mnt);
59255 +__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
59256 + const struct vfsmount *mnt);
59257 +int gr_handle_ptrace(struct task_struct *task, const long request);
59258 +int gr_handle_proc_ptrace(struct task_struct *task);
59259 +__u32 gr_acl_handle_execve(const struct dentry *dentry,
59260 + const struct vfsmount *mnt);
59261 +int gr_check_crash_exec(const struct file *filp);
59262 +int gr_acl_is_enabled(void);
59263 +void gr_set_kernel_label(struct task_struct *task);
59264 +void gr_set_role_label(struct task_struct *task, const uid_t uid,
59265 + const gid_t gid);
59266 +int gr_set_proc_label(const struct dentry *dentry,
59267 + const struct vfsmount *mnt,
59268 + const int unsafe_flags);
59269 +__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
59270 + const struct vfsmount *mnt);
59271 +__u32 gr_acl_handle_open(const struct dentry *dentry,
59272 + const struct vfsmount *mnt, int acc_mode);
59273 +__u32 gr_acl_handle_creat(const struct dentry *dentry,
59274 + const struct dentry *p_dentry,
59275 + const struct vfsmount *p_mnt,
59276 + int open_flags, int acc_mode, const int imode);
59277 +void gr_handle_create(const struct dentry *dentry,
59278 + const struct vfsmount *mnt);
59279 +void gr_handle_proc_create(const struct dentry *dentry,
59280 + const struct inode *inode);
59281 +__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
59282 + const struct dentry *parent_dentry,
59283 + const struct vfsmount *parent_mnt,
59284 + const int mode);
59285 +__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
59286 + const struct dentry *parent_dentry,
59287 + const struct vfsmount *parent_mnt);
59288 +__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
59289 + const struct vfsmount *mnt);
59290 +void gr_handle_delete(const ino_t ino, const dev_t dev);
59291 +__u32 gr_acl_handle_unlink(const struct dentry *dentry,
59292 + const struct vfsmount *mnt);
59293 +__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
59294 + const struct dentry *parent_dentry,
59295 + const struct vfsmount *parent_mnt,
59296 + const char *from);
59297 +__u32 gr_acl_handle_link(const struct dentry *new_dentry,
59298 + const struct dentry *parent_dentry,
59299 + const struct vfsmount *parent_mnt,
59300 + const struct dentry *old_dentry,
59301 + const struct vfsmount *old_mnt, const char *to);
59302 +int gr_acl_handle_rename(struct dentry *new_dentry,
59303 + struct dentry *parent_dentry,
59304 + const struct vfsmount *parent_mnt,
59305 + struct dentry *old_dentry,
59306 + struct inode *old_parent_inode,
59307 + struct vfsmount *old_mnt, const char *newname);
59308 +void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
59309 + struct dentry *old_dentry,
59310 + struct dentry *new_dentry,
59311 + struct vfsmount *mnt, const __u8 replace);
59312 +__u32 gr_check_link(const struct dentry *new_dentry,
59313 + const struct dentry *parent_dentry,
59314 + const struct vfsmount *parent_mnt,
59315 + const struct dentry *old_dentry,
59316 + const struct vfsmount *old_mnt);
59317 +int gr_acl_handle_filldir(const struct file *file, const char *name,
59318 + const unsigned int namelen, const ino_t ino);
59319 +
59320 +__u32 gr_acl_handle_unix(const struct dentry *dentry,
59321 + const struct vfsmount *mnt);
59322 +void gr_acl_handle_exit(void);
59323 +void gr_acl_handle_psacct(struct task_struct *task, const long code);
59324 +int gr_acl_handle_procpidmem(const struct task_struct *task);
59325 +int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
59326 +int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
59327 +void gr_audit_ptrace(struct task_struct *task);
59328 +dev_t gr_get_dev_from_dentry(struct dentry *dentry);
59329 +
59330 +int gr_ptrace_readexec(struct file *file, int unsafe_flags);
59331 +
59332 +#ifdef CONFIG_GRKERNSEC
59333 +void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
59334 +void gr_handle_vm86(void);
59335 +void gr_handle_mem_readwrite(u64 from, u64 to);
59336 +
59337 +void gr_log_badprocpid(const char *entry);
59338 +
59339 +extern int grsec_enable_dmesg;
59340 +extern int grsec_disable_privio;
59341 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
59342 +extern int grsec_enable_chroot_findtask;
59343 +#endif
59344 +#ifdef CONFIG_GRKERNSEC_SETXID
59345 +extern int grsec_enable_setxid;
59346 +#endif
59347 +#endif
59348 +
59349 +#endif
59350 diff --git a/include/linux/grsock.h b/include/linux/grsock.h
59351 new file mode 100644
59352 index 0000000..e7ffaaf
59353 --- /dev/null
59354 +++ b/include/linux/grsock.h
59355 @@ -0,0 +1,19 @@
59356 +#ifndef __GRSOCK_H
59357 +#define __GRSOCK_H
59358 +
59359 +extern void gr_attach_curr_ip(const struct sock *sk);
59360 +extern int gr_handle_sock_all(const int family, const int type,
59361 + const int protocol);
59362 +extern int gr_handle_sock_server(const struct sockaddr *sck);
59363 +extern int gr_handle_sock_server_other(const struct sock *sck);
59364 +extern int gr_handle_sock_client(const struct sockaddr *sck);
59365 +extern int gr_search_connect(struct socket * sock,
59366 + struct sockaddr_in * addr);
59367 +extern int gr_search_bind(struct socket * sock,
59368 + struct sockaddr_in * addr);
59369 +extern int gr_search_listen(struct socket * sock);
59370 +extern int gr_search_accept(struct socket * sock);
59371 +extern int gr_search_socket(const int domain, const int type,
59372 + const int protocol);
59373 +
59374 +#endif
59375 diff --git a/include/linux/hid.h b/include/linux/hid.h
59376 index c235e4e..f0cf7a0 100644
59377 --- a/include/linux/hid.h
59378 +++ b/include/linux/hid.h
59379 @@ -679,7 +679,7 @@ struct hid_ll_driver {
59380 unsigned int code, int value);
59381
59382 int (*parse)(struct hid_device *hdev);
59383 -};
59384 +} __no_const;
59385
59386 #define PM_HINT_FULLON 1<<5
59387 #define PM_HINT_NORMAL 1<<1
59388 diff --git a/include/linux/highmem.h b/include/linux/highmem.h
59389 index 3a93f73..b19d0b3 100644
59390 --- a/include/linux/highmem.h
59391 +++ b/include/linux/highmem.h
59392 @@ -185,6 +185,18 @@ static inline void clear_highpage(struct page *page)
59393 kunmap_atomic(kaddr, KM_USER0);
59394 }
59395
59396 +static inline void sanitize_highpage(struct page *page)
59397 +{
59398 + void *kaddr;
59399 + unsigned long flags;
59400 +
59401 + local_irq_save(flags);
59402 + kaddr = kmap_atomic(page, KM_CLEARPAGE);
59403 + clear_page(kaddr);
59404 + kunmap_atomic(kaddr, KM_CLEARPAGE);
59405 + local_irq_restore(flags);
59406 +}
59407 +
59408 static inline void zero_user_segments(struct page *page,
59409 unsigned start1, unsigned end1,
59410 unsigned start2, unsigned end2)
59411 diff --git a/include/linux/i2c.h b/include/linux/i2c.h
59412 index 07d103a..04ec65b 100644
59413 --- a/include/linux/i2c.h
59414 +++ b/include/linux/i2c.h
59415 @@ -364,6 +364,7 @@ struct i2c_algorithm {
59416 /* To determine what the adapter supports */
59417 u32 (*functionality) (struct i2c_adapter *);
59418 };
59419 +typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
59420
59421 /*
59422 * i2c_adapter is the structure used to identify a physical i2c bus along
59423 diff --git a/include/linux/i2o.h b/include/linux/i2o.h
59424 index a6deef4..c56a7f2 100644
59425 --- a/include/linux/i2o.h
59426 +++ b/include/linux/i2o.h
59427 @@ -564,7 +564,7 @@ struct i2o_controller {
59428 struct i2o_device *exec; /* Executive */
59429 #if BITS_PER_LONG == 64
59430 spinlock_t context_list_lock; /* lock for context_list */
59431 - atomic_t context_list_counter; /* needed for unique contexts */
59432 + atomic_unchecked_t context_list_counter; /* needed for unique contexts */
59433 struct list_head context_list; /* list of context id's
59434 and pointers */
59435 #endif
59436 diff --git a/include/linux/init.h b/include/linux/init.h
59437 index 9146f39..885354d 100644
59438 --- a/include/linux/init.h
59439 +++ b/include/linux/init.h
59440 @@ -293,13 +293,13 @@ void __init parse_early_options(char *cmdline);
59441
59442 /* Each module must use one module_init(). */
59443 #define module_init(initfn) \
59444 - static inline initcall_t __inittest(void) \
59445 + static inline __used initcall_t __inittest(void) \
59446 { return initfn; } \
59447 int init_module(void) __attribute__((alias(#initfn)));
59448
59449 /* This is only required if you want to be unloadable. */
59450 #define module_exit(exitfn) \
59451 - static inline exitcall_t __exittest(void) \
59452 + static inline __used exitcall_t __exittest(void) \
59453 { return exitfn; } \
59454 void cleanup_module(void) __attribute__((alias(#exitfn)));
59455
59456 diff --git a/include/linux/init_task.h b/include/linux/init_task.h
59457 index 32574ee..00d4ef1 100644
59458 --- a/include/linux/init_task.h
59459 +++ b/include/linux/init_task.h
59460 @@ -128,6 +128,12 @@ extern struct cred init_cred;
59461
59462 #define INIT_TASK_COMM "swapper"
59463
59464 +#ifdef CONFIG_X86
59465 +#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
59466 +#else
59467 +#define INIT_TASK_THREAD_INFO
59468 +#endif
59469 +
59470 /*
59471 * INIT_TASK is used to set up the first task table, touch at
59472 * your own risk!. Base=0, limit=0x1fffff (=2MB)
59473 @@ -166,6 +172,7 @@ extern struct cred init_cred;
59474 RCU_INIT_POINTER(.cred, &init_cred), \
59475 .comm = INIT_TASK_COMM, \
59476 .thread = INIT_THREAD, \
59477 + INIT_TASK_THREAD_INFO \
59478 .fs = &init_fs, \
59479 .files = &init_files, \
59480 .signal = &init_signals, \
59481 diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
59482 index e6ca56d..8583707 100644
59483 --- a/include/linux/intel-iommu.h
59484 +++ b/include/linux/intel-iommu.h
59485 @@ -296,7 +296,7 @@ struct iommu_flush {
59486 u8 fm, u64 type);
59487 void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
59488 unsigned int size_order, u64 type);
59489 -};
59490 +} __no_const;
59491
59492 enum {
59493 SR_DMAR_FECTL_REG,
59494 diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
59495 index a64b00e..464d8bc 100644
59496 --- a/include/linux/interrupt.h
59497 +++ b/include/linux/interrupt.h
59498 @@ -441,7 +441,7 @@ enum
59499 /* map softirq index to softirq name. update 'softirq_to_name' in
59500 * kernel/softirq.c when adding a new softirq.
59501 */
59502 -extern char *softirq_to_name[NR_SOFTIRQS];
59503 +extern const char * const softirq_to_name[NR_SOFTIRQS];
59504
59505 /* softirq mask and active fields moved to irq_cpustat_t in
59506 * asm/hardirq.h to get better cache usage. KAO
59507 @@ -449,12 +449,12 @@ extern char *softirq_to_name[NR_SOFTIRQS];
59508
59509 struct softirq_action
59510 {
59511 - void (*action)(struct softirq_action *);
59512 + void (*action)(void);
59513 };
59514
59515 asmlinkage void do_softirq(void);
59516 asmlinkage void __do_softirq(void);
59517 -extern void open_softirq(int nr, void (*action)(struct softirq_action *));
59518 +extern void open_softirq(int nr, void (*action)(void));
59519 extern void softirq_init(void);
59520 static inline void __raise_softirq_irqoff(unsigned int nr)
59521 {
59522 diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
59523 index 3875719..4cd454c 100644
59524 --- a/include/linux/kallsyms.h
59525 +++ b/include/linux/kallsyms.h
59526 @@ -15,7 +15,8 @@
59527
59528 struct module;
59529
59530 -#ifdef CONFIG_KALLSYMS
59531 +#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
59532 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
59533 /* Lookup the address for a symbol. Returns 0 if not found. */
59534 unsigned long kallsyms_lookup_name(const char *name);
59535
59536 @@ -99,6 +100,16 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u
59537 /* Stupid that this does nothing, but I didn't create this mess. */
59538 #define __print_symbol(fmt, addr)
59539 #endif /*CONFIG_KALLSYMS*/
59540 +#else /* when included by kallsyms.c, vsnprintf.c, or
59541 + arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
59542 +extern void __print_symbol(const char *fmt, unsigned long address);
59543 +extern int sprint_backtrace(char *buffer, unsigned long address);
59544 +extern int sprint_symbol(char *buffer, unsigned long address);
59545 +const char *kallsyms_lookup(unsigned long addr,
59546 + unsigned long *symbolsize,
59547 + unsigned long *offset,
59548 + char **modname, char *namebuf);
59549 +#endif
59550
59551 /* This macro allows us to keep printk typechecking */
59552 static __printf(1, 2)
59553 diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
59554 index fa39183..40160be 100644
59555 --- a/include/linux/kgdb.h
59556 +++ b/include/linux/kgdb.h
59557 @@ -53,7 +53,7 @@ extern int kgdb_connected;
59558 extern int kgdb_io_module_registered;
59559
59560 extern atomic_t kgdb_setting_breakpoint;
59561 -extern atomic_t kgdb_cpu_doing_single_step;
59562 +extern atomic_unchecked_t kgdb_cpu_doing_single_step;
59563
59564 extern struct task_struct *kgdb_usethread;
59565 extern struct task_struct *kgdb_contthread;
59566 @@ -251,7 +251,7 @@ struct kgdb_arch {
59567 void (*disable_hw_break)(struct pt_regs *regs);
59568 void (*remove_all_hw_break)(void);
59569 void (*correct_hw_break)(void);
59570 -};
59571 +} __do_const;
59572
59573 /**
59574 * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
59575 @@ -276,7 +276,7 @@ struct kgdb_io {
59576 void (*pre_exception) (void);
59577 void (*post_exception) (void);
59578 int is_console;
59579 -};
59580 +} __do_const;
59581
59582 extern struct kgdb_arch arch_kgdb_ops;
59583
59584 diff --git a/include/linux/kmod.h b/include/linux/kmod.h
59585 index b16f653..eb908f4 100644
59586 --- a/include/linux/kmod.h
59587 +++ b/include/linux/kmod.h
59588 @@ -34,6 +34,8 @@ extern char modprobe_path[]; /* for sysctl */
59589 * usually useless though. */
59590 extern __printf(2, 3)
59591 int __request_module(bool wait, const char *name, ...);
59592 +extern __printf(3, 4)
59593 +int ___request_module(bool wait, char *param_name, const char *name, ...);
59594 #define request_module(mod...) __request_module(true, mod)
59595 #define request_module_nowait(mod...) __request_module(false, mod)
59596 #define try_then_request_module(x, mod...) \
59597 diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
59598 index d526231..086e89b 100644
59599 --- a/include/linux/kvm_host.h
59600 +++ b/include/linux/kvm_host.h
59601 @@ -308,7 +308,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
59602 void vcpu_load(struct kvm_vcpu *vcpu);
59603 void vcpu_put(struct kvm_vcpu *vcpu);
59604
59605 -int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
59606 +int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
59607 struct module *module);
59608 void kvm_exit(void);
59609
59610 @@ -454,7 +454,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
59611 struct kvm_guest_debug *dbg);
59612 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
59613
59614 -int kvm_arch_init(void *opaque);
59615 +int kvm_arch_init(const void *opaque);
59616 void kvm_arch_exit(void);
59617
59618 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
59619 diff --git a/include/linux/libata.h b/include/linux/libata.h
59620 index cafc09a..d7e7829 100644
59621 --- a/include/linux/libata.h
59622 +++ b/include/linux/libata.h
59623 @@ -909,7 +909,7 @@ struct ata_port_operations {
59624 * fields must be pointers.
59625 */
59626 const struct ata_port_operations *inherits;
59627 -};
59628 +} __do_const;
59629
59630 struct ata_port_info {
59631 unsigned long flags;
59632 diff --git a/include/linux/mca.h b/include/linux/mca.h
59633 index 3797270..7765ede 100644
59634 --- a/include/linux/mca.h
59635 +++ b/include/linux/mca.h
59636 @@ -80,7 +80,7 @@ struct mca_bus_accessor_functions {
59637 int region);
59638 void * (*mca_transform_memory)(struct mca_device *,
59639 void *memory);
59640 -};
59641 +} __no_const;
59642
59643 struct mca_bus {
59644 u64 default_dma_mask;
59645 diff --git a/include/linux/memory.h b/include/linux/memory.h
59646 index 935699b..11042cc 100644
59647 --- a/include/linux/memory.h
59648 +++ b/include/linux/memory.h
59649 @@ -144,7 +144,7 @@ struct memory_accessor {
59650 size_t count);
59651 ssize_t (*write)(struct memory_accessor *, const char *buf,
59652 off_t offset, size_t count);
59653 -};
59654 +} __no_const;
59655
59656 /*
59657 * Kernel text modification mutex, used for code patching. Users of this lock
59658 diff --git a/include/linux/mfd/abx500.h b/include/linux/mfd/abx500.h
59659 index 9970337..9444122 100644
59660 --- a/include/linux/mfd/abx500.h
59661 +++ b/include/linux/mfd/abx500.h
59662 @@ -188,6 +188,7 @@ struct abx500_ops {
59663 int (*event_registers_startup_state_get) (struct device *, u8 *);
59664 int (*startup_irq_enabled) (struct device *, unsigned int);
59665 };
59666 +typedef struct abx500_ops __no_const abx500_ops_no_const;
59667
59668 int abx500_register_ops(struct device *core_dev, struct abx500_ops *ops);
59669 void abx500_remove_ops(struct device *dev);
59670 diff --git a/include/linux/mm.h b/include/linux/mm.h
59671 index 4baadd1..2e0b45e 100644
59672 --- a/include/linux/mm.h
59673 +++ b/include/linux/mm.h
59674 @@ -115,7 +115,14 @@ extern unsigned int kobjsize(const void *objp);
59675
59676 #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
59677 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
59678 +
59679 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
59680 +#define VM_SAO 0x00000000 /* Strong Access Ordering (powerpc) */
59681 +#define VM_PAGEEXEC 0x20000000 /* vma->vm_page_prot needs special handling */
59682 +#else
59683 #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
59684 +#endif
59685 +
59686 #define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */
59687 #define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
59688
59689 @@ -1012,34 +1019,6 @@ int set_page_dirty(struct page *page);
59690 int set_page_dirty_lock(struct page *page);
59691 int clear_page_dirty_for_io(struct page *page);
59692
59693 -/* Is the vma a continuation of the stack vma above it? */
59694 -static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
59695 -{
59696 - return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
59697 -}
59698 -
59699 -static inline int stack_guard_page_start(struct vm_area_struct *vma,
59700 - unsigned long addr)
59701 -{
59702 - return (vma->vm_flags & VM_GROWSDOWN) &&
59703 - (vma->vm_start == addr) &&
59704 - !vma_growsdown(vma->vm_prev, addr);
59705 -}
59706 -
59707 -/* Is the vma a continuation of the stack vma below it? */
59708 -static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
59709 -{
59710 - return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
59711 -}
59712 -
59713 -static inline int stack_guard_page_end(struct vm_area_struct *vma,
59714 - unsigned long addr)
59715 -{
59716 - return (vma->vm_flags & VM_GROWSUP) &&
59717 - (vma->vm_end == addr) &&
59718 - !vma_growsup(vma->vm_next, addr);
59719 -}
59720 -
59721 extern unsigned long move_page_tables(struct vm_area_struct *vma,
59722 unsigned long old_addr, struct vm_area_struct *new_vma,
59723 unsigned long new_addr, unsigned long len);
59724 @@ -1134,6 +1113,15 @@ static inline void sync_mm_rss(struct task_struct *task, struct mm_struct *mm)
59725 }
59726 #endif
59727
59728 +#ifdef CONFIG_MMU
59729 +pgprot_t vm_get_page_prot(vm_flags_t vm_flags);
59730 +#else
59731 +static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
59732 +{
59733 + return __pgprot(0);
59734 +}
59735 +#endif
59736 +
59737 int vma_wants_writenotify(struct vm_area_struct *vma);
59738
59739 extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
59740 @@ -1419,6 +1407,7 @@ out:
59741 }
59742
59743 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
59744 +extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
59745
59746 extern unsigned long do_brk(unsigned long, unsigned long);
59747
59748 @@ -1476,6 +1465,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
59749 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
59750 struct vm_area_struct **pprev);
59751
59752 +extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
59753 +extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
59754 +extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
59755 +
59756 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
59757 NULL if none. Assume start_addr < end_addr. */
59758 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
59759 @@ -1492,15 +1485,6 @@ static inline unsigned long vma_pages(struct vm_area_struct *vma)
59760 return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
59761 }
59762
59763 -#ifdef CONFIG_MMU
59764 -pgprot_t vm_get_page_prot(unsigned long vm_flags);
59765 -#else
59766 -static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
59767 -{
59768 - return __pgprot(0);
59769 -}
59770 -#endif
59771 -
59772 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
59773 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
59774 unsigned long pfn, unsigned long size, pgprot_t);
59775 @@ -1614,7 +1598,7 @@ extern int unpoison_memory(unsigned long pfn);
59776 extern int sysctl_memory_failure_early_kill;
59777 extern int sysctl_memory_failure_recovery;
59778 extern void shake_page(struct page *p, int access);
59779 -extern atomic_long_t mce_bad_pages;
59780 +extern atomic_long_unchecked_t mce_bad_pages;
59781 extern int soft_offline_page(struct page *page, int flags);
59782
59783 extern void dump_page(struct page *page);
59784 @@ -1628,5 +1612,11 @@ extern void copy_user_huge_page(struct page *dst, struct page *src,
59785 unsigned int pages_per_huge_page);
59786 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
59787
59788 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
59789 +extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
59790 +#else
59791 +static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
59792 +#endif
59793 +
59794 #endif /* __KERNEL__ */
59795 #endif /* _LINUX_MM_H */
59796 diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
59797 index 5b42f1b..759e4b4 100644
59798 --- a/include/linux/mm_types.h
59799 +++ b/include/linux/mm_types.h
59800 @@ -253,6 +253,8 @@ struct vm_area_struct {
59801 #ifdef CONFIG_NUMA
59802 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
59803 #endif
59804 +
59805 + struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
59806 };
59807
59808 struct core_thread {
59809 @@ -389,6 +391,24 @@ struct mm_struct {
59810 #ifdef CONFIG_CPUMASK_OFFSTACK
59811 struct cpumask cpumask_allocation;
59812 #endif
59813 +
59814 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
59815 + unsigned long pax_flags;
59816 +#endif
59817 +
59818 +#ifdef CONFIG_PAX_DLRESOLVE
59819 + unsigned long call_dl_resolve;
59820 +#endif
59821 +
59822 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
59823 + unsigned long call_syscall;
59824 +#endif
59825 +
59826 +#ifdef CONFIG_PAX_ASLR
59827 + unsigned long delta_mmap; /* randomized offset */
59828 + unsigned long delta_stack; /* randomized offset */
59829 +#endif
59830 +
59831 };
59832
59833 static inline void mm_init_cpumask(struct mm_struct *mm)
59834 diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
59835 index 1d1b1e1..2a13c78 100644
59836 --- a/include/linux/mmu_notifier.h
59837 +++ b/include/linux/mmu_notifier.h
59838 @@ -255,12 +255,12 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
59839 */
59840 #define ptep_clear_flush_notify(__vma, __address, __ptep) \
59841 ({ \
59842 - pte_t __pte; \
59843 + pte_t ___pte; \
59844 struct vm_area_struct *___vma = __vma; \
59845 unsigned long ___address = __address; \
59846 - __pte = ptep_clear_flush(___vma, ___address, __ptep); \
59847 + ___pte = ptep_clear_flush(___vma, ___address, __ptep); \
59848 mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \
59849 - __pte; \
59850 + ___pte; \
59851 })
59852
59853 #define pmdp_clear_flush_notify(__vma, __address, __pmdp) \
59854 diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
59855 index 188cb2f..d78409b 100644
59856 --- a/include/linux/mmzone.h
59857 +++ b/include/linux/mmzone.h
59858 @@ -369,7 +369,7 @@ struct zone {
59859 unsigned long flags; /* zone flags, see below */
59860
59861 /* Zone statistics */
59862 - atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
59863 + atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
59864
59865 /*
59866 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
59867 diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
59868 index 468819c..17b9db3 100644
59869 --- a/include/linux/mod_devicetable.h
59870 +++ b/include/linux/mod_devicetable.h
59871 @@ -12,7 +12,7 @@
59872 typedef unsigned long kernel_ulong_t;
59873 #endif
59874
59875 -#define PCI_ANY_ID (~0)
59876 +#define PCI_ANY_ID ((__u16)~0)
59877
59878 struct pci_device_id {
59879 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
59880 @@ -131,7 +131,7 @@ struct usb_device_id {
59881 #define USB_DEVICE_ID_MATCH_INT_SUBCLASS 0x0100
59882 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
59883
59884 -#define HID_ANY_ID (~0)
59885 +#define HID_ANY_ID (~0U)
59886
59887 struct hid_device_id {
59888 __u16 bus;
59889 diff --git a/include/linux/module.h b/include/linux/module.h
59890 index 3cb7839..511cb87 100644
59891 --- a/include/linux/module.h
59892 +++ b/include/linux/module.h
59893 @@ -17,6 +17,7 @@
59894 #include <linux/moduleparam.h>
59895 #include <linux/tracepoint.h>
59896 #include <linux/export.h>
59897 +#include <linux/fs.h>
59898
59899 #include <linux/percpu.h>
59900 #include <asm/module.h>
59901 @@ -261,19 +262,16 @@ struct module
59902 int (*init)(void);
59903
59904 /* If this is non-NULL, vfree after init() returns */
59905 - void *module_init;
59906 + void *module_init_rx, *module_init_rw;
59907
59908 /* Here is the actual code + data, vfree'd on unload. */
59909 - void *module_core;
59910 + void *module_core_rx, *module_core_rw;
59911
59912 /* Here are the sizes of the init and core sections */
59913 - unsigned int init_size, core_size;
59914 + unsigned int init_size_rw, core_size_rw;
59915
59916 /* The size of the executable code in each section. */
59917 - unsigned int init_text_size, core_text_size;
59918 -
59919 - /* Size of RO sections of the module (text+rodata) */
59920 - unsigned int init_ro_size, core_ro_size;
59921 + unsigned int init_size_rx, core_size_rx;
59922
59923 /* Arch-specific module values */
59924 struct mod_arch_specific arch;
59925 @@ -329,6 +327,10 @@ struct module
59926 #ifdef CONFIG_EVENT_TRACING
59927 struct ftrace_event_call **trace_events;
59928 unsigned int num_trace_events;
59929 + struct file_operations trace_id;
59930 + struct file_operations trace_enable;
59931 + struct file_operations trace_format;
59932 + struct file_operations trace_filter;
59933 #endif
59934 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
59935 unsigned int num_ftrace_callsites;
59936 @@ -379,16 +381,46 @@ bool is_module_address(unsigned long addr);
59937 bool is_module_percpu_address(unsigned long addr);
59938 bool is_module_text_address(unsigned long addr);
59939
59940 +static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
59941 +{
59942 +
59943 +#ifdef CONFIG_PAX_KERNEXEC
59944 + if (ktla_ktva(addr) >= (unsigned long)start &&
59945 + ktla_ktva(addr) < (unsigned long)start + size)
59946 + return 1;
59947 +#endif
59948 +
59949 + return ((void *)addr >= start && (void *)addr < start + size);
59950 +}
59951 +
59952 +static inline int within_module_core_rx(unsigned long addr, struct module *mod)
59953 +{
59954 + return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
59955 +}
59956 +
59957 +static inline int within_module_core_rw(unsigned long addr, struct module *mod)
59958 +{
59959 + return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
59960 +}
59961 +
59962 +static inline int within_module_init_rx(unsigned long addr, struct module *mod)
59963 +{
59964 + return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
59965 +}
59966 +
59967 +static inline int within_module_init_rw(unsigned long addr, struct module *mod)
59968 +{
59969 + return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
59970 +}
59971 +
59972 static inline int within_module_core(unsigned long addr, struct module *mod)
59973 {
59974 - return (unsigned long)mod->module_core <= addr &&
59975 - addr < (unsigned long)mod->module_core + mod->core_size;
59976 + return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
59977 }
59978
59979 static inline int within_module_init(unsigned long addr, struct module *mod)
59980 {
59981 - return (unsigned long)mod->module_init <= addr &&
59982 - addr < (unsigned long)mod->module_init + mod->init_size;
59983 + return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
59984 }
59985
59986 /* Search for module by name: must hold module_mutex. */
59987 diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
59988 index b2be02e..6a9fdb1 100644
59989 --- a/include/linux/moduleloader.h
59990 +++ b/include/linux/moduleloader.h
59991 @@ -25,9 +25,21 @@ unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section);
59992 sections. Returns NULL on failure. */
59993 void *module_alloc(unsigned long size);
59994
59995 +#ifdef CONFIG_PAX_KERNEXEC
59996 +void *module_alloc_exec(unsigned long size);
59997 +#else
59998 +#define module_alloc_exec(x) module_alloc(x)
59999 +#endif
60000 +
60001 /* Free memory returned from module_alloc. */
60002 void module_free(struct module *mod, void *module_region);
60003
60004 +#ifdef CONFIG_PAX_KERNEXEC
60005 +void module_free_exec(struct module *mod, void *module_region);
60006 +#else
60007 +#define module_free_exec(x, y) module_free((x), (y))
60008 +#endif
60009 +
60010 /* Apply the given relocation to the (simplified) ELF. Return -error
60011 or 0. */
60012 int apply_relocate(Elf_Shdr *sechdrs,
60013 diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
60014 index 7939f63..ec6df57 100644
60015 --- a/include/linux/moduleparam.h
60016 +++ b/include/linux/moduleparam.h
60017 @@ -260,7 +260,7 @@ static inline void __kernel_param_unlock(void)
60018 * @len is usually just sizeof(string).
60019 */
60020 #define module_param_string(name, string, len, perm) \
60021 - static const struct kparam_string __param_string_##name \
60022 + static const struct kparam_string __param_string_##name __used \
60023 = { len, string }; \
60024 __module_param_call(MODULE_PARAM_PREFIX, name, \
60025 &param_ops_string, \
60026 @@ -395,7 +395,7 @@ extern int param_get_invbool(char *buffer, const struct kernel_param *kp);
60027 * module_param_named() for why this might be necessary.
60028 */
60029 #define module_param_array_named(name, array, type, nump, perm) \
60030 - static const struct kparam_array __param_arr_##name \
60031 + static const struct kparam_array __param_arr_##name __used \
60032 = { .max = ARRAY_SIZE(array), .num = nump, \
60033 .ops = &param_ops_##type, \
60034 .elemsize = sizeof(array[0]), .elem = array }; \
60035 diff --git a/include/linux/namei.h b/include/linux/namei.h
60036 index ffc0213..2c1f2cb 100644
60037 --- a/include/linux/namei.h
60038 +++ b/include/linux/namei.h
60039 @@ -24,7 +24,7 @@ struct nameidata {
60040 unsigned seq;
60041 int last_type;
60042 unsigned depth;
60043 - char *saved_names[MAX_NESTED_LINKS + 1];
60044 + const char *saved_names[MAX_NESTED_LINKS + 1];
60045
60046 /* Intent data */
60047 union {
60048 @@ -94,12 +94,12 @@ extern int follow_up(struct path *);
60049 extern struct dentry *lock_rename(struct dentry *, struct dentry *);
60050 extern void unlock_rename(struct dentry *, struct dentry *);
60051
60052 -static inline void nd_set_link(struct nameidata *nd, char *path)
60053 +static inline void nd_set_link(struct nameidata *nd, const char *path)
60054 {
60055 nd->saved_names[nd->depth] = path;
60056 }
60057
60058 -static inline char *nd_get_link(struct nameidata *nd)
60059 +static inline const char *nd_get_link(const struct nameidata *nd)
60060 {
60061 return nd->saved_names[nd->depth];
60062 }
60063 diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
60064 index a82ad4d..90d15b7 100644
60065 --- a/include/linux/netdevice.h
60066 +++ b/include/linux/netdevice.h
60067 @@ -949,6 +949,7 @@ struct net_device_ops {
60068 int (*ndo_set_features)(struct net_device *dev,
60069 u32 features);
60070 };
60071 +typedef struct net_device_ops __no_const net_device_ops_no_const;
60072
60073 /*
60074 * The DEVICE structure.
60075 @@ -1088,7 +1089,7 @@ struct net_device {
60076 int iflink;
60077
60078 struct net_device_stats stats;
60079 - atomic_long_t rx_dropped; /* dropped packets by core network
60080 + atomic_long_unchecked_t rx_dropped; /* dropped packets by core network
60081 * Do not use this in drivers.
60082 */
60083
60084 diff --git a/include/linux/netfilter/xt_gradm.h b/include/linux/netfilter/xt_gradm.h
60085 new file mode 100644
60086 index 0000000..33f4af8
60087 --- /dev/null
60088 +++ b/include/linux/netfilter/xt_gradm.h
60089 @@ -0,0 +1,9 @@
60090 +#ifndef _LINUX_NETFILTER_XT_GRADM_H
60091 +#define _LINUX_NETFILTER_XT_GRADM_H 1
60092 +
60093 +struct xt_gradm_mtinfo {
60094 + __u16 flags;
60095 + __u16 invflags;
60096 +};
60097 +
60098 +#endif
60099 diff --git a/include/linux/of_pdt.h b/include/linux/of_pdt.h
60100 index c65a18a..0c05f3a 100644
60101 --- a/include/linux/of_pdt.h
60102 +++ b/include/linux/of_pdt.h
60103 @@ -32,7 +32,7 @@ struct of_pdt_ops {
60104
60105 /* return 0 on success; fill in 'len' with number of bytes in path */
60106 int (*pkg2path)(phandle node, char *buf, const int buflen, int *len);
60107 -};
60108 +} __no_const;
60109
60110 extern void *prom_early_alloc(unsigned long size);
60111
60112 diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
60113 index a4c5624..79d6d88 100644
60114 --- a/include/linux/oprofile.h
60115 +++ b/include/linux/oprofile.h
60116 @@ -139,9 +139,9 @@ int oprofilefs_create_ulong(struct super_block * sb, struct dentry * root,
60117 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
60118 char const * name, ulong * val);
60119
60120 -/** Create a file for read-only access to an atomic_t. */
60121 +/** Create a file for read-only access to an atomic_unchecked_t. */
60122 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
60123 - char const * name, atomic_t * val);
60124 + char const * name, atomic_unchecked_t * val);
60125
60126 /** create a directory */
60127 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
60128 diff --git a/include/linux/padata.h b/include/linux/padata.h
60129 index 4633b2f..988bc08 100644
60130 --- a/include/linux/padata.h
60131 +++ b/include/linux/padata.h
60132 @@ -129,7 +129,7 @@ struct parallel_data {
60133 struct padata_instance *pinst;
60134 struct padata_parallel_queue __percpu *pqueue;
60135 struct padata_serial_queue __percpu *squeue;
60136 - atomic_t seq_nr;
60137 + atomic_unchecked_t seq_nr;
60138 atomic_t reorder_objects;
60139 atomic_t refcnt;
60140 unsigned int max_seq_nr;
60141 diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
60142 index b1f8912..c955bff 100644
60143 --- a/include/linux/perf_event.h
60144 +++ b/include/linux/perf_event.h
60145 @@ -748,8 +748,8 @@ struct perf_event {
60146
60147 enum perf_event_active_state state;
60148 unsigned int attach_state;
60149 - local64_t count;
60150 - atomic64_t child_count;
60151 + local64_t count; /* PaX: fix it one day */
60152 + atomic64_unchecked_t child_count;
60153
60154 /*
60155 * These are the total time in nanoseconds that the event
60156 @@ -800,8 +800,8 @@ struct perf_event {
60157 * These accumulate total time (in nanoseconds) that children
60158 * events have been enabled and running, respectively.
60159 */
60160 - atomic64_t child_total_time_enabled;
60161 - atomic64_t child_total_time_running;
60162 + atomic64_unchecked_t child_total_time_enabled;
60163 + atomic64_unchecked_t child_total_time_running;
60164
60165 /*
60166 * Protect attach/detach and child_list:
60167 diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
60168 index 77257c9..51d473a 100644
60169 --- a/include/linux/pipe_fs_i.h
60170 +++ b/include/linux/pipe_fs_i.h
60171 @@ -46,9 +46,9 @@ struct pipe_buffer {
60172 struct pipe_inode_info {
60173 wait_queue_head_t wait;
60174 unsigned int nrbufs, curbuf, buffers;
60175 - unsigned int readers;
60176 - unsigned int writers;
60177 - unsigned int waiting_writers;
60178 + atomic_t readers;
60179 + atomic_t writers;
60180 + atomic_t waiting_writers;
60181 unsigned int r_counter;
60182 unsigned int w_counter;
60183 struct page *tmp_page;
60184 diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
60185 index d3085e7..fd01052 100644
60186 --- a/include/linux/pm_runtime.h
60187 +++ b/include/linux/pm_runtime.h
60188 @@ -95,7 +95,7 @@ static inline bool pm_runtime_callbacks_present(struct device *dev)
60189
60190 static inline void pm_runtime_mark_last_busy(struct device *dev)
60191 {
60192 - ACCESS_ONCE(dev->power.last_busy) = jiffies;
60193 + ACCESS_ONCE_RW(dev->power.last_busy) = jiffies;
60194 }
60195
60196 #else /* !CONFIG_PM_RUNTIME */
60197 diff --git a/include/linux/poison.h b/include/linux/poison.h
60198 index 79159de..f1233a9 100644
60199 --- a/include/linux/poison.h
60200 +++ b/include/linux/poison.h
60201 @@ -19,8 +19,8 @@
60202 * under normal circumstances, used to verify that nobody uses
60203 * non-initialized list entries.
60204 */
60205 -#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
60206 -#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
60207 +#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
60208 +#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
60209
60210 /********** include/linux/timer.h **********/
60211 /*
60212 diff --git a/include/linux/preempt.h b/include/linux/preempt.h
60213 index 58969b2..ead129b 100644
60214 --- a/include/linux/preempt.h
60215 +++ b/include/linux/preempt.h
60216 @@ -123,7 +123,7 @@ struct preempt_ops {
60217 void (*sched_in)(struct preempt_notifier *notifier, int cpu);
60218 void (*sched_out)(struct preempt_notifier *notifier,
60219 struct task_struct *next);
60220 -};
60221 +} __no_const;
60222
60223 /**
60224 * preempt_notifier - key for installing preemption notifiers
60225 diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
60226 index 643b96c..ef55a9c 100644
60227 --- a/include/linux/proc_fs.h
60228 +++ b/include/linux/proc_fs.h
60229 @@ -155,6 +155,19 @@ static inline struct proc_dir_entry *proc_create(const char *name, mode_t mode,
60230 return proc_create_data(name, mode, parent, proc_fops, NULL);
60231 }
60232
60233 +static inline struct proc_dir_entry *proc_create_grsec(const char *name, mode_t mode,
60234 + struct proc_dir_entry *parent, const struct file_operations *proc_fops)
60235 +{
60236 +#ifdef CONFIG_GRKERNSEC_PROC_USER
60237 + return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
60238 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
60239 + return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
60240 +#else
60241 + return proc_create_data(name, mode, parent, proc_fops, NULL);
60242 +#endif
60243 +}
60244 +
60245 +
60246 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
60247 mode_t mode, struct proc_dir_entry *base,
60248 read_proc_t *read_proc, void * data)
60249 @@ -258,7 +271,7 @@ union proc_op {
60250 int (*proc_show)(struct seq_file *m,
60251 struct pid_namespace *ns, struct pid *pid,
60252 struct task_struct *task);
60253 -};
60254 +} __no_const;
60255
60256 struct ctl_table_header;
60257 struct ctl_table;
60258 diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
60259 index 800f113..e9ee2e3 100644
60260 --- a/include/linux/ptrace.h
60261 +++ b/include/linux/ptrace.h
60262 @@ -129,10 +129,12 @@ extern void __ptrace_unlink(struct task_struct *child);
60263 extern void exit_ptrace(struct task_struct *tracer);
60264 #define PTRACE_MODE_READ 1
60265 #define PTRACE_MODE_ATTACH 2
60266 -/* Returns 0 on success, -errno on denial. */
60267 -extern int __ptrace_may_access(struct task_struct *task, unsigned int mode);
60268 /* Returns true on success, false on denial. */
60269 extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
60270 +/* Returns true on success, false on denial. */
60271 +extern bool ptrace_may_access_log(struct task_struct *task, unsigned int mode);
60272 +/* Returns true on success, false on denial. */
60273 +extern bool ptrace_may_access_nolock(struct task_struct *task, unsigned int mode);
60274
60275 static inline int ptrace_reparented(struct task_struct *child)
60276 {
60277 diff --git a/include/linux/random.h b/include/linux/random.h
60278 index 8f74538..02a1012 100644
60279 --- a/include/linux/random.h
60280 +++ b/include/linux/random.h
60281 @@ -69,12 +69,17 @@ void srandom32(u32 seed);
60282
60283 u32 prandom32(struct rnd_state *);
60284
60285 +static inline unsigned long pax_get_random_long(void)
60286 +{
60287 + return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0);
60288 +}
60289 +
60290 /*
60291 * Handle minimum values for seeds
60292 */
60293 static inline u32 __seed(u32 x, u32 m)
60294 {
60295 - return (x < m) ? x + m : x;
60296 + return (x <= m) ? x + m + 1 : x;
60297 }
60298
60299 /**
60300 diff --git a/include/linux/reboot.h b/include/linux/reboot.h
60301 index e0879a7..a12f962 100644
60302 --- a/include/linux/reboot.h
60303 +++ b/include/linux/reboot.h
60304 @@ -52,9 +52,9 @@ extern int unregister_reboot_notifier(struct notifier_block *);
60305 * Architecture-specific implementations of sys_reboot commands.
60306 */
60307
60308 -extern void machine_restart(char *cmd);
60309 -extern void machine_halt(void);
60310 -extern void machine_power_off(void);
60311 +extern void machine_restart(char *cmd) __noreturn;
60312 +extern void machine_halt(void) __noreturn;
60313 +extern void machine_power_off(void) __noreturn;
60314
60315 extern void machine_shutdown(void);
60316 struct pt_regs;
60317 @@ -65,9 +65,9 @@ extern void machine_crash_shutdown(struct pt_regs *);
60318 */
60319
60320 extern void kernel_restart_prepare(char *cmd);
60321 -extern void kernel_restart(char *cmd);
60322 -extern void kernel_halt(void);
60323 -extern void kernel_power_off(void);
60324 +extern void kernel_restart(char *cmd) __noreturn;
60325 +extern void kernel_halt(void) __noreturn;
60326 +extern void kernel_power_off(void) __noreturn;
60327
60328 extern int C_A_D; /* for sysctl */
60329 void ctrl_alt_del(void);
60330 @@ -81,7 +81,7 @@ extern int orderly_poweroff(bool force);
60331 * Emergency restart, callable from an interrupt handler.
60332 */
60333
60334 -extern void emergency_restart(void);
60335 +extern void emergency_restart(void) __noreturn;
60336 #include <asm/emergency-restart.h>
60337
60338 #endif
60339 diff --git a/include/linux/reiserfs_fs.h b/include/linux/reiserfs_fs.h
60340 index 96d465f..b084e05 100644
60341 --- a/include/linux/reiserfs_fs.h
60342 +++ b/include/linux/reiserfs_fs.h
60343 @@ -1406,7 +1406,7 @@ static inline loff_t max_reiserfs_offset(struct inode *inode)
60344 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
60345
60346 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
60347 -#define get_generation(s) atomic_read (&fs_generation(s))
60348 +#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
60349 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
60350 #define __fs_changed(gen,s) (gen != get_generation (s))
60351 #define fs_changed(gen,s) \
60352 diff --git a/include/linux/reiserfs_fs_sb.h b/include/linux/reiserfs_fs_sb.h
60353 index 52c83b6..18ed7eb 100644
60354 --- a/include/linux/reiserfs_fs_sb.h
60355 +++ b/include/linux/reiserfs_fs_sb.h
60356 @@ -386,7 +386,7 @@ struct reiserfs_sb_info {
60357 /* Comment? -Hans */
60358 wait_queue_head_t s_wait;
60359 /* To be obsoleted soon by per buffer seals.. -Hans */
60360 - atomic_t s_generation_counter; // increased by one every time the
60361 + atomic_unchecked_t s_generation_counter; // increased by one every time the
60362 // tree gets re-balanced
60363 unsigned long s_properties; /* File system properties. Currently holds
60364 on-disk FS format */
60365 diff --git a/include/linux/relay.h b/include/linux/relay.h
60366 index 14a86bc..17d0700 100644
60367 --- a/include/linux/relay.h
60368 +++ b/include/linux/relay.h
60369 @@ -159,7 +159,7 @@ struct rchan_callbacks
60370 * The callback should return 0 if successful, negative if not.
60371 */
60372 int (*remove_buf_file)(struct dentry *dentry);
60373 -};
60374 +} __no_const;
60375
60376 /*
60377 * CONFIG_RELAY kernel API, kernel/relay.c
60378 diff --git a/include/linux/rfkill.h b/include/linux/rfkill.h
60379 index c6c6084..5bf1212 100644
60380 --- a/include/linux/rfkill.h
60381 +++ b/include/linux/rfkill.h
60382 @@ -147,6 +147,7 @@ struct rfkill_ops {
60383 void (*query)(struct rfkill *rfkill, void *data);
60384 int (*set_block)(void *data, bool blocked);
60385 };
60386 +typedef struct rfkill_ops __no_const rfkill_ops_no_const;
60387
60388 #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
60389 /**
60390 diff --git a/include/linux/rio.h b/include/linux/rio.h
60391 index 4d50611..c6858a2 100644
60392 --- a/include/linux/rio.h
60393 +++ b/include/linux/rio.h
60394 @@ -315,7 +315,7 @@ struct rio_ops {
60395 int mbox, void *buffer, size_t len);
60396 int (*add_inb_buffer)(struct rio_mport *mport, int mbox, void *buf);
60397 void *(*get_inb_message)(struct rio_mport *mport, int mbox);
60398 -};
60399 +} __no_const;
60400
60401 #define RIO_RESOURCE_MEM 0x00000100
60402 #define RIO_RESOURCE_DOORBELL 0x00000200
60403 diff --git a/include/linux/rmap.h b/include/linux/rmap.h
60404 index 2148b12..519b820 100644
60405 --- a/include/linux/rmap.h
60406 +++ b/include/linux/rmap.h
60407 @@ -119,8 +119,8 @@ static inline void anon_vma_unlock(struct anon_vma *anon_vma)
60408 void anon_vma_init(void); /* create anon_vma_cachep */
60409 int anon_vma_prepare(struct vm_area_struct *);
60410 void unlink_anon_vmas(struct vm_area_struct *);
60411 -int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
60412 -int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
60413 +int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *);
60414 +int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *);
60415 void __anon_vma_link(struct vm_area_struct *);
60416
60417 static inline void anon_vma_merge(struct vm_area_struct *vma,
60418 diff --git a/include/linux/sched.h b/include/linux/sched.h
60419 index 1c4f3e9..b4e4851 100644
60420 --- a/include/linux/sched.h
60421 +++ b/include/linux/sched.h
60422 @@ -101,6 +101,7 @@ struct bio_list;
60423 struct fs_struct;
60424 struct perf_event_context;
60425 struct blk_plug;
60426 +struct linux_binprm;
60427
60428 /*
60429 * List of flags we want to share for kernel threads,
60430 @@ -380,10 +381,13 @@ struct user_namespace;
60431 #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
60432
60433 extern int sysctl_max_map_count;
60434 +extern unsigned long sysctl_heap_stack_gap;
60435
60436 #include <linux/aio.h>
60437
60438 #ifdef CONFIG_MMU
60439 +extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len);
60440 +extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len);
60441 extern void arch_pick_mmap_layout(struct mm_struct *mm);
60442 extern unsigned long
60443 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
60444 @@ -629,6 +633,17 @@ struct signal_struct {
60445 #ifdef CONFIG_TASKSTATS
60446 struct taskstats *stats;
60447 #endif
60448 +
60449 +#ifdef CONFIG_GRKERNSEC
60450 + u32 curr_ip;
60451 + u32 saved_ip;
60452 + u32 gr_saddr;
60453 + u32 gr_daddr;
60454 + u16 gr_sport;
60455 + u16 gr_dport;
60456 + u8 used_accept:1;
60457 +#endif
60458 +
60459 #ifdef CONFIG_AUDIT
60460 unsigned audit_tty;
60461 struct tty_audit_buf *tty_audit_buf;
60462 @@ -710,6 +725,11 @@ struct user_struct {
60463 struct key *session_keyring; /* UID's default session keyring */
60464 #endif
60465
60466 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
60467 + unsigned int banned;
60468 + unsigned long ban_expires;
60469 +#endif
60470 +
60471 /* Hash table maintenance information */
60472 struct hlist_node uidhash_node;
60473 uid_t uid;
60474 @@ -1337,8 +1357,8 @@ struct task_struct {
60475 struct list_head thread_group;
60476
60477 struct completion *vfork_done; /* for vfork() */
60478 - int __user *set_child_tid; /* CLONE_CHILD_SETTID */
60479 - int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
60480 + pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
60481 + pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
60482
60483 cputime_t utime, stime, utimescaled, stimescaled;
60484 cputime_t gtime;
60485 @@ -1354,13 +1374,6 @@ struct task_struct {
60486 struct task_cputime cputime_expires;
60487 struct list_head cpu_timers[3];
60488
60489 -/* process credentials */
60490 - const struct cred __rcu *real_cred; /* objective and real subjective task
60491 - * credentials (COW) */
60492 - const struct cred __rcu *cred; /* effective (overridable) subjective task
60493 - * credentials (COW) */
60494 - struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
60495 -
60496 char comm[TASK_COMM_LEN]; /* executable name excluding path
60497 - access with [gs]et_task_comm (which lock
60498 it with task_lock())
60499 @@ -1377,8 +1390,16 @@ struct task_struct {
60500 #endif
60501 /* CPU-specific state of this task */
60502 struct thread_struct thread;
60503 +/* thread_info moved to task_struct */
60504 +#ifdef CONFIG_X86
60505 + struct thread_info tinfo;
60506 +#endif
60507 /* filesystem information */
60508 struct fs_struct *fs;
60509 +
60510 + const struct cred __rcu *cred; /* effective (overridable) subjective task
60511 + * credentials (COW) */
60512 +
60513 /* open file information */
60514 struct files_struct *files;
60515 /* namespaces */
60516 @@ -1425,6 +1446,11 @@ struct task_struct {
60517 struct rt_mutex_waiter *pi_blocked_on;
60518 #endif
60519
60520 +/* process credentials */
60521 + const struct cred __rcu *real_cred; /* objective and real subjective task
60522 + * credentials (COW) */
60523 + struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
60524 +
60525 #ifdef CONFIG_DEBUG_MUTEXES
60526 /* mutex deadlock detection */
60527 struct mutex_waiter *blocked_on;
60528 @@ -1540,6 +1566,27 @@ struct task_struct {
60529 unsigned long default_timer_slack_ns;
60530
60531 struct list_head *scm_work_list;
60532 +
60533 +#ifdef CONFIG_GRKERNSEC
60534 + /* grsecurity */
60535 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
60536 + u64 exec_id;
60537 +#endif
60538 +#ifdef CONFIG_GRKERNSEC_SETXID
60539 + const struct cred *delayed_cred;
60540 +#endif
60541 + struct dentry *gr_chroot_dentry;
60542 + struct acl_subject_label *acl;
60543 + struct acl_role_label *role;
60544 + struct file *exec_file;
60545 + u16 acl_role_id;
60546 + /* is this the task that authenticated to the special role */
60547 + u8 acl_sp_role;
60548 + u8 is_writable;
60549 + u8 brute;
60550 + u8 gr_is_chrooted;
60551 +#endif
60552 +
60553 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
60554 /* Index of current stored address in ret_stack */
60555 int curr_ret_stack;
60556 @@ -1574,6 +1621,51 @@ struct task_struct {
60557 #endif
60558 };
60559
60560 +#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
60561 +#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
60562 +#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
60563 +#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
60564 +/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
60565 +#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
60566 +
60567 +#ifdef CONFIG_PAX_SOFTMODE
60568 +extern int pax_softmode;
60569 +#endif
60570 +
60571 +extern int pax_check_flags(unsigned long *);
60572 +
60573 +/* if tsk != current then task_lock must be held on it */
60574 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
60575 +static inline unsigned long pax_get_flags(struct task_struct *tsk)
60576 +{
60577 + if (likely(tsk->mm))
60578 + return tsk->mm->pax_flags;
60579 + else
60580 + return 0UL;
60581 +}
60582 +
60583 +/* if tsk != current then task_lock must be held on it */
60584 +static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
60585 +{
60586 + if (likely(tsk->mm)) {
60587 + tsk->mm->pax_flags = flags;
60588 + return 0;
60589 + }
60590 + return -EINVAL;
60591 +}
60592 +#endif
60593 +
60594 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
60595 +extern void pax_set_initial_flags(struct linux_binprm *bprm);
60596 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
60597 +extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
60598 +#endif
60599 +
60600 +extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
60601 +extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp);
60602 +extern void pax_report_refcount_overflow(struct pt_regs *regs);
60603 +extern NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type) ATTRIB_NORET;
60604 +
60605 /* Future-safe accessor for struct task_struct's cpus_allowed. */
60606 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
60607
60608 @@ -2081,7 +2173,9 @@ void yield(void);
60609 extern struct exec_domain default_exec_domain;
60610
60611 union thread_union {
60612 +#ifndef CONFIG_X86
60613 struct thread_info thread_info;
60614 +#endif
60615 unsigned long stack[THREAD_SIZE/sizeof(long)];
60616 };
60617
60618 @@ -2114,6 +2208,7 @@ extern struct pid_namespace init_pid_ns;
60619 */
60620
60621 extern struct task_struct *find_task_by_vpid(pid_t nr);
60622 +extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
60623 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
60624 struct pid_namespace *ns);
60625
60626 @@ -2235,6 +2330,12 @@ static inline void mmdrop(struct mm_struct * mm)
60627 extern void mmput(struct mm_struct *);
60628 /* Grab a reference to a task's mm, if it is not already going away */
60629 extern struct mm_struct *get_task_mm(struct task_struct *task);
60630 +/*
60631 + * Grab a reference to a task's mm, if it is not already going away
60632 + * and ptrace_may_access with the mode parameter passed to it
60633 + * succeeds.
60634 + */
60635 +extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode);
60636 /* Remove the current tasks stale references to the old mm_struct */
60637 extern void mm_release(struct task_struct *, struct mm_struct *);
60638 /* Allocate a new mm structure and copy contents from tsk->mm */
60639 @@ -2251,7 +2352,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
60640 extern void exit_itimers(struct signal_struct *);
60641 extern void flush_itimer_signals(void);
60642
60643 -extern NORET_TYPE void do_group_exit(int);
60644 +extern NORET_TYPE void do_group_exit(int) ATTRIB_NORET;
60645
60646 extern void daemonize(const char *, ...);
60647 extern int allow_signal(int);
60648 @@ -2416,13 +2517,17 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
60649
60650 #endif
60651
60652 -static inline int object_is_on_stack(void *obj)
60653 +static inline int object_starts_on_stack(void *obj)
60654 {
60655 - void *stack = task_stack_page(current);
60656 + const void *stack = task_stack_page(current);
60657
60658 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
60659 }
60660
60661 +#ifdef CONFIG_PAX_USERCOPY
60662 +extern int object_is_on_stack(const void *obj, unsigned long len);
60663 +#endif
60664 +
60665 extern void thread_info_cache_init(void);
60666
60667 #ifdef CONFIG_DEBUG_STACK_USAGE
60668 diff --git a/include/linux/screen_info.h b/include/linux/screen_info.h
60669 index 899fbb4..1cb4138 100644
60670 --- a/include/linux/screen_info.h
60671 +++ b/include/linux/screen_info.h
60672 @@ -43,7 +43,8 @@ struct screen_info {
60673 __u16 pages; /* 0x32 */
60674 __u16 vesa_attributes; /* 0x34 */
60675 __u32 capabilities; /* 0x36 */
60676 - __u8 _reserved[6]; /* 0x3a */
60677 + __u16 vesapm_size; /* 0x3a */
60678 + __u8 _reserved[4]; /* 0x3c */
60679 } __attribute__((packed));
60680
60681 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
60682 diff --git a/include/linux/security.h b/include/linux/security.h
60683 index e8c619d..e0cbd1c 100644
60684 --- a/include/linux/security.h
60685 +++ b/include/linux/security.h
60686 @@ -37,6 +37,7 @@
60687 #include <linux/xfrm.h>
60688 #include <linux/slab.h>
60689 #include <linux/xattr.h>
60690 +#include <linux/grsecurity.h>
60691 #include <net/flow.h>
60692
60693 /* Maximum number of letters for an LSM name string */
60694 diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
60695 index 0b69a46..b2ffa4c 100644
60696 --- a/include/linux/seq_file.h
60697 +++ b/include/linux/seq_file.h
60698 @@ -24,6 +24,9 @@ struct seq_file {
60699 struct mutex lock;
60700 const struct seq_operations *op;
60701 int poll_event;
60702 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
60703 + u64 exec_id;
60704 +#endif
60705 void *private;
60706 };
60707
60708 @@ -33,6 +36,7 @@ struct seq_operations {
60709 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
60710 int (*show) (struct seq_file *m, void *v);
60711 };
60712 +typedef struct seq_operations __no_const seq_operations_no_const;
60713
60714 #define SEQ_SKIP 1
60715
60716 diff --git a/include/linux/shm.h b/include/linux/shm.h
60717 index 92808b8..c28cac4 100644
60718 --- a/include/linux/shm.h
60719 +++ b/include/linux/shm.h
60720 @@ -98,6 +98,10 @@ struct shmid_kernel /* private to the kernel */
60721
60722 /* The task created the shm object. NULL if the task is dead. */
60723 struct task_struct *shm_creator;
60724 +#ifdef CONFIG_GRKERNSEC
60725 + time_t shm_createtime;
60726 + pid_t shm_lapid;
60727 +#endif
60728 };
60729
60730 /* shm_mode upper byte flags */
60731 diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
60732 index fe86488..1563c1c 100644
60733 --- a/include/linux/skbuff.h
60734 +++ b/include/linux/skbuff.h
60735 @@ -642,7 +642,7 @@ static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
60736 */
60737 static inline int skb_queue_empty(const struct sk_buff_head *list)
60738 {
60739 - return list->next == (struct sk_buff *)list;
60740 + return list->next == (const struct sk_buff *)list;
60741 }
60742
60743 /**
60744 @@ -655,7 +655,7 @@ static inline int skb_queue_empty(const struct sk_buff_head *list)
60745 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
60746 const struct sk_buff *skb)
60747 {
60748 - return skb->next == (struct sk_buff *)list;
60749 + return skb->next == (const struct sk_buff *)list;
60750 }
60751
60752 /**
60753 @@ -668,7 +668,7 @@ static inline bool skb_queue_is_last(const struct sk_buff_head *list,
60754 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
60755 const struct sk_buff *skb)
60756 {
60757 - return skb->prev == (struct sk_buff *)list;
60758 + return skb->prev == (const struct sk_buff *)list;
60759 }
60760
60761 /**
60762 @@ -1523,7 +1523,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
60763 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
60764 */
60765 #ifndef NET_SKB_PAD
60766 -#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
60767 +#define NET_SKB_PAD max(_AC(32,UL), L1_CACHE_BYTES)
60768 #endif
60769
60770 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
60771 diff --git a/include/linux/slab.h b/include/linux/slab.h
60772 index 573c809..e84c132 100644
60773 --- a/include/linux/slab.h
60774 +++ b/include/linux/slab.h
60775 @@ -11,12 +11,20 @@
60776
60777 #include <linux/gfp.h>
60778 #include <linux/types.h>
60779 +#include <linux/err.h>
60780
60781 /*
60782 * Flags to pass to kmem_cache_create().
60783 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
60784 */
60785 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
60786 +
60787 +#ifdef CONFIG_PAX_USERCOPY
60788 +#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
60789 +#else
60790 +#define SLAB_USERCOPY 0x00000000UL
60791 +#endif
60792 +
60793 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
60794 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
60795 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
60796 @@ -87,10 +95,13 @@
60797 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
60798 * Both make kfree a no-op.
60799 */
60800 -#define ZERO_SIZE_PTR ((void *)16)
60801 +#define ZERO_SIZE_PTR \
60802 +({ \
60803 + BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
60804 + (void *)(-MAX_ERRNO-1L); \
60805 +})
60806
60807 -#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
60808 - (unsigned long)ZERO_SIZE_PTR)
60809 +#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
60810
60811 /*
60812 * struct kmem_cache related prototypes
60813 @@ -161,6 +172,7 @@ void * __must_check krealloc(const void *, size_t, gfp_t);
60814 void kfree(const void *);
60815 void kzfree(const void *);
60816 size_t ksize(const void *);
60817 +void check_object_size(const void *ptr, unsigned long n, bool to);
60818
60819 /*
60820 * Allocator specific definitions. These are mainly used to establish optimized
60821 @@ -353,4 +365,59 @@ static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
60822
60823 void __init kmem_cache_init_late(void);
60824
60825 +#define kmalloc(x, y) \
60826 +({ \
60827 + void *___retval; \
60828 + intoverflow_t ___x = (intoverflow_t)x; \
60829 + if (WARN(___x > ULONG_MAX, "kmalloc size overflow\n")) \
60830 + ___retval = NULL; \
60831 + else \
60832 + ___retval = kmalloc((size_t)___x, (y)); \
60833 + ___retval; \
60834 +})
60835 +
60836 +#define kmalloc_node(x, y, z) \
60837 +({ \
60838 + void *___retval; \
60839 + intoverflow_t ___x = (intoverflow_t)x; \
60840 + if (WARN(___x > ULONG_MAX, "kmalloc_node size overflow\n"))\
60841 + ___retval = NULL; \
60842 + else \
60843 + ___retval = kmalloc_node((size_t)___x, (y), (z));\
60844 + ___retval; \
60845 +})
60846 +
60847 +#define kzalloc(x, y) \
60848 +({ \
60849 + void *___retval; \
60850 + intoverflow_t ___x = (intoverflow_t)x; \
60851 + if (WARN(___x > ULONG_MAX, "kzalloc size overflow\n")) \
60852 + ___retval = NULL; \
60853 + else \
60854 + ___retval = kzalloc((size_t)___x, (y)); \
60855 + ___retval; \
60856 +})
60857 +
60858 +#define __krealloc(x, y, z) \
60859 +({ \
60860 + void *___retval; \
60861 + intoverflow_t ___y = (intoverflow_t)y; \
60862 + if (WARN(___y > ULONG_MAX, "__krealloc size overflow\n"))\
60863 + ___retval = NULL; \
60864 + else \
60865 + ___retval = __krealloc((x), (size_t)___y, (z)); \
60866 + ___retval; \
60867 +})
60868 +
60869 +#define krealloc(x, y, z) \
60870 +({ \
60871 + void *___retval; \
60872 + intoverflow_t ___y = (intoverflow_t)y; \
60873 + if (WARN(___y > ULONG_MAX, "krealloc size overflow\n")) \
60874 + ___retval = NULL; \
60875 + else \
60876 + ___retval = krealloc((x), (size_t)___y, (z)); \
60877 + ___retval; \
60878 +})
60879 +
60880 #endif /* _LINUX_SLAB_H */
60881 diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
60882 index d00e0ba..1b3bf7b 100644
60883 --- a/include/linux/slab_def.h
60884 +++ b/include/linux/slab_def.h
60885 @@ -68,10 +68,10 @@ struct kmem_cache {
60886 unsigned long node_allocs;
60887 unsigned long node_frees;
60888 unsigned long node_overflow;
60889 - atomic_t allochit;
60890 - atomic_t allocmiss;
60891 - atomic_t freehit;
60892 - atomic_t freemiss;
60893 + atomic_unchecked_t allochit;
60894 + atomic_unchecked_t allocmiss;
60895 + atomic_unchecked_t freehit;
60896 + atomic_unchecked_t freemiss;
60897
60898 /*
60899 * If debugging is enabled, then the allocator can add additional
60900 diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
60901 index a32bcfd..53b71f4 100644
60902 --- a/include/linux/slub_def.h
60903 +++ b/include/linux/slub_def.h
60904 @@ -89,7 +89,7 @@ struct kmem_cache {
60905 struct kmem_cache_order_objects max;
60906 struct kmem_cache_order_objects min;
60907 gfp_t allocflags; /* gfp flags to use on each alloc */
60908 - int refcount; /* Refcount for slab cache destroy */
60909 + atomic_t refcount; /* Refcount for slab cache destroy */
60910 void (*ctor)(void *);
60911 int inuse; /* Offset to metadata */
60912 int align; /* Alignment */
60913 @@ -215,7 +215,7 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
60914 }
60915
60916 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
60917 -void *__kmalloc(size_t size, gfp_t flags);
60918 +void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1);
60919
60920 static __always_inline void *
60921 kmalloc_order(size_t size, gfp_t flags, unsigned int order)
60922 diff --git a/include/linux/sonet.h b/include/linux/sonet.h
60923 index de8832d..0147b46 100644
60924 --- a/include/linux/sonet.h
60925 +++ b/include/linux/sonet.h
60926 @@ -61,7 +61,7 @@ struct sonet_stats {
60927 #include <linux/atomic.h>
60928
60929 struct k_sonet_stats {
60930 -#define __HANDLE_ITEM(i) atomic_t i
60931 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
60932 __SONET_ITEMS
60933 #undef __HANDLE_ITEM
60934 };
60935 diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
60936 index 3d8f9c4..69f1c0a 100644
60937 --- a/include/linux/sunrpc/clnt.h
60938 +++ b/include/linux/sunrpc/clnt.h
60939 @@ -172,9 +172,9 @@ static inline unsigned short rpc_get_port(const struct sockaddr *sap)
60940 {
60941 switch (sap->sa_family) {
60942 case AF_INET:
60943 - return ntohs(((struct sockaddr_in *)sap)->sin_port);
60944 + return ntohs(((const struct sockaddr_in *)sap)->sin_port);
60945 case AF_INET6:
60946 - return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
60947 + return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
60948 }
60949 return 0;
60950 }
60951 @@ -207,7 +207,7 @@ static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1,
60952 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
60953 const struct sockaddr *src)
60954 {
60955 - const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
60956 + const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
60957 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
60958
60959 dsin->sin_family = ssin->sin_family;
60960 @@ -310,7 +310,7 @@ static inline u32 rpc_get_scope_id(const struct sockaddr *sa)
60961 if (sa->sa_family != AF_INET6)
60962 return 0;
60963
60964 - return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
60965 + return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
60966 }
60967
60968 #endif /* __KERNEL__ */
60969 diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
60970 index e775689..9e206d9 100644
60971 --- a/include/linux/sunrpc/sched.h
60972 +++ b/include/linux/sunrpc/sched.h
60973 @@ -105,6 +105,7 @@ struct rpc_call_ops {
60974 void (*rpc_call_done)(struct rpc_task *, void *);
60975 void (*rpc_release)(void *);
60976 };
60977 +typedef struct rpc_call_ops __no_const rpc_call_ops_no_const;
60978
60979 struct rpc_task_setup {
60980 struct rpc_task *task;
60981 diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
60982 index c14fe86..393245e 100644
60983 --- a/include/linux/sunrpc/svc_rdma.h
60984 +++ b/include/linux/sunrpc/svc_rdma.h
60985 @@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
60986 extern unsigned int svcrdma_max_requests;
60987 extern unsigned int svcrdma_max_req_size;
60988
60989 -extern atomic_t rdma_stat_recv;
60990 -extern atomic_t rdma_stat_read;
60991 -extern atomic_t rdma_stat_write;
60992 -extern atomic_t rdma_stat_sq_starve;
60993 -extern atomic_t rdma_stat_rq_starve;
60994 -extern atomic_t rdma_stat_rq_poll;
60995 -extern atomic_t rdma_stat_rq_prod;
60996 -extern atomic_t rdma_stat_sq_poll;
60997 -extern atomic_t rdma_stat_sq_prod;
60998 +extern atomic_unchecked_t rdma_stat_recv;
60999 +extern atomic_unchecked_t rdma_stat_read;
61000 +extern atomic_unchecked_t rdma_stat_write;
61001 +extern atomic_unchecked_t rdma_stat_sq_starve;
61002 +extern atomic_unchecked_t rdma_stat_rq_starve;
61003 +extern atomic_unchecked_t rdma_stat_rq_poll;
61004 +extern atomic_unchecked_t rdma_stat_rq_prod;
61005 +extern atomic_unchecked_t rdma_stat_sq_poll;
61006 +extern atomic_unchecked_t rdma_stat_sq_prod;
61007
61008 #define RPCRDMA_VERSION 1
61009
61010 diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
61011 index 703cfa3..0b8ca72ac 100644
61012 --- a/include/linux/sysctl.h
61013 +++ b/include/linux/sysctl.h
61014 @@ -155,7 +155,11 @@ enum
61015 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
61016 };
61017
61018 -
61019 +#ifdef CONFIG_PAX_SOFTMODE
61020 +enum {
61021 + PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
61022 +};
61023 +#endif
61024
61025 /* CTL_VM names: */
61026 enum
61027 @@ -968,6 +972,8 @@ typedef int proc_handler (struct ctl_table *ctl, int write,
61028
61029 extern int proc_dostring(struct ctl_table *, int,
61030 void __user *, size_t *, loff_t *);
61031 +extern int proc_dostring_modpriv(struct ctl_table *, int,
61032 + void __user *, size_t *, loff_t *);
61033 extern int proc_dointvec(struct ctl_table *, int,
61034 void __user *, size_t *, loff_t *);
61035 extern int proc_dointvec_minmax(struct ctl_table *, int,
61036 diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h
61037 index a71a292..51bd91d 100644
61038 --- a/include/linux/tracehook.h
61039 +++ b/include/linux/tracehook.h
61040 @@ -54,12 +54,12 @@ struct linux_binprm;
61041 /*
61042 * ptrace report for syscall entry and exit looks identical.
61043 */
61044 -static inline void ptrace_report_syscall(struct pt_regs *regs)
61045 +static inline int ptrace_report_syscall(struct pt_regs *regs)
61046 {
61047 int ptrace = current->ptrace;
61048
61049 if (!(ptrace & PT_PTRACED))
61050 - return;
61051 + return 0;
61052
61053 ptrace_notify(SIGTRAP | ((ptrace & PT_TRACESYSGOOD) ? 0x80 : 0));
61054
61055 @@ -72,6 +72,8 @@ static inline void ptrace_report_syscall(struct pt_regs *regs)
61056 send_sig(current->exit_code, current, 1);
61057 current->exit_code = 0;
61058 }
61059 +
61060 + return fatal_signal_pending(current);
61061 }
61062
61063 /**
61064 @@ -96,8 +98,7 @@ static inline void ptrace_report_syscall(struct pt_regs *regs)
61065 static inline __must_check int tracehook_report_syscall_entry(
61066 struct pt_regs *regs)
61067 {
61068 - ptrace_report_syscall(regs);
61069 - return 0;
61070 + return ptrace_report_syscall(regs);
61071 }
61072
61073 /**
61074 diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
61075 index ff7dc08..893e1bd 100644
61076 --- a/include/linux/tty_ldisc.h
61077 +++ b/include/linux/tty_ldisc.h
61078 @@ -148,7 +148,7 @@ struct tty_ldisc_ops {
61079
61080 struct module *owner;
61081
61082 - int refcount;
61083 + atomic_t refcount;
61084 };
61085
61086 struct tty_ldisc {
61087 diff --git a/include/linux/types.h b/include/linux/types.h
61088 index 57a9723..dbe234a 100644
61089 --- a/include/linux/types.h
61090 +++ b/include/linux/types.h
61091 @@ -213,10 +213,26 @@ typedef struct {
61092 int counter;
61093 } atomic_t;
61094
61095 +#ifdef CONFIG_PAX_REFCOUNT
61096 +typedef struct {
61097 + int counter;
61098 +} atomic_unchecked_t;
61099 +#else
61100 +typedef atomic_t atomic_unchecked_t;
61101 +#endif
61102 +
61103 #ifdef CONFIG_64BIT
61104 typedef struct {
61105 long counter;
61106 } atomic64_t;
61107 +
61108 +#ifdef CONFIG_PAX_REFCOUNT
61109 +typedef struct {
61110 + long counter;
61111 +} atomic64_unchecked_t;
61112 +#else
61113 +typedef atomic64_t atomic64_unchecked_t;
61114 +#endif
61115 #endif
61116
61117 struct list_head {
61118 diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
61119 index 5ca0951..ab496a5 100644
61120 --- a/include/linux/uaccess.h
61121 +++ b/include/linux/uaccess.h
61122 @@ -76,11 +76,11 @@ static inline unsigned long __copy_from_user_nocache(void *to,
61123 long ret; \
61124 mm_segment_t old_fs = get_fs(); \
61125 \
61126 - set_fs(KERNEL_DS); \
61127 pagefault_disable(); \
61128 - ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
61129 - pagefault_enable(); \
61130 + set_fs(KERNEL_DS); \
61131 + ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval)); \
61132 set_fs(old_fs); \
61133 + pagefault_enable(); \
61134 ret; \
61135 })
61136
61137 diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h
61138 index 99c1b4d..bb94261 100644
61139 --- a/include/linux/unaligned/access_ok.h
61140 +++ b/include/linux/unaligned/access_ok.h
61141 @@ -6,32 +6,32 @@
61142
61143 static inline u16 get_unaligned_le16(const void *p)
61144 {
61145 - return le16_to_cpup((__le16 *)p);
61146 + return le16_to_cpup((const __le16 *)p);
61147 }
61148
61149 static inline u32 get_unaligned_le32(const void *p)
61150 {
61151 - return le32_to_cpup((__le32 *)p);
61152 + return le32_to_cpup((const __le32 *)p);
61153 }
61154
61155 static inline u64 get_unaligned_le64(const void *p)
61156 {
61157 - return le64_to_cpup((__le64 *)p);
61158 + return le64_to_cpup((const __le64 *)p);
61159 }
61160
61161 static inline u16 get_unaligned_be16(const void *p)
61162 {
61163 - return be16_to_cpup((__be16 *)p);
61164 + return be16_to_cpup((const __be16 *)p);
61165 }
61166
61167 static inline u32 get_unaligned_be32(const void *p)
61168 {
61169 - return be32_to_cpup((__be32 *)p);
61170 + return be32_to_cpup((const __be32 *)p);
61171 }
61172
61173 static inline u64 get_unaligned_be64(const void *p)
61174 {
61175 - return be64_to_cpup((__be64 *)p);
61176 + return be64_to_cpup((const __be64 *)p);
61177 }
61178
61179 static inline void put_unaligned_le16(u16 val, void *p)
61180 diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h
61181 index e5a40c3..20ab0f6 100644
61182 --- a/include/linux/usb/renesas_usbhs.h
61183 +++ b/include/linux/usb/renesas_usbhs.h
61184 @@ -39,7 +39,7 @@ enum {
61185 */
61186 struct renesas_usbhs_driver_callback {
61187 int (*notify_hotplug)(struct platform_device *pdev);
61188 -};
61189 +} __no_const;
61190
61191 /*
61192 * callback functions for platform
61193 @@ -89,7 +89,7 @@ struct renesas_usbhs_platform_callback {
61194 * VBUS control is needed for Host
61195 */
61196 int (*set_vbus)(struct platform_device *pdev, int enable);
61197 -};
61198 +} __no_const;
61199
61200 /*
61201 * parameters for renesas usbhs
61202 diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
61203 index 6f8fbcf..8259001 100644
61204 --- a/include/linux/vermagic.h
61205 +++ b/include/linux/vermagic.h
61206 @@ -25,9 +25,35 @@
61207 #define MODULE_ARCH_VERMAGIC ""
61208 #endif
61209
61210 +#ifdef CONFIG_PAX_REFCOUNT
61211 +#define MODULE_PAX_REFCOUNT "REFCOUNT "
61212 +#else
61213 +#define MODULE_PAX_REFCOUNT ""
61214 +#endif
61215 +
61216 +#ifdef CONSTIFY_PLUGIN
61217 +#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
61218 +#else
61219 +#define MODULE_CONSTIFY_PLUGIN ""
61220 +#endif
61221 +
61222 +#ifdef STACKLEAK_PLUGIN
61223 +#define MODULE_STACKLEAK_PLUGIN "STACKLEAK_PLUGIN "
61224 +#else
61225 +#define MODULE_STACKLEAK_PLUGIN ""
61226 +#endif
61227 +
61228 +#ifdef CONFIG_GRKERNSEC
61229 +#define MODULE_GRSEC "GRSEC "
61230 +#else
61231 +#define MODULE_GRSEC ""
61232 +#endif
61233 +
61234 #define VERMAGIC_STRING \
61235 UTS_RELEASE " " \
61236 MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
61237 MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
61238 - MODULE_ARCH_VERMAGIC
61239 + MODULE_ARCH_VERMAGIC \
61240 + MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_STACKLEAK_PLUGIN \
61241 + MODULE_GRSEC
61242
61243 diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
61244 index 4bde182..aec92c1 100644
61245 --- a/include/linux/vmalloc.h
61246 +++ b/include/linux/vmalloc.h
61247 @@ -14,6 +14,11 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */
61248 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
61249 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
61250 #define VM_UNLIST 0x00000020 /* vm_struct is not listed in vmlist */
61251 +
61252 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
61253 +#define VM_KERNEXEC 0x00000040 /* allocate from executable kernel memory range */
61254 +#endif
61255 +
61256 /* bits [20..32] reserved for arch specific ioremap internals */
61257
61258 /*
61259 @@ -156,4 +161,103 @@ pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
61260 # endif
61261 #endif
61262
61263 +#define vmalloc(x) \
61264 +({ \
61265 + void *___retval; \
61266 + intoverflow_t ___x = (intoverflow_t)x; \
61267 + if (WARN(___x > ULONG_MAX, "vmalloc size overflow\n")) \
61268 + ___retval = NULL; \
61269 + else \
61270 + ___retval = vmalloc((unsigned long)___x); \
61271 + ___retval; \
61272 +})
61273 +
61274 +#define vzalloc(x) \
61275 +({ \
61276 + void *___retval; \
61277 + intoverflow_t ___x = (intoverflow_t)x; \
61278 + if (WARN(___x > ULONG_MAX, "vzalloc size overflow\n")) \
61279 + ___retval = NULL; \
61280 + else \
61281 + ___retval = vzalloc((unsigned long)___x); \
61282 + ___retval; \
61283 +})
61284 +
61285 +#define __vmalloc(x, y, z) \
61286 +({ \
61287 + void *___retval; \
61288 + intoverflow_t ___x = (intoverflow_t)x; \
61289 + if (WARN(___x > ULONG_MAX, "__vmalloc size overflow\n"))\
61290 + ___retval = NULL; \
61291 + else \
61292 + ___retval = __vmalloc((unsigned long)___x, (y), (z));\
61293 + ___retval; \
61294 +})
61295 +
61296 +#define vmalloc_user(x) \
61297 +({ \
61298 + void *___retval; \
61299 + intoverflow_t ___x = (intoverflow_t)x; \
61300 + if (WARN(___x > ULONG_MAX, "vmalloc_user size overflow\n"))\
61301 + ___retval = NULL; \
61302 + else \
61303 + ___retval = vmalloc_user((unsigned long)___x); \
61304 + ___retval; \
61305 +})
61306 +
61307 +#define vmalloc_exec(x) \
61308 +({ \
61309 + void *___retval; \
61310 + intoverflow_t ___x = (intoverflow_t)x; \
61311 + if (WARN(___x > ULONG_MAX, "vmalloc_exec size overflow\n"))\
61312 + ___retval = NULL; \
61313 + else \
61314 + ___retval = vmalloc_exec((unsigned long)___x); \
61315 + ___retval; \
61316 +})
61317 +
61318 +#define vmalloc_node(x, y) \
61319 +({ \
61320 + void *___retval; \
61321 + intoverflow_t ___x = (intoverflow_t)x; \
61322 + if (WARN(___x > ULONG_MAX, "vmalloc_node size overflow\n"))\
61323 + ___retval = NULL; \
61324 + else \
61325 + ___retval = vmalloc_node((unsigned long)___x, (y));\
61326 + ___retval; \
61327 +})
61328 +
61329 +#define vzalloc_node(x, y) \
61330 +({ \
61331 + void *___retval; \
61332 + intoverflow_t ___x = (intoverflow_t)x; \
61333 + if (WARN(___x > ULONG_MAX, "vzalloc_node size overflow\n"))\
61334 + ___retval = NULL; \
61335 + else \
61336 + ___retval = vzalloc_node((unsigned long)___x, (y));\
61337 + ___retval; \
61338 +})
61339 +
61340 +#define vmalloc_32(x) \
61341 +({ \
61342 + void *___retval; \
61343 + intoverflow_t ___x = (intoverflow_t)x; \
61344 + if (WARN(___x > ULONG_MAX, "vmalloc_32 size overflow\n"))\
61345 + ___retval = NULL; \
61346 + else \
61347 + ___retval = vmalloc_32((unsigned long)___x); \
61348 + ___retval; \
61349 +})
61350 +
61351 +#define vmalloc_32_user(x) \
61352 +({ \
61353 +void *___retval; \
61354 + intoverflow_t ___x = (intoverflow_t)x; \
61355 + if (WARN(___x > ULONG_MAX, "vmalloc_32_user size overflow\n"))\
61356 + ___retval = NULL; \
61357 + else \
61358 + ___retval = vmalloc_32_user((unsigned long)___x);\
61359 + ___retval; \
61360 +})
61361 +
61362 #endif /* _LINUX_VMALLOC_H */
61363 diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
61364 index 65efb92..137adbb 100644
61365 --- a/include/linux/vmstat.h
61366 +++ b/include/linux/vmstat.h
61367 @@ -87,18 +87,18 @@ static inline void vm_events_fold_cpu(int cpu)
61368 /*
61369 * Zone based page accounting with per cpu differentials.
61370 */
61371 -extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
61372 +extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
61373
61374 static inline void zone_page_state_add(long x, struct zone *zone,
61375 enum zone_stat_item item)
61376 {
61377 - atomic_long_add(x, &zone->vm_stat[item]);
61378 - atomic_long_add(x, &vm_stat[item]);
61379 + atomic_long_add_unchecked(x, &zone->vm_stat[item]);
61380 + atomic_long_add_unchecked(x, &vm_stat[item]);
61381 }
61382
61383 static inline unsigned long global_page_state(enum zone_stat_item item)
61384 {
61385 - long x = atomic_long_read(&vm_stat[item]);
61386 + long x = atomic_long_read_unchecked(&vm_stat[item]);
61387 #ifdef CONFIG_SMP
61388 if (x < 0)
61389 x = 0;
61390 @@ -109,7 +109,7 @@ static inline unsigned long global_page_state(enum zone_stat_item item)
61391 static inline unsigned long zone_page_state(struct zone *zone,
61392 enum zone_stat_item item)
61393 {
61394 - long x = atomic_long_read(&zone->vm_stat[item]);
61395 + long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
61396 #ifdef CONFIG_SMP
61397 if (x < 0)
61398 x = 0;
61399 @@ -126,7 +126,7 @@ static inline unsigned long zone_page_state(struct zone *zone,
61400 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
61401 enum zone_stat_item item)
61402 {
61403 - long x = atomic_long_read(&zone->vm_stat[item]);
61404 + long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
61405
61406 #ifdef CONFIG_SMP
61407 int cpu;
61408 @@ -221,8 +221,8 @@ static inline void __mod_zone_page_state(struct zone *zone,
61409
61410 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
61411 {
61412 - atomic_long_inc(&zone->vm_stat[item]);
61413 - atomic_long_inc(&vm_stat[item]);
61414 + atomic_long_inc_unchecked(&zone->vm_stat[item]);
61415 + atomic_long_inc_unchecked(&vm_stat[item]);
61416 }
61417
61418 static inline void __inc_zone_page_state(struct page *page,
61419 @@ -233,8 +233,8 @@ static inline void __inc_zone_page_state(struct page *page,
61420
61421 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
61422 {
61423 - atomic_long_dec(&zone->vm_stat[item]);
61424 - atomic_long_dec(&vm_stat[item]);
61425 + atomic_long_dec_unchecked(&zone->vm_stat[item]);
61426 + atomic_long_dec_unchecked(&vm_stat[item]);
61427 }
61428
61429 static inline void __dec_zone_page_state(struct page *page,
61430 diff --git a/include/linux/xattr.h b/include/linux/xattr.h
61431 index e5d1220..ef6e406 100644
61432 --- a/include/linux/xattr.h
61433 +++ b/include/linux/xattr.h
61434 @@ -57,6 +57,11 @@
61435 #define XATTR_POSIX_ACL_DEFAULT "posix_acl_default"
61436 #define XATTR_NAME_POSIX_ACL_DEFAULT XATTR_SYSTEM_PREFIX XATTR_POSIX_ACL_DEFAULT
61437
61438 +/* User namespace */
61439 +#define XATTR_PAX_PREFIX XATTR_USER_PREFIX "pax."
61440 +#define XATTR_PAX_FLAGS_SUFFIX "flags"
61441 +#define XATTR_NAME_PAX_FLAGS XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX
61442 +
61443 #ifdef __KERNEL__
61444
61445 #include <linux/types.h>
61446 diff --git a/include/media/saa7146_vv.h b/include/media/saa7146_vv.h
61447 index 4aeff96..b378cdc 100644
61448 --- a/include/media/saa7146_vv.h
61449 +++ b/include/media/saa7146_vv.h
61450 @@ -163,7 +163,7 @@ struct saa7146_ext_vv
61451 int (*std_callback)(struct saa7146_dev*, struct saa7146_standard *);
61452
61453 /* the extension can override this */
61454 - struct v4l2_ioctl_ops ops;
61455 + v4l2_ioctl_ops_no_const ops;
61456 /* pointer to the saa7146 core ops */
61457 const struct v4l2_ioctl_ops *core_ops;
61458
61459 diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
61460 index c7c40f1..4f01585 100644
61461 --- a/include/media/v4l2-dev.h
61462 +++ b/include/media/v4l2-dev.h
61463 @@ -56,7 +56,7 @@ int v4l2_prio_check(struct v4l2_prio_state *global, enum v4l2_priority local);
61464
61465
61466 struct v4l2_file_operations {
61467 - struct module *owner;
61468 + struct module * const owner;
61469 ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
61470 ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
61471 unsigned int (*poll) (struct file *, struct poll_table_struct *);
61472 @@ -68,6 +68,7 @@ struct v4l2_file_operations {
61473 int (*open) (struct file *);
61474 int (*release) (struct file *);
61475 };
61476 +typedef struct v4l2_file_operations __no_const v4l2_file_operations_no_const;
61477
61478 /*
61479 * Newer version of video_device, handled by videodev2.c
61480 diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h
61481 index 4d1c74a..65e1221 100644
61482 --- a/include/media/v4l2-ioctl.h
61483 +++ b/include/media/v4l2-ioctl.h
61484 @@ -274,7 +274,7 @@ struct v4l2_ioctl_ops {
61485 long (*vidioc_default) (struct file *file, void *fh,
61486 bool valid_prio, int cmd, void *arg);
61487 };
61488 -
61489 +typedef struct v4l2_ioctl_ops __no_const v4l2_ioctl_ops_no_const;
61490
61491 /* v4l debugging and diagnostics */
61492
61493 diff --git a/include/net/caif/caif_hsi.h b/include/net/caif/caif_hsi.h
61494 index 8d55251..dfe5b0a 100644
61495 --- a/include/net/caif/caif_hsi.h
61496 +++ b/include/net/caif/caif_hsi.h
61497 @@ -98,7 +98,7 @@ struct cfhsi_drv {
61498 void (*rx_done_cb) (struct cfhsi_drv *drv);
61499 void (*wake_up_cb) (struct cfhsi_drv *drv);
61500 void (*wake_down_cb) (struct cfhsi_drv *drv);
61501 -};
61502 +} __no_const;
61503
61504 /* Structure implemented by HSI device. */
61505 struct cfhsi_dev {
61506 diff --git a/include/net/caif/cfctrl.h b/include/net/caif/cfctrl.h
61507 index 9e5425b..8136ffc 100644
61508 --- a/include/net/caif/cfctrl.h
61509 +++ b/include/net/caif/cfctrl.h
61510 @@ -52,7 +52,7 @@ struct cfctrl_rsp {
61511 void (*radioset_rsp)(void);
61512 void (*reject_rsp)(struct cflayer *layer, u8 linkid,
61513 struct cflayer *client_layer);
61514 -};
61515 +} __no_const;
61516
61517 /* Link Setup Parameters for CAIF-Links. */
61518 struct cfctrl_link_param {
61519 @@ -101,8 +101,8 @@ struct cfctrl_request_info {
61520 struct cfctrl {
61521 struct cfsrvl serv;
61522 struct cfctrl_rsp res;
61523 - atomic_t req_seq_no;
61524 - atomic_t rsp_seq_no;
61525 + atomic_unchecked_t req_seq_no;
61526 + atomic_unchecked_t rsp_seq_no;
61527 struct list_head list;
61528 /* Protects from simultaneous access to first_req list */
61529 spinlock_t info_list_lock;
61530 diff --git a/include/net/flow.h b/include/net/flow.h
61531 index 57f15a7..0de26c6 100644
61532 --- a/include/net/flow.h
61533 +++ b/include/net/flow.h
61534 @@ -208,6 +208,6 @@ extern struct flow_cache_object *flow_cache_lookup(
61535
61536 extern void flow_cache_flush(void);
61537 extern void flow_cache_flush_deferred(void);
61538 -extern atomic_t flow_cache_genid;
61539 +extern atomic_unchecked_t flow_cache_genid;
61540
61541 #endif
61542 diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
61543 index e9ff3fc..9d3e5c7 100644
61544 --- a/include/net/inetpeer.h
61545 +++ b/include/net/inetpeer.h
61546 @@ -48,8 +48,8 @@ struct inet_peer {
61547 */
61548 union {
61549 struct {
61550 - atomic_t rid; /* Frag reception counter */
61551 - atomic_t ip_id_count; /* IP ID for the next packet */
61552 + atomic_unchecked_t rid; /* Frag reception counter */
61553 + atomic_unchecked_t ip_id_count; /* IP ID for the next packet */
61554 __u32 tcp_ts;
61555 __u32 tcp_ts_stamp;
61556 };
61557 @@ -113,11 +113,11 @@ static inline int inet_getid(struct inet_peer *p, int more)
61558 more++;
61559 inet_peer_refcheck(p);
61560 do {
61561 - old = atomic_read(&p->ip_id_count);
61562 + old = atomic_read_unchecked(&p->ip_id_count);
61563 new = old + more;
61564 if (!new)
61565 new = 1;
61566 - } while (atomic_cmpxchg(&p->ip_id_count, old, new) != old);
61567 + } while (atomic_cmpxchg_unchecked(&p->ip_id_count, old, new) != old);
61568 return new;
61569 }
61570
61571 diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
61572 index 10422ef..662570f 100644
61573 --- a/include/net/ip_fib.h
61574 +++ b/include/net/ip_fib.h
61575 @@ -146,7 +146,7 @@ extern __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
61576
61577 #define FIB_RES_SADDR(net, res) \
61578 ((FIB_RES_NH(res).nh_saddr_genid == \
61579 - atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
61580 + atomic_read_unchecked(&(net)->ipv4.dev_addr_genid)) ? \
61581 FIB_RES_NH(res).nh_saddr : \
61582 fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
61583 #define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw)
61584 diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
61585 index e5a7b9a..f4fc44b 100644
61586 --- a/include/net/ip_vs.h
61587 +++ b/include/net/ip_vs.h
61588 @@ -509,7 +509,7 @@ struct ip_vs_conn {
61589 struct ip_vs_conn *control; /* Master control connection */
61590 atomic_t n_control; /* Number of controlled ones */
61591 struct ip_vs_dest *dest; /* real server */
61592 - atomic_t in_pkts; /* incoming packet counter */
61593 + atomic_unchecked_t in_pkts; /* incoming packet counter */
61594
61595 /* packet transmitter for different forwarding methods. If it
61596 mangles the packet, it must return NF_DROP or better NF_STOLEN,
61597 @@ -647,7 +647,7 @@ struct ip_vs_dest {
61598 __be16 port; /* port number of the server */
61599 union nf_inet_addr addr; /* IP address of the server */
61600 volatile unsigned flags; /* dest status flags */
61601 - atomic_t conn_flags; /* flags to copy to conn */
61602 + atomic_unchecked_t conn_flags; /* flags to copy to conn */
61603 atomic_t weight; /* server weight */
61604
61605 atomic_t refcnt; /* reference counter */
61606 diff --git a/include/net/irda/ircomm_core.h b/include/net/irda/ircomm_core.h
61607 index 69b610a..fe3962c 100644
61608 --- a/include/net/irda/ircomm_core.h
61609 +++ b/include/net/irda/ircomm_core.h
61610 @@ -51,7 +51,7 @@ typedef struct {
61611 int (*connect_response)(struct ircomm_cb *, struct sk_buff *);
61612 int (*disconnect_request)(struct ircomm_cb *, struct sk_buff *,
61613 struct ircomm_info *);
61614 -} call_t;
61615 +} __no_const call_t;
61616
61617 struct ircomm_cb {
61618 irda_queue_t queue;
61619 diff --git a/include/net/irda/ircomm_tty.h b/include/net/irda/ircomm_tty.h
61620 index 59ba38bc..d515662 100644
61621 --- a/include/net/irda/ircomm_tty.h
61622 +++ b/include/net/irda/ircomm_tty.h
61623 @@ -35,6 +35,7 @@
61624 #include <linux/termios.h>
61625 #include <linux/timer.h>
61626 #include <linux/tty.h> /* struct tty_struct */
61627 +#include <asm/local.h>
61628
61629 #include <net/irda/irias_object.h>
61630 #include <net/irda/ircomm_core.h>
61631 @@ -105,8 +106,8 @@ struct ircomm_tty_cb {
61632 unsigned short close_delay;
61633 unsigned short closing_wait; /* time to wait before closing */
61634
61635 - int open_count;
61636 - int blocked_open; /* # of blocked opens */
61637 + local_t open_count;
61638 + local_t blocked_open; /* # of blocked opens */
61639
61640 /* Protect concurent access to :
61641 * o self->open_count
61642 diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
61643 index f2419cf..473679f 100644
61644 --- a/include/net/iucv/af_iucv.h
61645 +++ b/include/net/iucv/af_iucv.h
61646 @@ -139,7 +139,7 @@ struct iucv_sock {
61647 struct iucv_sock_list {
61648 struct hlist_head head;
61649 rwlock_t lock;
61650 - atomic_t autobind_name;
61651 + atomic_unchecked_t autobind_name;
61652 };
61653
61654 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
61655 diff --git a/include/net/neighbour.h b/include/net/neighbour.h
61656 index 2720884..3aa5c25 100644
61657 --- a/include/net/neighbour.h
61658 +++ b/include/net/neighbour.h
61659 @@ -122,7 +122,7 @@ struct neigh_ops {
61660 void (*error_report)(struct neighbour *, struct sk_buff *);
61661 int (*output)(struct neighbour *, struct sk_buff *);
61662 int (*connected_output)(struct neighbour *, struct sk_buff *);
61663 -};
61664 +} __do_const;
61665
61666 struct pneigh_entry {
61667 struct pneigh_entry *next;
61668 diff --git a/include/net/netlink.h b/include/net/netlink.h
61669 index cb1f350..3279d2c 100644
61670 --- a/include/net/netlink.h
61671 +++ b/include/net/netlink.h
61672 @@ -569,7 +569,7 @@ static inline void *nlmsg_get_pos(struct sk_buff *skb)
61673 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
61674 {
61675 if (mark)
61676 - skb_trim(skb, (unsigned char *) mark - skb->data);
61677 + skb_trim(skb, (const unsigned char *) mark - skb->data);
61678 }
61679
61680 /**
61681 diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
61682 index d786b4f..4c3dd41 100644
61683 --- a/include/net/netns/ipv4.h
61684 +++ b/include/net/netns/ipv4.h
61685 @@ -56,8 +56,8 @@ struct netns_ipv4 {
61686
61687 unsigned int sysctl_ping_group_range[2];
61688
61689 - atomic_t rt_genid;
61690 - atomic_t dev_addr_genid;
61691 + atomic_unchecked_t rt_genid;
61692 + atomic_unchecked_t dev_addr_genid;
61693
61694 #ifdef CONFIG_IP_MROUTE
61695 #ifndef CONFIG_IP_MROUTE_MULTIPLE_TABLES
61696 diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
61697 index 6a72a58..e6a127d 100644
61698 --- a/include/net/sctp/sctp.h
61699 +++ b/include/net/sctp/sctp.h
61700 @@ -318,9 +318,9 @@ do { \
61701
61702 #else /* SCTP_DEBUG */
61703
61704 -#define SCTP_DEBUG_PRINTK(whatever...)
61705 -#define SCTP_DEBUG_PRINTK_CONT(fmt, args...)
61706 -#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
61707 +#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
61708 +#define SCTP_DEBUG_PRINTK_CONT(fmt, args...) do {} while (0)
61709 +#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
61710 #define SCTP_ENABLE_DEBUG
61711 #define SCTP_DISABLE_DEBUG
61712 #define SCTP_ASSERT(expr, str, func)
61713 diff --git a/include/net/sock.h b/include/net/sock.h
61714 index 32e3937..87a1dbc 100644
61715 --- a/include/net/sock.h
61716 +++ b/include/net/sock.h
61717 @@ -277,7 +277,7 @@ struct sock {
61718 #ifdef CONFIG_RPS
61719 __u32 sk_rxhash;
61720 #endif
61721 - atomic_t sk_drops;
61722 + atomic_unchecked_t sk_drops;
61723 int sk_rcvbuf;
61724
61725 struct sk_filter __rcu *sk_filter;
61726 @@ -1402,7 +1402,7 @@ static inline void sk_nocaps_add(struct sock *sk, int flags)
61727 }
61728
61729 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
61730 - char __user *from, char *to,
61731 + char __user *from, unsigned char *to,
61732 int copy, int offset)
61733 {
61734 if (skb->ip_summed == CHECKSUM_NONE) {
61735 diff --git a/include/net/tcp.h b/include/net/tcp.h
61736 index bb18c4d..bb87972 100644
61737 --- a/include/net/tcp.h
61738 +++ b/include/net/tcp.h
61739 @@ -1409,7 +1409,7 @@ struct tcp_seq_afinfo {
61740 char *name;
61741 sa_family_t family;
61742 const struct file_operations *seq_fops;
61743 - struct seq_operations seq_ops;
61744 + seq_operations_no_const seq_ops;
61745 };
61746
61747 struct tcp_iter_state {
61748 diff --git a/include/net/udp.h b/include/net/udp.h
61749 index 3b285f4..0219639 100644
61750 --- a/include/net/udp.h
61751 +++ b/include/net/udp.h
61752 @@ -237,7 +237,7 @@ struct udp_seq_afinfo {
61753 sa_family_t family;
61754 struct udp_table *udp_table;
61755 const struct file_operations *seq_fops;
61756 - struct seq_operations seq_ops;
61757 + seq_operations_no_const seq_ops;
61758 };
61759
61760 struct udp_iter_state {
61761 diff --git a/include/net/xfrm.h b/include/net/xfrm.h
61762 index b203e14..1df3991 100644
61763 --- a/include/net/xfrm.h
61764 +++ b/include/net/xfrm.h
61765 @@ -505,7 +505,7 @@ struct xfrm_policy {
61766 struct timer_list timer;
61767
61768 struct flow_cache_object flo;
61769 - atomic_t genid;
61770 + atomic_unchecked_t genid;
61771 u32 priority;
61772 u32 index;
61773 struct xfrm_mark mark;
61774 diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
61775 index 1a046b1..ee0bef0 100644
61776 --- a/include/rdma/iw_cm.h
61777 +++ b/include/rdma/iw_cm.h
61778 @@ -122,7 +122,7 @@ struct iw_cm_verbs {
61779 int backlog);
61780
61781 int (*destroy_listen)(struct iw_cm_id *cm_id);
61782 -};
61783 +} __no_const;
61784
61785 /**
61786 * iw_create_cm_id - Create an IW CM identifier.
61787 diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
61788 index 5d1a758..1dbf795 100644
61789 --- a/include/scsi/libfc.h
61790 +++ b/include/scsi/libfc.h
61791 @@ -748,6 +748,7 @@ struct libfc_function_template {
61792 */
61793 void (*disc_stop_final) (struct fc_lport *);
61794 };
61795 +typedef struct libfc_function_template __no_const libfc_function_template_no_const;
61796
61797 /**
61798 * struct fc_disc - Discovery context
61799 @@ -851,7 +852,7 @@ struct fc_lport {
61800 struct fc_vport *vport;
61801
61802 /* Operational Information */
61803 - struct libfc_function_template tt;
61804 + libfc_function_template_no_const tt;
61805 u8 link_up;
61806 u8 qfull;
61807 enum fc_lport_state state;
61808 diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
61809 index 5591ed5..13eb457 100644
61810 --- a/include/scsi/scsi_device.h
61811 +++ b/include/scsi/scsi_device.h
61812 @@ -161,9 +161,9 @@ struct scsi_device {
61813 unsigned int max_device_blocked; /* what device_blocked counts down from */
61814 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
61815
61816 - atomic_t iorequest_cnt;
61817 - atomic_t iodone_cnt;
61818 - atomic_t ioerr_cnt;
61819 + atomic_unchecked_t iorequest_cnt;
61820 + atomic_unchecked_t iodone_cnt;
61821 + atomic_unchecked_t ioerr_cnt;
61822
61823 struct device sdev_gendev,
61824 sdev_dev;
61825 diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
61826 index 2a65167..91e01f8 100644
61827 --- a/include/scsi/scsi_transport_fc.h
61828 +++ b/include/scsi/scsi_transport_fc.h
61829 @@ -711,7 +711,7 @@ struct fc_function_template {
61830 unsigned long show_host_system_hostname:1;
61831
61832 unsigned long disable_target_scan:1;
61833 -};
61834 +} __do_const;
61835
61836
61837 /**
61838 diff --git a/include/sound/ak4xxx-adda.h b/include/sound/ak4xxx-adda.h
61839 index 030b87c..98a6954 100644
61840 --- a/include/sound/ak4xxx-adda.h
61841 +++ b/include/sound/ak4xxx-adda.h
61842 @@ -35,7 +35,7 @@ struct snd_ak4xxx_ops {
61843 void (*write)(struct snd_akm4xxx *ak, int chip, unsigned char reg,
61844 unsigned char val);
61845 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
61846 -};
61847 +} __no_const;
61848
61849 #define AK4XXX_IMAGE_SIZE (AK4XXX_MAX_CHIPS * 16) /* 64 bytes */
61850
61851 diff --git a/include/sound/hwdep.h b/include/sound/hwdep.h
61852 index 8c05e47..2b5df97 100644
61853 --- a/include/sound/hwdep.h
61854 +++ b/include/sound/hwdep.h
61855 @@ -49,7 +49,7 @@ struct snd_hwdep_ops {
61856 struct snd_hwdep_dsp_status *status);
61857 int (*dsp_load)(struct snd_hwdep *hw,
61858 struct snd_hwdep_dsp_image *image);
61859 -};
61860 +} __no_const;
61861
61862 struct snd_hwdep {
61863 struct snd_card *card;
61864 diff --git a/include/sound/info.h b/include/sound/info.h
61865 index 5492cc4..1a65278 100644
61866 --- a/include/sound/info.h
61867 +++ b/include/sound/info.h
61868 @@ -44,7 +44,7 @@ struct snd_info_entry_text {
61869 struct snd_info_buffer *buffer);
61870 void (*write)(struct snd_info_entry *entry,
61871 struct snd_info_buffer *buffer);
61872 -};
61873 +} __no_const;
61874
61875 struct snd_info_entry_ops {
61876 int (*open)(struct snd_info_entry *entry,
61877 diff --git a/include/sound/pcm.h b/include/sound/pcm.h
61878 index 0cf91b2..b70cae4 100644
61879 --- a/include/sound/pcm.h
61880 +++ b/include/sound/pcm.h
61881 @@ -81,6 +81,7 @@ struct snd_pcm_ops {
61882 int (*mmap)(struct snd_pcm_substream *substream, struct vm_area_struct *vma);
61883 int (*ack)(struct snd_pcm_substream *substream);
61884 };
61885 +typedef struct snd_pcm_ops __no_const snd_pcm_ops_no_const;
61886
61887 /*
61888 *
61889 diff --git a/include/sound/sb16_csp.h b/include/sound/sb16_csp.h
61890 index af1b49e..a5d55a5 100644
61891 --- a/include/sound/sb16_csp.h
61892 +++ b/include/sound/sb16_csp.h
61893 @@ -146,7 +146,7 @@ struct snd_sb_csp_ops {
61894 int (*csp_start) (struct snd_sb_csp * p, int sample_width, int channels);
61895 int (*csp_stop) (struct snd_sb_csp * p);
61896 int (*csp_qsound_transfer) (struct snd_sb_csp * p);
61897 -};
61898 +} __no_const;
61899
61900 /*
61901 * CSP private data
61902 diff --git a/include/sound/soc.h b/include/sound/soc.h
61903 index 11cfb59..e3f93f4 100644
61904 --- a/include/sound/soc.h
61905 +++ b/include/sound/soc.h
61906 @@ -683,7 +683,7 @@ struct snd_soc_platform_driver {
61907 /* platform IO - used for platform DAPM */
61908 unsigned int (*read)(struct snd_soc_platform *, unsigned int);
61909 int (*write)(struct snd_soc_platform *, unsigned int, unsigned int);
61910 -};
61911 +} __do_const;
61912
61913 struct snd_soc_platform {
61914 const char *name;
61915 diff --git a/include/sound/ymfpci.h b/include/sound/ymfpci.h
61916 index 444cd6b..3327cc5 100644
61917 --- a/include/sound/ymfpci.h
61918 +++ b/include/sound/ymfpci.h
61919 @@ -358,7 +358,7 @@ struct snd_ymfpci {
61920 spinlock_t reg_lock;
61921 spinlock_t voice_lock;
61922 wait_queue_head_t interrupt_sleep;
61923 - atomic_t interrupt_sleep_count;
61924 + atomic_unchecked_t interrupt_sleep_count;
61925 struct snd_info_entry *proc_entry;
61926 const struct firmware *dsp_microcode;
61927 const struct firmware *controller_microcode;
61928 diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
61929 index a79886c..b483af6 100644
61930 --- a/include/target/target_core_base.h
61931 +++ b/include/target/target_core_base.h
61932 @@ -346,7 +346,7 @@ struct t10_reservation_ops {
61933 int (*t10_seq_non_holder)(struct se_cmd *, unsigned char *, u32);
61934 int (*t10_pr_register)(struct se_cmd *);
61935 int (*t10_pr_clear)(struct se_cmd *);
61936 -};
61937 +} __no_const;
61938
61939 struct t10_reservation {
61940 /* Reservation effects all target ports */
61941 @@ -465,8 +465,8 @@ struct se_cmd {
61942 atomic_t t_se_count;
61943 atomic_t t_task_cdbs_left;
61944 atomic_t t_task_cdbs_ex_left;
61945 - atomic_t t_task_cdbs_sent;
61946 - atomic_t t_transport_aborted;
61947 + atomic_unchecked_t t_task_cdbs_sent;
61948 + atomic_unchecked_t t_transport_aborted;
61949 atomic_t t_transport_active;
61950 atomic_t t_transport_complete;
61951 atomic_t t_transport_queue_active;
61952 @@ -704,7 +704,7 @@ struct se_device {
61953 /* Active commands on this virtual SE device */
61954 atomic_t simple_cmds;
61955 atomic_t depth_left;
61956 - atomic_t dev_ordered_id;
61957 + atomic_unchecked_t dev_ordered_id;
61958 atomic_t execute_tasks;
61959 atomic_t dev_ordered_sync;
61960 atomic_t dev_qf_count;
61961 diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
61962 index 1c09820..7f5ec79 100644
61963 --- a/include/trace/events/irq.h
61964 +++ b/include/trace/events/irq.h
61965 @@ -36,7 +36,7 @@ struct softirq_action;
61966 */
61967 TRACE_EVENT(irq_handler_entry,
61968
61969 - TP_PROTO(int irq, struct irqaction *action),
61970 + TP_PROTO(int irq, const struct irqaction *action),
61971
61972 TP_ARGS(irq, action),
61973
61974 @@ -66,7 +66,7 @@ TRACE_EVENT(irq_handler_entry,
61975 */
61976 TRACE_EVENT(irq_handler_exit,
61977
61978 - TP_PROTO(int irq, struct irqaction *action, int ret),
61979 + TP_PROTO(int irq, const struct irqaction *action, int ret),
61980
61981 TP_ARGS(irq, action, ret),
61982
61983 diff --git a/include/video/udlfb.h b/include/video/udlfb.h
61984 index c41f308..6918de3 100644
61985 --- a/include/video/udlfb.h
61986 +++ b/include/video/udlfb.h
61987 @@ -52,10 +52,10 @@ struct dlfb_data {
61988 u32 pseudo_palette[256];
61989 int blank_mode; /*one of FB_BLANK_ */
61990 /* blit-only rendering path metrics, exposed through sysfs */
61991 - atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
61992 - atomic_t bytes_identical; /* saved effort with backbuffer comparison */
61993 - atomic_t bytes_sent; /* to usb, after compression including overhead */
61994 - atomic_t cpu_kcycles_used; /* transpired during pixel processing */
61995 + atomic_unchecked_t bytes_rendered; /* raw pixel-bytes driver asked to render */
61996 + atomic_unchecked_t bytes_identical; /* saved effort with backbuffer comparison */
61997 + atomic_unchecked_t bytes_sent; /* to usb, after compression including overhead */
61998 + atomic_unchecked_t cpu_kcycles_used; /* transpired during pixel processing */
61999 };
62000
62001 #define NR_USB_REQUEST_I2C_SUB_IO 0x02
62002 diff --git a/include/video/uvesafb.h b/include/video/uvesafb.h
62003 index 0993a22..32ba2fe 100644
62004 --- a/include/video/uvesafb.h
62005 +++ b/include/video/uvesafb.h
62006 @@ -177,6 +177,7 @@ struct uvesafb_par {
62007 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
62008 u8 pmi_setpal; /* PMI for palette changes */
62009 u16 *pmi_base; /* protected mode interface location */
62010 + u8 *pmi_code; /* protected mode code location */
62011 void *pmi_start;
62012 void *pmi_pal;
62013 u8 *vbe_state_orig; /*
62014 diff --git a/init/Kconfig b/init/Kconfig
62015 index 43298f9..2f56c12 100644
62016 --- a/init/Kconfig
62017 +++ b/init/Kconfig
62018 @@ -1214,7 +1214,7 @@ config SLUB_DEBUG
62019
62020 config COMPAT_BRK
62021 bool "Disable heap randomization"
62022 - default y
62023 + default n
62024 help
62025 Randomizing heap placement makes heap exploits harder, but it
62026 also breaks ancient binaries (including anything libc5 based).
62027 diff --git a/init/do_mounts.c b/init/do_mounts.c
62028 index db6e5ee..7677ff7 100644
62029 --- a/init/do_mounts.c
62030 +++ b/init/do_mounts.c
62031 @@ -325,11 +325,11 @@ static void __init get_fs_names(char *page)
62032
62033 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
62034 {
62035 - int err = sys_mount(name, "/root", fs, flags, data);
62036 + int err = sys_mount((char __force_user *)name, (char __force_user *)"/root", (char __force_user *)fs, flags, (void __force_user *)data);
62037 if (err)
62038 return err;
62039
62040 - sys_chdir((const char __user __force *)"/root");
62041 + sys_chdir((const char __force_user*)"/root");
62042 ROOT_DEV = current->fs->pwd.mnt->mnt_sb->s_dev;
62043 printk(KERN_INFO
62044 "VFS: Mounted root (%s filesystem)%s on device %u:%u.\n",
62045 @@ -448,18 +448,18 @@ void __init change_floppy(char *fmt, ...)
62046 va_start(args, fmt);
62047 vsprintf(buf, fmt, args);
62048 va_end(args);
62049 - fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
62050 + fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
62051 if (fd >= 0) {
62052 sys_ioctl(fd, FDEJECT, 0);
62053 sys_close(fd);
62054 }
62055 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
62056 - fd = sys_open("/dev/console", O_RDWR, 0);
62057 + fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0);
62058 if (fd >= 0) {
62059 sys_ioctl(fd, TCGETS, (long)&termios);
62060 termios.c_lflag &= ~ICANON;
62061 sys_ioctl(fd, TCSETSF, (long)&termios);
62062 - sys_read(fd, &c, 1);
62063 + sys_read(fd, (char __user *)&c, 1);
62064 termios.c_lflag |= ICANON;
62065 sys_ioctl(fd, TCSETSF, (long)&termios);
62066 sys_close(fd);
62067 @@ -553,6 +553,6 @@ void __init prepare_namespace(void)
62068 mount_root();
62069 out:
62070 devtmpfs_mount("dev");
62071 - sys_mount(".", "/", NULL, MS_MOVE, NULL);
62072 - sys_chroot((const char __user __force *)".");
62073 + sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
62074 + sys_chroot((const char __force_user *)".");
62075 }
62076 diff --git a/init/do_mounts.h b/init/do_mounts.h
62077 index f5b978a..69dbfe8 100644
62078 --- a/init/do_mounts.h
62079 +++ b/init/do_mounts.h
62080 @@ -15,15 +15,15 @@ extern int root_mountflags;
62081
62082 static inline int create_dev(char *name, dev_t dev)
62083 {
62084 - sys_unlink(name);
62085 - return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
62086 + sys_unlink((char __force_user *)name);
62087 + return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev));
62088 }
62089
62090 #if BITS_PER_LONG == 32
62091 static inline u32 bstat(char *name)
62092 {
62093 struct stat64 stat;
62094 - if (sys_stat64(name, &stat) != 0)
62095 + if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0)
62096 return 0;
62097 if (!S_ISBLK(stat.st_mode))
62098 return 0;
62099 @@ -35,7 +35,7 @@ static inline u32 bstat(char *name)
62100 static inline u32 bstat(char *name)
62101 {
62102 struct stat stat;
62103 - if (sys_newstat(name, &stat) != 0)
62104 + if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0)
62105 return 0;
62106 if (!S_ISBLK(stat.st_mode))
62107 return 0;
62108 diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
62109 index 3098a38..253064e 100644
62110 --- a/init/do_mounts_initrd.c
62111 +++ b/init/do_mounts_initrd.c
62112 @@ -44,13 +44,13 @@ static void __init handle_initrd(void)
62113 create_dev("/dev/root.old", Root_RAM0);
62114 /* mount initrd on rootfs' /root */
62115 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
62116 - sys_mkdir("/old", 0700);
62117 - root_fd = sys_open("/", 0, 0);
62118 - old_fd = sys_open("/old", 0, 0);
62119 + sys_mkdir((const char __force_user *)"/old", 0700);
62120 + root_fd = sys_open((const char __force_user *)"/", 0, 0);
62121 + old_fd = sys_open((const char __force_user *)"/old", 0, 0);
62122 /* move initrd over / and chdir/chroot in initrd root */
62123 - sys_chdir("/root");
62124 - sys_mount(".", "/", NULL, MS_MOVE, NULL);
62125 - sys_chroot(".");
62126 + sys_chdir((const char __force_user *)"/root");
62127 + sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
62128 + sys_chroot((const char __force_user *)".");
62129
62130 /*
62131 * In case that a resume from disk is carried out by linuxrc or one of
62132 @@ -67,15 +67,15 @@ static void __init handle_initrd(void)
62133
62134 /* move initrd to rootfs' /old */
62135 sys_fchdir(old_fd);
62136 - sys_mount("/", ".", NULL, MS_MOVE, NULL);
62137 + sys_mount((char __force_user *)"/", (char __force_user *)".", NULL, MS_MOVE, NULL);
62138 /* switch root and cwd back to / of rootfs */
62139 sys_fchdir(root_fd);
62140 - sys_chroot(".");
62141 + sys_chroot((const char __force_user *)".");
62142 sys_close(old_fd);
62143 sys_close(root_fd);
62144
62145 if (new_decode_dev(real_root_dev) == Root_RAM0) {
62146 - sys_chdir("/old");
62147 + sys_chdir((const char __force_user *)"/old");
62148 return;
62149 }
62150
62151 @@ -83,17 +83,17 @@ static void __init handle_initrd(void)
62152 mount_root();
62153
62154 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
62155 - error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
62156 + error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL);
62157 if (!error)
62158 printk("okay\n");
62159 else {
62160 - int fd = sys_open("/dev/root.old", O_RDWR, 0);
62161 + int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0);
62162 if (error == -ENOENT)
62163 printk("/initrd does not exist. Ignored.\n");
62164 else
62165 printk("failed\n");
62166 printk(KERN_NOTICE "Unmounting old root\n");
62167 - sys_umount("/old", MNT_DETACH);
62168 + sys_umount((char __force_user *)"/old", MNT_DETACH);
62169 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
62170 if (fd < 0) {
62171 error = fd;
62172 @@ -116,11 +116,11 @@ int __init initrd_load(void)
62173 * mounted in the normal path.
62174 */
62175 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
62176 - sys_unlink("/initrd.image");
62177 + sys_unlink((const char __force_user *)"/initrd.image");
62178 handle_initrd();
62179 return 1;
62180 }
62181 }
62182 - sys_unlink("/initrd.image");
62183 + sys_unlink((const char __force_user *)"/initrd.image");
62184 return 0;
62185 }
62186 diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
62187 index 32c4799..c27ee74 100644
62188 --- a/init/do_mounts_md.c
62189 +++ b/init/do_mounts_md.c
62190 @@ -170,7 +170,7 @@ static void __init md_setup_drive(void)
62191 partitioned ? "_d" : "", minor,
62192 md_setup_args[ent].device_names);
62193
62194 - fd = sys_open(name, 0, 0);
62195 + fd = sys_open((char __force_user *)name, 0, 0);
62196 if (fd < 0) {
62197 printk(KERN_ERR "md: open failed - cannot start "
62198 "array %s\n", name);
62199 @@ -233,7 +233,7 @@ static void __init md_setup_drive(void)
62200 * array without it
62201 */
62202 sys_close(fd);
62203 - fd = sys_open(name, 0, 0);
62204 + fd = sys_open((char __force_user *)name, 0, 0);
62205 sys_ioctl(fd, BLKRRPART, 0);
62206 }
62207 sys_close(fd);
62208 @@ -283,7 +283,7 @@ static void __init autodetect_raid(void)
62209
62210 wait_for_device_probe();
62211
62212 - fd = sys_open((const char __user __force *) "/dev/md0", 0, 0);
62213 + fd = sys_open((const char __force_user *) "/dev/md0", 0, 0);
62214 if (fd >= 0) {
62215 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
62216 sys_close(fd);
62217 diff --git a/init/initramfs.c b/init/initramfs.c
62218 index 2531811..040d4d4 100644
62219 --- a/init/initramfs.c
62220 +++ b/init/initramfs.c
62221 @@ -74,7 +74,7 @@ static void __init free_hash(void)
62222 }
62223 }
62224
62225 -static long __init do_utime(char __user *filename, time_t mtime)
62226 +static long __init do_utime(__force char __user *filename, time_t mtime)
62227 {
62228 struct timespec t[2];
62229
62230 @@ -109,7 +109,7 @@ static void __init dir_utime(void)
62231 struct dir_entry *de, *tmp;
62232 list_for_each_entry_safe(de, tmp, &dir_list, list) {
62233 list_del(&de->list);
62234 - do_utime(de->name, de->mtime);
62235 + do_utime((char __force_user *)de->name, de->mtime);
62236 kfree(de->name);
62237 kfree(de);
62238 }
62239 @@ -271,7 +271,7 @@ static int __init maybe_link(void)
62240 if (nlink >= 2) {
62241 char *old = find_link(major, minor, ino, mode, collected);
62242 if (old)
62243 - return (sys_link(old, collected) < 0) ? -1 : 1;
62244 + return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? -1 : 1;
62245 }
62246 return 0;
62247 }
62248 @@ -280,11 +280,11 @@ static void __init clean_path(char *path, mode_t mode)
62249 {
62250 struct stat st;
62251
62252 - if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
62253 + if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode^mode) & S_IFMT) {
62254 if (S_ISDIR(st.st_mode))
62255 - sys_rmdir(path);
62256 + sys_rmdir((char __force_user *)path);
62257 else
62258 - sys_unlink(path);
62259 + sys_unlink((char __force_user *)path);
62260 }
62261 }
62262
62263 @@ -305,7 +305,7 @@ static int __init do_name(void)
62264 int openflags = O_WRONLY|O_CREAT;
62265 if (ml != 1)
62266 openflags |= O_TRUNC;
62267 - wfd = sys_open(collected, openflags, mode);
62268 + wfd = sys_open((char __force_user *)collected, openflags, mode);
62269
62270 if (wfd >= 0) {
62271 sys_fchown(wfd, uid, gid);
62272 @@ -317,17 +317,17 @@ static int __init do_name(void)
62273 }
62274 }
62275 } else if (S_ISDIR(mode)) {
62276 - sys_mkdir(collected, mode);
62277 - sys_chown(collected, uid, gid);
62278 - sys_chmod(collected, mode);
62279 + sys_mkdir((char __force_user *)collected, mode);
62280 + sys_chown((char __force_user *)collected, uid, gid);
62281 + sys_chmod((char __force_user *)collected, mode);
62282 dir_add(collected, mtime);
62283 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
62284 S_ISFIFO(mode) || S_ISSOCK(mode)) {
62285 if (maybe_link() == 0) {
62286 - sys_mknod(collected, mode, rdev);
62287 - sys_chown(collected, uid, gid);
62288 - sys_chmod(collected, mode);
62289 - do_utime(collected, mtime);
62290 + sys_mknod((char __force_user *)collected, mode, rdev);
62291 + sys_chown((char __force_user *)collected, uid, gid);
62292 + sys_chmod((char __force_user *)collected, mode);
62293 + do_utime((char __force_user *)collected, mtime);
62294 }
62295 }
62296 return 0;
62297 @@ -336,15 +336,15 @@ static int __init do_name(void)
62298 static int __init do_copy(void)
62299 {
62300 if (count >= body_len) {
62301 - sys_write(wfd, victim, body_len);
62302 + sys_write(wfd, (char __force_user *)victim, body_len);
62303 sys_close(wfd);
62304 - do_utime(vcollected, mtime);
62305 + do_utime((char __force_user *)vcollected, mtime);
62306 kfree(vcollected);
62307 eat(body_len);
62308 state = SkipIt;
62309 return 0;
62310 } else {
62311 - sys_write(wfd, victim, count);
62312 + sys_write(wfd, (char __force_user *)victim, count);
62313 body_len -= count;
62314 eat(count);
62315 return 1;
62316 @@ -355,9 +355,9 @@ static int __init do_symlink(void)
62317 {
62318 collected[N_ALIGN(name_len) + body_len] = '\0';
62319 clean_path(collected, 0);
62320 - sys_symlink(collected + N_ALIGN(name_len), collected);
62321 - sys_lchown(collected, uid, gid);
62322 - do_utime(collected, mtime);
62323 + sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected);
62324 + sys_lchown((char __force_user *)collected, uid, gid);
62325 + do_utime((char __force_user *)collected, mtime);
62326 state = SkipIt;
62327 next_state = Reset;
62328 return 0;
62329 diff --git a/init/main.c b/init/main.c
62330 index 217ed23..ec5406f 100644
62331 --- a/init/main.c
62332 +++ b/init/main.c
62333 @@ -96,6 +96,8 @@ static inline void mark_rodata_ro(void) { }
62334 extern void tc_init(void);
62335 #endif
62336
62337 +extern void grsecurity_init(void);
62338 +
62339 /*
62340 * Debug helper: via this flag we know that we are in 'early bootup code'
62341 * where only the boot processor is running with IRQ disabled. This means
62342 @@ -149,6 +151,49 @@ static int __init set_reset_devices(char *str)
62343
62344 __setup("reset_devices", set_reset_devices);
62345
62346 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
62347 +extern char pax_enter_kernel_user[];
62348 +extern char pax_exit_kernel_user[];
62349 +extern pgdval_t clone_pgd_mask;
62350 +#endif
62351 +
62352 +#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
62353 +static int __init setup_pax_nouderef(char *str)
62354 +{
62355 +#ifdef CONFIG_X86_32
62356 + unsigned int cpu;
62357 + struct desc_struct *gdt;
62358 +
62359 + for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
62360 + gdt = get_cpu_gdt_table(cpu);
62361 + gdt[GDT_ENTRY_KERNEL_DS].type = 3;
62362 + gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
62363 + gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
62364 + gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
62365 + }
62366 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
62367 +#else
62368 + memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
62369 + memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
62370 + clone_pgd_mask = ~(pgdval_t)0UL;
62371 +#endif
62372 +
62373 + return 0;
62374 +}
62375 +early_param("pax_nouderef", setup_pax_nouderef);
62376 +#endif
62377 +
62378 +#ifdef CONFIG_PAX_SOFTMODE
62379 +int pax_softmode;
62380 +
62381 +static int __init setup_pax_softmode(char *str)
62382 +{
62383 + get_option(&str, &pax_softmode);
62384 + return 1;
62385 +}
62386 +__setup("pax_softmode=", setup_pax_softmode);
62387 +#endif
62388 +
62389 static const char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
62390 const char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
62391 static const char *panic_later, *panic_param;
62392 @@ -681,6 +726,7 @@ int __init_or_module do_one_initcall(initcall_t fn)
62393 {
62394 int count = preempt_count();
62395 int ret;
62396 + const char *msg1 = "", *msg2 = "";
62397
62398 if (initcall_debug)
62399 ret = do_one_initcall_debug(fn);
62400 @@ -693,15 +739,15 @@ int __init_or_module do_one_initcall(initcall_t fn)
62401 sprintf(msgbuf, "error code %d ", ret);
62402
62403 if (preempt_count() != count) {
62404 - strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
62405 + msg1 = " preemption imbalance";
62406 preempt_count() = count;
62407 }
62408 if (irqs_disabled()) {
62409 - strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
62410 + msg2 = " disabled interrupts";
62411 local_irq_enable();
62412 }
62413 - if (msgbuf[0]) {
62414 - printk("initcall %pF returned with %s\n", fn, msgbuf);
62415 + if (msgbuf[0] || *msg1 || *msg2) {
62416 + printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
62417 }
62418
62419 return ret;
62420 @@ -820,7 +866,7 @@ static int __init kernel_init(void * unused)
62421 do_basic_setup();
62422
62423 /* Open the /dev/console on the rootfs, this should never fail */
62424 - if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
62425 + if (sys_open((const char __force_user *) "/dev/console", O_RDWR, 0) < 0)
62426 printk(KERN_WARNING "Warning: unable to open an initial console.\n");
62427
62428 (void) sys_dup(0);
62429 @@ -833,11 +879,13 @@ static int __init kernel_init(void * unused)
62430 if (!ramdisk_execute_command)
62431 ramdisk_execute_command = "/init";
62432
62433 - if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
62434 + if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) {
62435 ramdisk_execute_command = NULL;
62436 prepare_namespace();
62437 }
62438
62439 + grsecurity_init();
62440 +
62441 /*
62442 * Ok, we have completed the initial bootup, and
62443 * we're essentially up and running. Get rid of the
62444 diff --git a/ipc/mqueue.c b/ipc/mqueue.c
62445 index 5b4293d..f179875 100644
62446 --- a/ipc/mqueue.c
62447 +++ b/ipc/mqueue.c
62448 @@ -156,6 +156,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
62449 mq_bytes = (mq_msg_tblsz +
62450 (info->attr.mq_maxmsg * info->attr.mq_msgsize));
62451
62452 + gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
62453 spin_lock(&mq_lock);
62454 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
62455 u->mq_bytes + mq_bytes > task_rlimit(p, RLIMIT_MSGQUEUE)) {
62456 diff --git a/ipc/msg.c b/ipc/msg.c
62457 index 7385de2..a8180e0 100644
62458 --- a/ipc/msg.c
62459 +++ b/ipc/msg.c
62460 @@ -309,18 +309,19 @@ static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg)
62461 return security_msg_queue_associate(msq, msgflg);
62462 }
62463
62464 +static struct ipc_ops msg_ops = {
62465 + .getnew = newque,
62466 + .associate = msg_security,
62467 + .more_checks = NULL
62468 +};
62469 +
62470 SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
62471 {
62472 struct ipc_namespace *ns;
62473 - struct ipc_ops msg_ops;
62474 struct ipc_params msg_params;
62475
62476 ns = current->nsproxy->ipc_ns;
62477
62478 - msg_ops.getnew = newque;
62479 - msg_ops.associate = msg_security;
62480 - msg_ops.more_checks = NULL;
62481 -
62482 msg_params.key = key;
62483 msg_params.flg = msgflg;
62484
62485 diff --git a/ipc/sem.c b/ipc/sem.c
62486 index 5215a81..cfc0cac 100644
62487 --- a/ipc/sem.c
62488 +++ b/ipc/sem.c
62489 @@ -364,10 +364,15 @@ static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
62490 return 0;
62491 }
62492
62493 +static struct ipc_ops sem_ops = {
62494 + .getnew = newary,
62495 + .associate = sem_security,
62496 + .more_checks = sem_more_checks
62497 +};
62498 +
62499 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
62500 {
62501 struct ipc_namespace *ns;
62502 - struct ipc_ops sem_ops;
62503 struct ipc_params sem_params;
62504
62505 ns = current->nsproxy->ipc_ns;
62506 @@ -375,10 +380,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
62507 if (nsems < 0 || nsems > ns->sc_semmsl)
62508 return -EINVAL;
62509
62510 - sem_ops.getnew = newary;
62511 - sem_ops.associate = sem_security;
62512 - sem_ops.more_checks = sem_more_checks;
62513 -
62514 sem_params.key = key;
62515 sem_params.flg = semflg;
62516 sem_params.u.nsems = nsems;
62517 diff --git a/ipc/shm.c b/ipc/shm.c
62518 index b76be5b..859e750 100644
62519 --- a/ipc/shm.c
62520 +++ b/ipc/shm.c
62521 @@ -69,6 +69,14 @@ static void shm_destroy (struct ipc_namespace *ns, struct shmid_kernel *shp);
62522 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
62523 #endif
62524
62525 +#ifdef CONFIG_GRKERNSEC
62526 +extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
62527 + const time_t shm_createtime, const uid_t cuid,
62528 + const int shmid);
62529 +extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
62530 + const time_t shm_createtime);
62531 +#endif
62532 +
62533 void shm_init_ns(struct ipc_namespace *ns)
62534 {
62535 ns->shm_ctlmax = SHMMAX;
62536 @@ -508,6 +516,14 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
62537 shp->shm_lprid = 0;
62538 shp->shm_atim = shp->shm_dtim = 0;
62539 shp->shm_ctim = get_seconds();
62540 +#ifdef CONFIG_GRKERNSEC
62541 + {
62542 + struct timespec timeval;
62543 + do_posix_clock_monotonic_gettime(&timeval);
62544 +
62545 + shp->shm_createtime = timeval.tv_sec;
62546 + }
62547 +#endif
62548 shp->shm_segsz = size;
62549 shp->shm_nattch = 0;
62550 shp->shm_file = file;
62551 @@ -559,18 +575,19 @@ static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
62552 return 0;
62553 }
62554
62555 +static struct ipc_ops shm_ops = {
62556 + .getnew = newseg,
62557 + .associate = shm_security,
62558 + .more_checks = shm_more_checks
62559 +};
62560 +
62561 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
62562 {
62563 struct ipc_namespace *ns;
62564 - struct ipc_ops shm_ops;
62565 struct ipc_params shm_params;
62566
62567 ns = current->nsproxy->ipc_ns;
62568
62569 - shm_ops.getnew = newseg;
62570 - shm_ops.associate = shm_security;
62571 - shm_ops.more_checks = shm_more_checks;
62572 -
62573 shm_params.key = key;
62574 shm_params.flg = shmflg;
62575 shm_params.u.size = size;
62576 @@ -988,6 +1005,12 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
62577 f_mode = FMODE_READ | FMODE_WRITE;
62578 }
62579 if (shmflg & SHM_EXEC) {
62580 +
62581 +#ifdef CONFIG_PAX_MPROTECT
62582 + if (current->mm->pax_flags & MF_PAX_MPROTECT)
62583 + goto out;
62584 +#endif
62585 +
62586 prot |= PROT_EXEC;
62587 acc_mode |= S_IXUGO;
62588 }
62589 @@ -1011,9 +1034,21 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
62590 if (err)
62591 goto out_unlock;
62592
62593 +#ifdef CONFIG_GRKERNSEC
62594 + if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
62595 + shp->shm_perm.cuid, shmid) ||
62596 + !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
62597 + err = -EACCES;
62598 + goto out_unlock;
62599 + }
62600 +#endif
62601 +
62602 path = shp->shm_file->f_path;
62603 path_get(&path);
62604 shp->shm_nattch++;
62605 +#ifdef CONFIG_GRKERNSEC
62606 + shp->shm_lapid = current->pid;
62607 +#endif
62608 size = i_size_read(path.dentry->d_inode);
62609 shm_unlock(shp);
62610
62611 diff --git a/kernel/acct.c b/kernel/acct.c
62612 index fa7eb3d..7faf116 100644
62613 --- a/kernel/acct.c
62614 +++ b/kernel/acct.c
62615 @@ -570,7 +570,7 @@ static void do_acct_process(struct bsd_acct_struct *acct,
62616 */
62617 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
62618 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
62619 - file->f_op->write(file, (char *)&ac,
62620 + file->f_op->write(file, (char __force_user *)&ac,
62621 sizeof(acct_t), &file->f_pos);
62622 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
62623 set_fs(fs);
62624 diff --git a/kernel/audit.c b/kernel/audit.c
62625 index 09fae26..ed71d5b 100644
62626 --- a/kernel/audit.c
62627 +++ b/kernel/audit.c
62628 @@ -115,7 +115,7 @@ u32 audit_sig_sid = 0;
62629 3) suppressed due to audit_rate_limit
62630 4) suppressed due to audit_backlog_limit
62631 */
62632 -static atomic_t audit_lost = ATOMIC_INIT(0);
62633 +static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
62634
62635 /* The netlink socket. */
62636 static struct sock *audit_sock;
62637 @@ -237,7 +237,7 @@ void audit_log_lost(const char *message)
62638 unsigned long now;
62639 int print;
62640
62641 - atomic_inc(&audit_lost);
62642 + atomic_inc_unchecked(&audit_lost);
62643
62644 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
62645
62646 @@ -256,7 +256,7 @@ void audit_log_lost(const char *message)
62647 printk(KERN_WARNING
62648 "audit: audit_lost=%d audit_rate_limit=%d "
62649 "audit_backlog_limit=%d\n",
62650 - atomic_read(&audit_lost),
62651 + atomic_read_unchecked(&audit_lost),
62652 audit_rate_limit,
62653 audit_backlog_limit);
62654 audit_panic(message);
62655 @@ -689,7 +689,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
62656 status_set.pid = audit_pid;
62657 status_set.rate_limit = audit_rate_limit;
62658 status_set.backlog_limit = audit_backlog_limit;
62659 - status_set.lost = atomic_read(&audit_lost);
62660 + status_set.lost = atomic_read_unchecked(&audit_lost);
62661 status_set.backlog = skb_queue_len(&audit_skb_queue);
62662 audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_GET, 0, 0,
62663 &status_set, sizeof(status_set));
62664 @@ -1260,12 +1260,13 @@ static void audit_log_vformat(struct audit_buffer *ab, const char *fmt,
62665 avail = audit_expand(ab,
62666 max_t(unsigned, AUDIT_BUFSIZ, 1+len-avail));
62667 if (!avail)
62668 - goto out;
62669 + goto out_va_end;
62670 len = vsnprintf(skb_tail_pointer(skb), avail, fmt, args2);
62671 }
62672 - va_end(args2);
62673 if (len > 0)
62674 skb_put(skb, len);
62675 +out_va_end:
62676 + va_end(args2);
62677 out:
62678 return;
62679 }
62680 diff --git a/kernel/auditsc.c b/kernel/auditsc.c
62681 index 47b7fc1..c003c33 100644
62682 --- a/kernel/auditsc.c
62683 +++ b/kernel/auditsc.c
62684 @@ -1166,8 +1166,8 @@ static void audit_log_execve_info(struct audit_context *context,
62685 struct audit_buffer **ab,
62686 struct audit_aux_data_execve *axi)
62687 {
62688 - int i;
62689 - size_t len, len_sent = 0;
62690 + int i, len;
62691 + size_t len_sent = 0;
62692 const char __user *p;
62693 char *buf;
62694
62695 @@ -2118,7 +2118,7 @@ int auditsc_get_stamp(struct audit_context *ctx,
62696 }
62697
62698 /* global counter which is incremented every time something logs in */
62699 -static atomic_t session_id = ATOMIC_INIT(0);
62700 +static atomic_unchecked_t session_id = ATOMIC_INIT(0);
62701
62702 /**
62703 * audit_set_loginuid - set a task's audit_context loginuid
62704 @@ -2131,7 +2131,7 @@ static atomic_t session_id = ATOMIC_INIT(0);
62705 */
62706 int audit_set_loginuid(struct task_struct *task, uid_t loginuid)
62707 {
62708 - unsigned int sessionid = atomic_inc_return(&session_id);
62709 + unsigned int sessionid = atomic_inc_return_unchecked(&session_id);
62710 struct audit_context *context = task->audit_context;
62711
62712 if (context && context->in_syscall) {
62713 diff --git a/kernel/capability.c b/kernel/capability.c
62714 index b463871..fa3ea1f 100644
62715 --- a/kernel/capability.c
62716 +++ b/kernel/capability.c
62717 @@ -202,6 +202,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_t, header, cap_user_data_t, dataptr)
62718 * before modification is attempted and the application
62719 * fails.
62720 */
62721 + if (tocopy > ARRAY_SIZE(kdata))
62722 + return -EFAULT;
62723 +
62724 if (copy_to_user(dataptr, kdata, tocopy
62725 * sizeof(struct __user_cap_data_struct))) {
62726 return -EFAULT;
62727 @@ -374,7 +377,7 @@ bool ns_capable(struct user_namespace *ns, int cap)
62728 BUG();
62729 }
62730
62731 - if (security_capable(ns, current_cred(), cap) == 0) {
62732 + if (security_capable(ns, current_cred(), cap) == 0 && gr_is_capable(cap)) {
62733 current->flags |= PF_SUPERPRIV;
62734 return true;
62735 }
62736 @@ -382,6 +385,27 @@ bool ns_capable(struct user_namespace *ns, int cap)
62737 }
62738 EXPORT_SYMBOL(ns_capable);
62739
62740 +bool ns_capable_nolog(struct user_namespace *ns, int cap)
62741 +{
62742 + if (unlikely(!cap_valid(cap))) {
62743 + printk(KERN_CRIT "capable() called with invalid cap=%u\n", cap);
62744 + BUG();
62745 + }
62746 +
62747 + if (security_capable(ns, current_cred(), cap) == 0 && gr_is_capable_nolog(cap)) {
62748 + current->flags |= PF_SUPERPRIV;
62749 + return true;
62750 + }
62751 + return false;
62752 +}
62753 +EXPORT_SYMBOL(ns_capable_nolog);
62754 +
62755 +bool capable_nolog(int cap)
62756 +{
62757 + return ns_capable_nolog(&init_user_ns, cap);
62758 +}
62759 +EXPORT_SYMBOL(capable_nolog);
62760 +
62761 /**
62762 * task_ns_capable - Determine whether current task has a superior
62763 * capability targeted at a specific task's user namespace.
62764 @@ -396,6 +420,12 @@ bool task_ns_capable(struct task_struct *t, int cap)
62765 }
62766 EXPORT_SYMBOL(task_ns_capable);
62767
62768 +bool task_ns_capable_nolog(struct task_struct *t, int cap)
62769 +{
62770 + return ns_capable_nolog(task_cred_xxx(t, user)->user_ns, cap);
62771 +}
62772 +EXPORT_SYMBOL(task_ns_capable_nolog);
62773 +
62774 /**
62775 * nsown_capable - Check superior capability to one's own user_ns
62776 * @cap: The capability in question
62777 diff --git a/kernel/compat.c b/kernel/compat.c
62778 index f346ced..aa2b1f4 100644
62779 --- a/kernel/compat.c
62780 +++ b/kernel/compat.c
62781 @@ -13,6 +13,7 @@
62782
62783 #include <linux/linkage.h>
62784 #include <linux/compat.h>
62785 +#include <linux/module.h>
62786 #include <linux/errno.h>
62787 #include <linux/time.h>
62788 #include <linux/signal.h>
62789 @@ -168,7 +169,7 @@ static long compat_nanosleep_restart(struct restart_block *restart)
62790 mm_segment_t oldfs;
62791 long ret;
62792
62793 - restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
62794 + restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt;
62795 oldfs = get_fs();
62796 set_fs(KERNEL_DS);
62797 ret = hrtimer_nanosleep_restart(restart);
62798 @@ -200,7 +201,7 @@ asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp,
62799 oldfs = get_fs();
62800 set_fs(KERNEL_DS);
62801 ret = hrtimer_nanosleep(&tu,
62802 - rmtp ? (struct timespec __user *)&rmt : NULL,
62803 + rmtp ? (struct timespec __force_user *)&rmt : NULL,
62804 HRTIMER_MODE_REL, CLOCK_MONOTONIC);
62805 set_fs(oldfs);
62806
62807 @@ -309,7 +310,7 @@ asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set)
62808 mm_segment_t old_fs = get_fs();
62809
62810 set_fs(KERNEL_DS);
62811 - ret = sys_sigpending((old_sigset_t __user *) &s);
62812 + ret = sys_sigpending((old_sigset_t __force_user *) &s);
62813 set_fs(old_fs);
62814 if (ret == 0)
62815 ret = put_user(s, set);
62816 @@ -332,8 +333,8 @@ asmlinkage long compat_sys_sigprocmask(int how, compat_old_sigset_t __user *set,
62817 old_fs = get_fs();
62818 set_fs(KERNEL_DS);
62819 ret = sys_sigprocmask(how,
62820 - set ? (old_sigset_t __user *) &s : NULL,
62821 - oset ? (old_sigset_t __user *) &s : NULL);
62822 + set ? (old_sigset_t __force_user *) &s : NULL,
62823 + oset ? (old_sigset_t __force_user *) &s : NULL);
62824 set_fs(old_fs);
62825 if (ret == 0)
62826 if (oset)
62827 @@ -370,7 +371,7 @@ asmlinkage long compat_sys_old_getrlimit(unsigned int resource,
62828 mm_segment_t old_fs = get_fs();
62829
62830 set_fs(KERNEL_DS);
62831 - ret = sys_old_getrlimit(resource, &r);
62832 + ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r);
62833 set_fs(old_fs);
62834
62835 if (!ret) {
62836 @@ -442,7 +443,7 @@ asmlinkage long compat_sys_getrusage(int who, struct compat_rusage __user *ru)
62837 mm_segment_t old_fs = get_fs();
62838
62839 set_fs(KERNEL_DS);
62840 - ret = sys_getrusage(who, (struct rusage __user *) &r);
62841 + ret = sys_getrusage(who, (struct rusage __force_user *) &r);
62842 set_fs(old_fs);
62843
62844 if (ret)
62845 @@ -469,8 +470,8 @@ compat_sys_wait4(compat_pid_t pid, compat_uint_t __user *stat_addr, int options,
62846 set_fs (KERNEL_DS);
62847 ret = sys_wait4(pid,
62848 (stat_addr ?
62849 - (unsigned int __user *) &status : NULL),
62850 - options, (struct rusage __user *) &r);
62851 + (unsigned int __force_user *) &status : NULL),
62852 + options, (struct rusage __force_user *) &r);
62853 set_fs (old_fs);
62854
62855 if (ret > 0) {
62856 @@ -495,8 +496,8 @@ asmlinkage long compat_sys_waitid(int which, compat_pid_t pid,
62857 memset(&info, 0, sizeof(info));
62858
62859 set_fs(KERNEL_DS);
62860 - ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
62861 - uru ? (struct rusage __user *)&ru : NULL);
62862 + ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options,
62863 + uru ? (struct rusage __force_user *)&ru : NULL);
62864 set_fs(old_fs);
62865
62866 if ((ret < 0) || (info.si_signo == 0))
62867 @@ -626,8 +627,8 @@ long compat_sys_timer_settime(timer_t timer_id, int flags,
62868 oldfs = get_fs();
62869 set_fs(KERNEL_DS);
62870 err = sys_timer_settime(timer_id, flags,
62871 - (struct itimerspec __user *) &newts,
62872 - (struct itimerspec __user *) &oldts);
62873 + (struct itimerspec __force_user *) &newts,
62874 + (struct itimerspec __force_user *) &oldts);
62875 set_fs(oldfs);
62876 if (!err && old && put_compat_itimerspec(old, &oldts))
62877 return -EFAULT;
62878 @@ -644,7 +645,7 @@ long compat_sys_timer_gettime(timer_t timer_id,
62879 oldfs = get_fs();
62880 set_fs(KERNEL_DS);
62881 err = sys_timer_gettime(timer_id,
62882 - (struct itimerspec __user *) &ts);
62883 + (struct itimerspec __force_user *) &ts);
62884 set_fs(oldfs);
62885 if (!err && put_compat_itimerspec(setting, &ts))
62886 return -EFAULT;
62887 @@ -663,7 +664,7 @@ long compat_sys_clock_settime(clockid_t which_clock,
62888 oldfs = get_fs();
62889 set_fs(KERNEL_DS);
62890 err = sys_clock_settime(which_clock,
62891 - (struct timespec __user *) &ts);
62892 + (struct timespec __force_user *) &ts);
62893 set_fs(oldfs);
62894 return err;
62895 }
62896 @@ -678,7 +679,7 @@ long compat_sys_clock_gettime(clockid_t which_clock,
62897 oldfs = get_fs();
62898 set_fs(KERNEL_DS);
62899 err = sys_clock_gettime(which_clock,
62900 - (struct timespec __user *) &ts);
62901 + (struct timespec __force_user *) &ts);
62902 set_fs(oldfs);
62903 if (!err && put_compat_timespec(&ts, tp))
62904 return -EFAULT;
62905 @@ -698,7 +699,7 @@ long compat_sys_clock_adjtime(clockid_t which_clock,
62906
62907 oldfs = get_fs();
62908 set_fs(KERNEL_DS);
62909 - ret = sys_clock_adjtime(which_clock, (struct timex __user *) &txc);
62910 + ret = sys_clock_adjtime(which_clock, (struct timex __force_user *) &txc);
62911 set_fs(oldfs);
62912
62913 err = compat_put_timex(utp, &txc);
62914 @@ -718,7 +719,7 @@ long compat_sys_clock_getres(clockid_t which_clock,
62915 oldfs = get_fs();
62916 set_fs(KERNEL_DS);
62917 err = sys_clock_getres(which_clock,
62918 - (struct timespec __user *) &ts);
62919 + (struct timespec __force_user *) &ts);
62920 set_fs(oldfs);
62921 if (!err && tp && put_compat_timespec(&ts, tp))
62922 return -EFAULT;
62923 @@ -730,9 +731,9 @@ static long compat_clock_nanosleep_restart(struct restart_block *restart)
62924 long err;
62925 mm_segment_t oldfs;
62926 struct timespec tu;
62927 - struct compat_timespec *rmtp = restart->nanosleep.compat_rmtp;
62928 + struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;
62929
62930 - restart->nanosleep.rmtp = (struct timespec __user *) &tu;
62931 + restart->nanosleep.rmtp = (struct timespec __force_user *) &tu;
62932 oldfs = get_fs();
62933 set_fs(KERNEL_DS);
62934 err = clock_nanosleep_restart(restart);
62935 @@ -764,8 +765,8 @@ long compat_sys_clock_nanosleep(clockid_t which_clock, int flags,
62936 oldfs = get_fs();
62937 set_fs(KERNEL_DS);
62938 err = sys_clock_nanosleep(which_clock, flags,
62939 - (struct timespec __user *) &in,
62940 - (struct timespec __user *) &out);
62941 + (struct timespec __force_user *) &in,
62942 + (struct timespec __force_user *) &out);
62943 set_fs(oldfs);
62944
62945 if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
62946 diff --git a/kernel/configs.c b/kernel/configs.c
62947 index 42e8fa0..9e7406b 100644
62948 --- a/kernel/configs.c
62949 +++ b/kernel/configs.c
62950 @@ -74,8 +74,19 @@ static int __init ikconfig_init(void)
62951 struct proc_dir_entry *entry;
62952
62953 /* create the current config file */
62954 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
62955 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
62956 + entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
62957 + &ikconfig_file_ops);
62958 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
62959 + entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
62960 + &ikconfig_file_ops);
62961 +#endif
62962 +#else
62963 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
62964 &ikconfig_file_ops);
62965 +#endif
62966 +
62967 if (!entry)
62968 return -ENOMEM;
62969
62970 diff --git a/kernel/cred.c b/kernel/cred.c
62971 index 5791612..a3c04dc 100644
62972 --- a/kernel/cred.c
62973 +++ b/kernel/cred.c
62974 @@ -204,6 +204,15 @@ void exit_creds(struct task_struct *tsk)
62975 validate_creds(cred);
62976 put_cred(cred);
62977 }
62978 +
62979 +#ifdef CONFIG_GRKERNSEC_SETXID
62980 + cred = (struct cred *) tsk->delayed_cred;
62981 + if (cred) {
62982 + tsk->delayed_cred = NULL;
62983 + validate_creds(cred);
62984 + put_cred(cred);
62985 + }
62986 +#endif
62987 }
62988
62989 /**
62990 @@ -470,7 +479,7 @@ error_put:
62991 * Always returns 0 thus allowing this function to be tail-called at the end
62992 * of, say, sys_setgid().
62993 */
62994 -int commit_creds(struct cred *new)
62995 +static int __commit_creds(struct cred *new)
62996 {
62997 struct task_struct *task = current;
62998 const struct cred *old = task->real_cred;
62999 @@ -489,6 +498,8 @@ int commit_creds(struct cred *new)
63000
63001 get_cred(new); /* we will require a ref for the subj creds too */
63002
63003 + gr_set_role_label(task, new->uid, new->gid);
63004 +
63005 /* dumpability changes */
63006 if (old->euid != new->euid ||
63007 old->egid != new->egid ||
63008 @@ -538,6 +549,92 @@ int commit_creds(struct cred *new)
63009 put_cred(old);
63010 return 0;
63011 }
63012 +#ifdef CONFIG_GRKERNSEC_SETXID
63013 +extern int set_user(struct cred *new);
63014 +
63015 +void gr_delayed_cred_worker(void)
63016 +{
63017 + const struct cred *new = current->delayed_cred;
63018 + struct cred *ncred;
63019 +
63020 + current->delayed_cred = NULL;
63021 +
63022 + if (current_uid() && new != NULL) {
63023 + // from doing get_cred on it when queueing this
63024 + put_cred(new);
63025 + return;
63026 + } else if (new == NULL)
63027 + return;
63028 +
63029 + ncred = prepare_creds();
63030 + if (!ncred)
63031 + goto die;
63032 + // uids
63033 + ncred->uid = new->uid;
63034 + ncred->euid = new->euid;
63035 + ncred->suid = new->suid;
63036 + ncred->fsuid = new->fsuid;
63037 + // gids
63038 + ncred->gid = new->gid;
63039 + ncred->egid = new->egid;
63040 + ncred->sgid = new->sgid;
63041 + ncred->fsgid = new->fsgid;
63042 + // groups
63043 + if (set_groups(ncred, new->group_info) < 0) {
63044 + abort_creds(ncred);
63045 + goto die;
63046 + }
63047 + // caps
63048 + ncred->securebits = new->securebits;
63049 + ncred->cap_inheritable = new->cap_inheritable;
63050 + ncred->cap_permitted = new->cap_permitted;
63051 + ncred->cap_effective = new->cap_effective;
63052 + ncred->cap_bset = new->cap_bset;
63053 +
63054 + if (set_user(ncred)) {
63055 + abort_creds(ncred);
63056 + goto die;
63057 + }
63058 +
63059 + // from doing get_cred on it when queueing this
63060 + put_cred(new);
63061 +
63062 + __commit_creds(ncred);
63063 + return;
63064 +die:
63065 + // from doing get_cred on it when queueing this
63066 + put_cred(new);
63067 + do_group_exit(SIGKILL);
63068 +}
63069 +#endif
63070 +
63071 +int commit_creds(struct cred *new)
63072 +{
63073 +#ifdef CONFIG_GRKERNSEC_SETXID
63074 + struct task_struct *t;
63075 +
63076 + /* we won't get called with tasklist_lock held for writing
63077 + and interrupts disabled as the cred struct in that case is
63078 + init_cred
63079 + */
63080 + if (grsec_enable_setxid && !current_is_single_threaded() &&
63081 + !current_uid() && new->uid) {
63082 + rcu_read_lock();
63083 + read_lock(&tasklist_lock);
63084 + for (t = next_thread(current); t != current;
63085 + t = next_thread(t)) {
63086 + if (t->delayed_cred == NULL) {
63087 + t->delayed_cred = get_cred(new);
63088 + set_tsk_need_resched(t);
63089 + }
63090 + }
63091 + read_unlock(&tasklist_lock);
63092 + rcu_read_unlock();
63093 + }
63094 +#endif
63095 + return __commit_creds(new);
63096 +}
63097 +
63098 EXPORT_SYMBOL(commit_creds);
63099
63100 /**
63101 diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
63102 index 0d7c087..01b8cef 100644
63103 --- a/kernel/debug/debug_core.c
63104 +++ b/kernel/debug/debug_core.c
63105 @@ -119,7 +119,7 @@ static DEFINE_RAW_SPINLOCK(dbg_slave_lock);
63106 */
63107 static atomic_t masters_in_kgdb;
63108 static atomic_t slaves_in_kgdb;
63109 -static atomic_t kgdb_break_tasklet_var;
63110 +static atomic_unchecked_t kgdb_break_tasklet_var;
63111 atomic_t kgdb_setting_breakpoint;
63112
63113 struct task_struct *kgdb_usethread;
63114 @@ -129,7 +129,7 @@ int kgdb_single_step;
63115 static pid_t kgdb_sstep_pid;
63116
63117 /* to keep track of the CPU which is doing the single stepping*/
63118 -atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
63119 +atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
63120
63121 /*
63122 * If you are debugging a problem where roundup (the collection of
63123 @@ -542,7 +542,7 @@ return_normal:
63124 * kernel will only try for the value of sstep_tries before
63125 * giving up and continuing on.
63126 */
63127 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
63128 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
63129 (kgdb_info[cpu].task &&
63130 kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
63131 atomic_set(&kgdb_active, -1);
63132 @@ -636,8 +636,8 @@ cpu_master_loop:
63133 }
63134
63135 kgdb_restore:
63136 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
63137 - int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
63138 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
63139 + int sstep_cpu = atomic_read_unchecked(&kgdb_cpu_doing_single_step);
63140 if (kgdb_info[sstep_cpu].task)
63141 kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
63142 else
63143 @@ -834,18 +834,18 @@ static void kgdb_unregister_callbacks(void)
63144 static void kgdb_tasklet_bpt(unsigned long ing)
63145 {
63146 kgdb_breakpoint();
63147 - atomic_set(&kgdb_break_tasklet_var, 0);
63148 + atomic_set_unchecked(&kgdb_break_tasklet_var, 0);
63149 }
63150
63151 static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
63152
63153 void kgdb_schedule_breakpoint(void)
63154 {
63155 - if (atomic_read(&kgdb_break_tasklet_var) ||
63156 + if (atomic_read_unchecked(&kgdb_break_tasklet_var) ||
63157 atomic_read(&kgdb_active) != -1 ||
63158 atomic_read(&kgdb_setting_breakpoint))
63159 return;
63160 - atomic_inc(&kgdb_break_tasklet_var);
63161 + atomic_inc_unchecked(&kgdb_break_tasklet_var);
63162 tasklet_schedule(&kgdb_tasklet_breakpoint);
63163 }
63164 EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
63165 diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
63166 index 63786e7..0780cac 100644
63167 --- a/kernel/debug/kdb/kdb_main.c
63168 +++ b/kernel/debug/kdb/kdb_main.c
63169 @@ -1980,7 +1980,7 @@ static int kdb_lsmod(int argc, const char **argv)
63170 list_for_each_entry(mod, kdb_modules, list) {
63171
63172 kdb_printf("%-20s%8u 0x%p ", mod->name,
63173 - mod->core_size, (void *)mod);
63174 + mod->core_size_rx + mod->core_size_rw, (void *)mod);
63175 #ifdef CONFIG_MODULE_UNLOAD
63176 kdb_printf("%4d ", module_refcount(mod));
63177 #endif
63178 @@ -1990,7 +1990,7 @@ static int kdb_lsmod(int argc, const char **argv)
63179 kdb_printf(" (Loading)");
63180 else
63181 kdb_printf(" (Live)");
63182 - kdb_printf(" 0x%p", mod->module_core);
63183 + kdb_printf(" 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
63184
63185 #ifdef CONFIG_MODULE_UNLOAD
63186 {
63187 diff --git a/kernel/events/core.c b/kernel/events/core.c
63188 index 58690af..d903d75 100644
63189 --- a/kernel/events/core.c
63190 +++ b/kernel/events/core.c
63191 @@ -173,7 +173,7 @@ int perf_proc_update_handler(struct ctl_table *table, int write,
63192 return 0;
63193 }
63194
63195 -static atomic64_t perf_event_id;
63196 +static atomic64_unchecked_t perf_event_id;
63197
63198 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
63199 enum event_type_t event_type);
63200 @@ -2540,7 +2540,7 @@ static void __perf_event_read(void *info)
63201
63202 static inline u64 perf_event_count(struct perf_event *event)
63203 {
63204 - return local64_read(&event->count) + atomic64_read(&event->child_count);
63205 + return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count);
63206 }
63207
63208 static u64 perf_event_read(struct perf_event *event)
63209 @@ -3065,9 +3065,9 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
63210 mutex_lock(&event->child_mutex);
63211 total += perf_event_read(event);
63212 *enabled += event->total_time_enabled +
63213 - atomic64_read(&event->child_total_time_enabled);
63214 + atomic64_read_unchecked(&event->child_total_time_enabled);
63215 *running += event->total_time_running +
63216 - atomic64_read(&event->child_total_time_running);
63217 + atomic64_read_unchecked(&event->child_total_time_running);
63218
63219 list_for_each_entry(child, &event->child_list, child_list) {
63220 total += perf_event_read(child);
63221 @@ -3474,10 +3474,10 @@ void perf_event_update_userpage(struct perf_event *event)
63222 userpg->offset -= local64_read(&event->hw.prev_count);
63223
63224 userpg->time_enabled = enabled +
63225 - atomic64_read(&event->child_total_time_enabled);
63226 + atomic64_read_unchecked(&event->child_total_time_enabled);
63227
63228 userpg->time_running = running +
63229 - atomic64_read(&event->child_total_time_running);
63230 + atomic64_read_unchecked(&event->child_total_time_running);
63231
63232 barrier();
63233 ++userpg->lock;
63234 @@ -3906,11 +3906,11 @@ static void perf_output_read_one(struct perf_output_handle *handle,
63235 values[n++] = perf_event_count(event);
63236 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
63237 values[n++] = enabled +
63238 - atomic64_read(&event->child_total_time_enabled);
63239 + atomic64_read_unchecked(&event->child_total_time_enabled);
63240 }
63241 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
63242 values[n++] = running +
63243 - atomic64_read(&event->child_total_time_running);
63244 + atomic64_read_unchecked(&event->child_total_time_running);
63245 }
63246 if (read_format & PERF_FORMAT_ID)
63247 values[n++] = primary_event_id(event);
63248 @@ -4561,12 +4561,12 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
63249 * need to add enough zero bytes after the string to handle
63250 * the 64bit alignment we do later.
63251 */
63252 - buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
63253 + buf = kzalloc(PATH_MAX, GFP_KERNEL);
63254 if (!buf) {
63255 name = strncpy(tmp, "//enomem", sizeof(tmp));
63256 goto got_name;
63257 }
63258 - name = d_path(&file->f_path, buf, PATH_MAX);
63259 + name = d_path(&file->f_path, buf, PATH_MAX - sizeof(u64));
63260 if (IS_ERR(name)) {
63261 name = strncpy(tmp, "//toolong", sizeof(tmp));
63262 goto got_name;
63263 @@ -5921,7 +5921,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
63264 event->parent = parent_event;
63265
63266 event->ns = get_pid_ns(current->nsproxy->pid_ns);
63267 - event->id = atomic64_inc_return(&perf_event_id);
63268 + event->id = atomic64_inc_return_unchecked(&perf_event_id);
63269
63270 event->state = PERF_EVENT_STATE_INACTIVE;
63271
63272 @@ -6443,10 +6443,10 @@ static void sync_child_event(struct perf_event *child_event,
63273 /*
63274 * Add back the child's count to the parent's count:
63275 */
63276 - atomic64_add(child_val, &parent_event->child_count);
63277 - atomic64_add(child_event->total_time_enabled,
63278 + atomic64_add_unchecked(child_val, &parent_event->child_count);
63279 + atomic64_add_unchecked(child_event->total_time_enabled,
63280 &parent_event->child_total_time_enabled);
63281 - atomic64_add(child_event->total_time_running,
63282 + atomic64_add_unchecked(child_event->total_time_running,
63283 &parent_event->child_total_time_running);
63284
63285 /*
63286 diff --git a/kernel/exit.c b/kernel/exit.c
63287 index e6e01b9..619f837 100644
63288 --- a/kernel/exit.c
63289 +++ b/kernel/exit.c
63290 @@ -57,6 +57,10 @@
63291 #include <asm/pgtable.h>
63292 #include <asm/mmu_context.h>
63293
63294 +#ifdef CONFIG_GRKERNSEC
63295 +extern rwlock_t grsec_exec_file_lock;
63296 +#endif
63297 +
63298 static void exit_mm(struct task_struct * tsk);
63299
63300 static void __unhash_process(struct task_struct *p, bool group_dead)
63301 @@ -168,6 +172,10 @@ void release_task(struct task_struct * p)
63302 struct task_struct *leader;
63303 int zap_leader;
63304 repeat:
63305 +#ifdef CONFIG_NET
63306 + gr_del_task_from_ip_table(p);
63307 +#endif
63308 +
63309 /* don't need to get the RCU readlock here - the process is dead and
63310 * can't be modifying its own credentials. But shut RCU-lockdep up */
63311 rcu_read_lock();
63312 @@ -380,7 +388,7 @@ int allow_signal(int sig)
63313 * know it'll be handled, so that they don't get converted to
63314 * SIGKILL or just silently dropped.
63315 */
63316 - current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
63317 + current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
63318 recalc_sigpending();
63319 spin_unlock_irq(&current->sighand->siglock);
63320 return 0;
63321 @@ -416,6 +424,17 @@ void daemonize(const char *name, ...)
63322 vsnprintf(current->comm, sizeof(current->comm), name, args);
63323 va_end(args);
63324
63325 +#ifdef CONFIG_GRKERNSEC
63326 + write_lock(&grsec_exec_file_lock);
63327 + if (current->exec_file) {
63328 + fput(current->exec_file);
63329 + current->exec_file = NULL;
63330 + }
63331 + write_unlock(&grsec_exec_file_lock);
63332 +#endif
63333 +
63334 + gr_set_kernel_label(current);
63335 +
63336 /*
63337 * If we were started as result of loading a module, close all of the
63338 * user space pages. We don't need them, and if we didn't close them
63339 @@ -893,6 +912,8 @@ NORET_TYPE void do_exit(long code)
63340 struct task_struct *tsk = current;
63341 int group_dead;
63342
63343 + set_fs(USER_DS);
63344 +
63345 profile_task_exit(tsk);
63346
63347 WARN_ON(blk_needs_flush_plug(tsk));
63348 @@ -909,7 +930,6 @@ NORET_TYPE void do_exit(long code)
63349 * mm_release()->clear_child_tid() from writing to a user-controlled
63350 * kernel address.
63351 */
63352 - set_fs(USER_DS);
63353
63354 ptrace_event(PTRACE_EVENT_EXIT, code);
63355
63356 @@ -971,6 +991,9 @@ NORET_TYPE void do_exit(long code)
63357 tsk->exit_code = code;
63358 taskstats_exit(tsk, group_dead);
63359
63360 + gr_acl_handle_psacct(tsk, code);
63361 + gr_acl_handle_exit();
63362 +
63363 exit_mm(tsk);
63364
63365 if (group_dead)
63366 diff --git a/kernel/fork.c b/kernel/fork.c
63367 index da4a6a1..0973380 100644
63368 --- a/kernel/fork.c
63369 +++ b/kernel/fork.c
63370 @@ -280,7 +280,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
63371 *stackend = STACK_END_MAGIC; /* for overflow detection */
63372
63373 #ifdef CONFIG_CC_STACKPROTECTOR
63374 - tsk->stack_canary = get_random_int();
63375 + tsk->stack_canary = pax_get_random_long();
63376 #endif
63377
63378 /*
63379 @@ -304,13 +304,77 @@ out:
63380 }
63381
63382 #ifdef CONFIG_MMU
63383 +static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct vm_area_struct *mpnt)
63384 +{
63385 + struct vm_area_struct *tmp;
63386 + unsigned long charge;
63387 + struct mempolicy *pol;
63388 + struct file *file;
63389 +
63390 + charge = 0;
63391 + if (mpnt->vm_flags & VM_ACCOUNT) {
63392 + unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
63393 + if (security_vm_enough_memory(len))
63394 + goto fail_nomem;
63395 + charge = len;
63396 + }
63397 + tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
63398 + if (!tmp)
63399 + goto fail_nomem;
63400 + *tmp = *mpnt;
63401 + tmp->vm_mm = mm;
63402 + INIT_LIST_HEAD(&tmp->anon_vma_chain);
63403 + pol = mpol_dup(vma_policy(mpnt));
63404 + if (IS_ERR(pol))
63405 + goto fail_nomem_policy;
63406 + vma_set_policy(tmp, pol);
63407 + if (anon_vma_fork(tmp, mpnt))
63408 + goto fail_nomem_anon_vma_fork;
63409 + tmp->vm_flags &= ~VM_LOCKED;
63410 + tmp->vm_next = tmp->vm_prev = NULL;
63411 + tmp->vm_mirror = NULL;
63412 + file = tmp->vm_file;
63413 + if (file) {
63414 + struct inode *inode = file->f_path.dentry->d_inode;
63415 + struct address_space *mapping = file->f_mapping;
63416 +
63417 + get_file(file);
63418 + if (tmp->vm_flags & VM_DENYWRITE)
63419 + atomic_dec(&inode->i_writecount);
63420 + mutex_lock(&mapping->i_mmap_mutex);
63421 + if (tmp->vm_flags & VM_SHARED)
63422 + mapping->i_mmap_writable++;
63423 + flush_dcache_mmap_lock(mapping);
63424 + /* insert tmp into the share list, just after mpnt */
63425 + vma_prio_tree_add(tmp, mpnt);
63426 + flush_dcache_mmap_unlock(mapping);
63427 + mutex_unlock(&mapping->i_mmap_mutex);
63428 + }
63429 +
63430 + /*
63431 + * Clear hugetlb-related page reserves for children. This only
63432 + * affects MAP_PRIVATE mappings. Faults generated by the child
63433 + * are not guaranteed to succeed, even if read-only
63434 + */
63435 + if (is_vm_hugetlb_page(tmp))
63436 + reset_vma_resv_huge_pages(tmp);
63437 +
63438 + return tmp;
63439 +
63440 +fail_nomem_anon_vma_fork:
63441 + mpol_put(pol);
63442 +fail_nomem_policy:
63443 + kmem_cache_free(vm_area_cachep, tmp);
63444 +fail_nomem:
63445 + vm_unacct_memory(charge);
63446 + return NULL;
63447 +}
63448 +
63449 static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
63450 {
63451 struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
63452 struct rb_node **rb_link, *rb_parent;
63453 int retval;
63454 - unsigned long charge;
63455 - struct mempolicy *pol;
63456
63457 down_write(&oldmm->mmap_sem);
63458 flush_cache_dup_mm(oldmm);
63459 @@ -322,8 +386,8 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
63460 mm->locked_vm = 0;
63461 mm->mmap = NULL;
63462 mm->mmap_cache = NULL;
63463 - mm->free_area_cache = oldmm->mmap_base;
63464 - mm->cached_hole_size = ~0UL;
63465 + mm->free_area_cache = oldmm->free_area_cache;
63466 + mm->cached_hole_size = oldmm->cached_hole_size;
63467 mm->map_count = 0;
63468 cpumask_clear(mm_cpumask(mm));
63469 mm->mm_rb = RB_ROOT;
63470 @@ -339,8 +403,6 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
63471
63472 prev = NULL;
63473 for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
63474 - struct file *file;
63475 -
63476 if (mpnt->vm_flags & VM_DONTCOPY) {
63477 long pages = vma_pages(mpnt);
63478 mm->total_vm -= pages;
63479 @@ -348,53 +410,11 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
63480 -pages);
63481 continue;
63482 }
63483 - charge = 0;
63484 - if (mpnt->vm_flags & VM_ACCOUNT) {
63485 - unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
63486 - if (security_vm_enough_memory(len))
63487 - goto fail_nomem;
63488 - charge = len;
63489 + tmp = dup_vma(mm, mpnt);
63490 + if (!tmp) {
63491 + retval = -ENOMEM;
63492 + goto out;
63493 }
63494 - tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
63495 - if (!tmp)
63496 - goto fail_nomem;
63497 - *tmp = *mpnt;
63498 - INIT_LIST_HEAD(&tmp->anon_vma_chain);
63499 - pol = mpol_dup(vma_policy(mpnt));
63500 - retval = PTR_ERR(pol);
63501 - if (IS_ERR(pol))
63502 - goto fail_nomem_policy;
63503 - vma_set_policy(tmp, pol);
63504 - tmp->vm_mm = mm;
63505 - if (anon_vma_fork(tmp, mpnt))
63506 - goto fail_nomem_anon_vma_fork;
63507 - tmp->vm_flags &= ~VM_LOCKED;
63508 - tmp->vm_next = tmp->vm_prev = NULL;
63509 - file = tmp->vm_file;
63510 - if (file) {
63511 - struct inode *inode = file->f_path.dentry->d_inode;
63512 - struct address_space *mapping = file->f_mapping;
63513 -
63514 - get_file(file);
63515 - if (tmp->vm_flags & VM_DENYWRITE)
63516 - atomic_dec(&inode->i_writecount);
63517 - mutex_lock(&mapping->i_mmap_mutex);
63518 - if (tmp->vm_flags & VM_SHARED)
63519 - mapping->i_mmap_writable++;
63520 - flush_dcache_mmap_lock(mapping);
63521 - /* insert tmp into the share list, just after mpnt */
63522 - vma_prio_tree_add(tmp, mpnt);
63523 - flush_dcache_mmap_unlock(mapping);
63524 - mutex_unlock(&mapping->i_mmap_mutex);
63525 - }
63526 -
63527 - /*
63528 - * Clear hugetlb-related page reserves for children. This only
63529 - * affects MAP_PRIVATE mappings. Faults generated by the child
63530 - * are not guaranteed to succeed, even if read-only
63531 - */
63532 - if (is_vm_hugetlb_page(tmp))
63533 - reset_vma_resv_huge_pages(tmp);
63534
63535 /*
63536 * Link in the new vma and copy the page table entries.
63537 @@ -417,6 +437,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
63538 if (retval)
63539 goto out;
63540 }
63541 +
63542 +#ifdef CONFIG_PAX_SEGMEXEC
63543 + if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
63544 + struct vm_area_struct *mpnt_m;
63545 +
63546 + for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
63547 + BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
63548 +
63549 + if (!mpnt->vm_mirror)
63550 + continue;
63551 +
63552 + if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
63553 + BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
63554 + mpnt->vm_mirror = mpnt_m;
63555 + } else {
63556 + BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
63557 + mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
63558 + mpnt_m->vm_mirror->vm_mirror = mpnt_m;
63559 + mpnt->vm_mirror->vm_mirror = mpnt;
63560 + }
63561 + }
63562 + BUG_ON(mpnt_m);
63563 + }
63564 +#endif
63565 +
63566 /* a new mm has just been created */
63567 arch_dup_mmap(oldmm, mm);
63568 retval = 0;
63569 @@ -425,14 +470,6 @@ out:
63570 flush_tlb_mm(oldmm);
63571 up_write(&oldmm->mmap_sem);
63572 return retval;
63573 -fail_nomem_anon_vma_fork:
63574 - mpol_put(pol);
63575 -fail_nomem_policy:
63576 - kmem_cache_free(vm_area_cachep, tmp);
63577 -fail_nomem:
63578 - retval = -ENOMEM;
63579 - vm_unacct_memory(charge);
63580 - goto out;
63581 }
63582
63583 static inline int mm_alloc_pgd(struct mm_struct *mm)
63584 @@ -644,6 +681,26 @@ struct mm_struct *get_task_mm(struct task_struct *task)
63585 }
63586 EXPORT_SYMBOL_GPL(get_task_mm);
63587
63588 +struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
63589 +{
63590 + struct mm_struct *mm;
63591 + int err;
63592 +
63593 + err = mutex_lock_killable(&task->signal->cred_guard_mutex);
63594 + if (err)
63595 + return ERR_PTR(err);
63596 +
63597 + mm = get_task_mm(task);
63598 + if (mm && ((mm != current->mm && !ptrace_may_access(task, mode)) ||
63599 + (mode == PTRACE_MODE_ATTACH && (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))))) {
63600 + mmput(mm);
63601 + mm = ERR_PTR(-EACCES);
63602 + }
63603 + mutex_unlock(&task->signal->cred_guard_mutex);
63604 +
63605 + return mm;
63606 +}
63607 +
63608 /* Please note the differences between mmput and mm_release.
63609 * mmput is called whenever we stop holding onto a mm_struct,
63610 * error success whatever.
63611 @@ -829,13 +886,14 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
63612 spin_unlock(&fs->lock);
63613 return -EAGAIN;
63614 }
63615 - fs->users++;
63616 + atomic_inc(&fs->users);
63617 spin_unlock(&fs->lock);
63618 return 0;
63619 }
63620 tsk->fs = copy_fs_struct(fs);
63621 if (!tsk->fs)
63622 return -ENOMEM;
63623 + gr_set_chroot_entries(tsk, &tsk->fs->root);
63624 return 0;
63625 }
63626
63627 @@ -1097,6 +1155,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
63628 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
63629 #endif
63630 retval = -EAGAIN;
63631 +
63632 + gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
63633 +
63634 if (atomic_read(&p->real_cred->user->processes) >=
63635 task_rlimit(p, RLIMIT_NPROC)) {
63636 if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
63637 @@ -1256,6 +1317,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
63638 if (clone_flags & CLONE_THREAD)
63639 p->tgid = current->tgid;
63640
63641 + gr_copy_label(p);
63642 +
63643 p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
63644 /*
63645 * Clear TID on mm_release()?
63646 @@ -1418,6 +1481,8 @@ bad_fork_cleanup_count:
63647 bad_fork_free:
63648 free_task(p);
63649 fork_out:
63650 + gr_log_forkfail(retval);
63651 +
63652 return ERR_PTR(retval);
63653 }
63654
63655 @@ -1518,6 +1583,8 @@ long do_fork(unsigned long clone_flags,
63656 if (clone_flags & CLONE_PARENT_SETTID)
63657 put_user(nr, parent_tidptr);
63658
63659 + gr_handle_brute_check();
63660 +
63661 if (clone_flags & CLONE_VFORK) {
63662 p->vfork_done = &vfork;
63663 init_completion(&vfork);
63664 @@ -1627,7 +1694,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
63665 return 0;
63666
63667 /* don't need lock here; in the worst case we'll do useless copy */
63668 - if (fs->users == 1)
63669 + if (atomic_read(&fs->users) == 1)
63670 return 0;
63671
63672 *new_fsp = copy_fs_struct(fs);
63673 @@ -1716,7 +1783,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
63674 fs = current->fs;
63675 spin_lock(&fs->lock);
63676 current->fs = new_fs;
63677 - if (--fs->users)
63678 + gr_set_chroot_entries(current, &current->fs->root);
63679 + if (atomic_dec_return(&fs->users))
63680 new_fs = NULL;
63681 else
63682 new_fs = fs;
63683 diff --git a/kernel/futex.c b/kernel/futex.c
63684 index 1614be2..37abc7e 100644
63685 --- a/kernel/futex.c
63686 +++ b/kernel/futex.c
63687 @@ -54,6 +54,7 @@
63688 #include <linux/mount.h>
63689 #include <linux/pagemap.h>
63690 #include <linux/syscalls.h>
63691 +#include <linux/ptrace.h>
63692 #include <linux/signal.h>
63693 #include <linux/export.h>
63694 #include <linux/magic.h>
63695 @@ -238,6 +239,11 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
63696 struct page *page, *page_head;
63697 int err, ro = 0;
63698
63699 +#ifdef CONFIG_PAX_SEGMEXEC
63700 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
63701 + return -EFAULT;
63702 +#endif
63703 +
63704 /*
63705 * The futex address must be "naturally" aligned.
63706 */
63707 @@ -2459,6 +2465,10 @@ SYSCALL_DEFINE3(get_robust_list, int, pid,
63708 if (!p)
63709 goto err_unlock;
63710 ret = -EPERM;
63711 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
63712 + if (!ptrace_may_access(p, PTRACE_MODE_READ))
63713 + goto err_unlock;
63714 +#endif
63715 pcred = __task_cred(p);
63716 /* If victim is in different user_ns, then uids are not
63717 comparable, so we must have CAP_SYS_PTRACE */
63718 @@ -2724,6 +2734,7 @@ static int __init futex_init(void)
63719 {
63720 u32 curval;
63721 int i;
63722 + mm_segment_t oldfs;
63723
63724 /*
63725 * This will fail and we want it. Some arch implementations do
63726 @@ -2735,8 +2746,11 @@ static int __init futex_init(void)
63727 * implementation, the non-functional ones will return
63728 * -ENOSYS.
63729 */
63730 + oldfs = get_fs();
63731 + set_fs(USER_DS);
63732 if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
63733 futex_cmpxchg_enabled = 1;
63734 + set_fs(oldfs);
63735
63736 for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
63737 plist_head_init(&futex_queues[i].chain);
63738 diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
63739 index 5f9e689..582d46d 100644
63740 --- a/kernel/futex_compat.c
63741 +++ b/kernel/futex_compat.c
63742 @@ -10,6 +10,7 @@
63743 #include <linux/compat.h>
63744 #include <linux/nsproxy.h>
63745 #include <linux/futex.h>
63746 +#include <linux/ptrace.h>
63747
63748 #include <asm/uaccess.h>
63749
63750 @@ -136,7 +137,8 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
63751 {
63752 struct compat_robust_list_head __user *head;
63753 unsigned long ret;
63754 - const struct cred *cred = current_cred(), *pcred;
63755 + const struct cred *cred = current_cred();
63756 + const struct cred *pcred;
63757
63758 if (!futex_cmpxchg_enabled)
63759 return -ENOSYS;
63760 @@ -152,6 +154,10 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
63761 if (!p)
63762 goto err_unlock;
63763 ret = -EPERM;
63764 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
63765 + if (!ptrace_may_access(p, PTRACE_MODE_READ))
63766 + goto err_unlock;
63767 +#endif
63768 pcred = __task_cred(p);
63769 /* If victim is in different user_ns, then uids are not
63770 comparable, so we must have CAP_SYS_PTRACE */
63771 diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c
63772 index 9b22d03..6295b62 100644
63773 --- a/kernel/gcov/base.c
63774 +++ b/kernel/gcov/base.c
63775 @@ -102,11 +102,6 @@ void gcov_enable_events(void)
63776 }
63777
63778 #ifdef CONFIG_MODULES
63779 -static inline int within(void *addr, void *start, unsigned long size)
63780 -{
63781 - return ((addr >= start) && (addr < start + size));
63782 -}
63783 -
63784 /* Update list and generate events when modules are unloaded. */
63785 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
63786 void *data)
63787 @@ -121,7 +116,7 @@ static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
63788 prev = NULL;
63789 /* Remove entries located in module from linked list. */
63790 for (info = gcov_info_head; info; info = info->next) {
63791 - if (within(info, mod->module_core, mod->core_size)) {
63792 + if (within_module_core_rw((unsigned long)info, mod)) {
63793 if (prev)
63794 prev->next = info->next;
63795 else
63796 diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
63797 index ae34bf5..4e2f3d0 100644
63798 --- a/kernel/hrtimer.c
63799 +++ b/kernel/hrtimer.c
63800 @@ -1393,7 +1393,7 @@ void hrtimer_peek_ahead_timers(void)
63801 local_irq_restore(flags);
63802 }
63803
63804 -static void run_hrtimer_softirq(struct softirq_action *h)
63805 +static void run_hrtimer_softirq(void)
63806 {
63807 hrtimer_peek_ahead_timers();
63808 }
63809 diff --git a/kernel/jump_label.c b/kernel/jump_label.c
63810 index 66ff710..05a5128 100644
63811 --- a/kernel/jump_label.c
63812 +++ b/kernel/jump_label.c
63813 @@ -55,7 +55,9 @@ jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
63814
63815 size = (((unsigned long)stop - (unsigned long)start)
63816 / sizeof(struct jump_entry));
63817 + pax_open_kernel();
63818 sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
63819 + pax_close_kernel();
63820 }
63821
63822 static void jump_label_update(struct jump_label_key *key, int enable);
63823 @@ -303,10 +305,12 @@ static void jump_label_invalidate_module_init(struct module *mod)
63824 struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
63825 struct jump_entry *iter;
63826
63827 + pax_open_kernel();
63828 for (iter = iter_start; iter < iter_stop; iter++) {
63829 if (within_module_init(iter->code, mod))
63830 iter->code = 0;
63831 }
63832 + pax_close_kernel();
63833 }
63834
63835 static int
63836 diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
63837 index 079f1d3..a407562 100644
63838 --- a/kernel/kallsyms.c
63839 +++ b/kernel/kallsyms.c
63840 @@ -11,6 +11,9 @@
63841 * Changed the compression method from stem compression to "table lookup"
63842 * compression (see scripts/kallsyms.c for a more complete description)
63843 */
63844 +#ifdef CONFIG_GRKERNSEC_HIDESYM
63845 +#define __INCLUDED_BY_HIDESYM 1
63846 +#endif
63847 #include <linux/kallsyms.h>
63848 #include <linux/module.h>
63849 #include <linux/init.h>
63850 @@ -53,12 +56,33 @@ extern const unsigned long kallsyms_markers[] __attribute__((weak));
63851
63852 static inline int is_kernel_inittext(unsigned long addr)
63853 {
63854 + if (system_state != SYSTEM_BOOTING)
63855 + return 0;
63856 +
63857 if (addr >= (unsigned long)_sinittext
63858 && addr <= (unsigned long)_einittext)
63859 return 1;
63860 return 0;
63861 }
63862
63863 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
63864 +#ifdef CONFIG_MODULES
63865 +static inline int is_module_text(unsigned long addr)
63866 +{
63867 + if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
63868 + return 1;
63869 +
63870 + addr = ktla_ktva(addr);
63871 + return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
63872 +}
63873 +#else
63874 +static inline int is_module_text(unsigned long addr)
63875 +{
63876 + return 0;
63877 +}
63878 +#endif
63879 +#endif
63880 +
63881 static inline int is_kernel_text(unsigned long addr)
63882 {
63883 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
63884 @@ -69,13 +93,28 @@ static inline int is_kernel_text(unsigned long addr)
63885
63886 static inline int is_kernel(unsigned long addr)
63887 {
63888 +
63889 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
63890 + if (is_kernel_text(addr) || is_kernel_inittext(addr))
63891 + return 1;
63892 +
63893 + if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
63894 +#else
63895 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
63896 +#endif
63897 +
63898 return 1;
63899 return in_gate_area_no_mm(addr);
63900 }
63901
63902 static int is_ksym_addr(unsigned long addr)
63903 {
63904 +
63905 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
63906 + if (is_module_text(addr))
63907 + return 0;
63908 +#endif
63909 +
63910 if (all_var)
63911 return is_kernel(addr);
63912
63913 @@ -454,7 +493,6 @@ static unsigned long get_ksymbol_core(struct kallsym_iter *iter)
63914
63915 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
63916 {
63917 - iter->name[0] = '\0';
63918 iter->nameoff = get_symbol_offset(new_pos);
63919 iter->pos = new_pos;
63920 }
63921 @@ -502,6 +540,11 @@ static int s_show(struct seq_file *m, void *p)
63922 {
63923 struct kallsym_iter *iter = m->private;
63924
63925 +#ifdef CONFIG_GRKERNSEC_HIDESYM
63926 + if (current_uid())
63927 + return 0;
63928 +#endif
63929 +
63930 /* Some debugging symbols have no name. Ignore them. */
63931 if (!iter->name[0])
63932 return 0;
63933 @@ -540,7 +583,7 @@ static int kallsyms_open(struct inode *inode, struct file *file)
63934 struct kallsym_iter *iter;
63935 int ret;
63936
63937 - iter = kmalloc(sizeof(*iter), GFP_KERNEL);
63938 + iter = kzalloc(sizeof(*iter), GFP_KERNEL);
63939 if (!iter)
63940 return -ENOMEM;
63941 reset_iter(iter, 0);
63942 diff --git a/kernel/kexec.c b/kernel/kexec.c
63943 index dc7bc08..4601964 100644
63944 --- a/kernel/kexec.c
63945 +++ b/kernel/kexec.c
63946 @@ -1048,7 +1048,8 @@ asmlinkage long compat_sys_kexec_load(unsigned long entry,
63947 unsigned long flags)
63948 {
63949 struct compat_kexec_segment in;
63950 - struct kexec_segment out, __user *ksegments;
63951 + struct kexec_segment out;
63952 + struct kexec_segment __user *ksegments;
63953 unsigned long i, result;
63954
63955 /* Don't allow clients that don't understand the native
63956 diff --git a/kernel/kmod.c b/kernel/kmod.c
63957 index a4bea97..7a1ae9a 100644
63958 --- a/kernel/kmod.c
63959 +++ b/kernel/kmod.c
63960 @@ -73,13 +73,12 @@ char modprobe_path[KMOD_PATH_LEN] = "/sbin/modprobe";
63961 * If module auto-loading support is disabled then this function
63962 * becomes a no-operation.
63963 */
63964 -int __request_module(bool wait, const char *fmt, ...)
63965 +static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
63966 {
63967 - va_list args;
63968 char module_name[MODULE_NAME_LEN];
63969 unsigned int max_modprobes;
63970 int ret;
63971 - char *argv[] = { modprobe_path, "-q", "--", module_name, NULL };
63972 + char *argv[] = { modprobe_path, "-q", "--", module_name, module_param, NULL };
63973 static char *envp[] = { "HOME=/",
63974 "TERM=linux",
63975 "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
63976 @@ -88,9 +87,7 @@ int __request_module(bool wait, const char *fmt, ...)
63977 #define MAX_KMOD_CONCURRENT 50 /* Completely arbitrary value - KAO */
63978 static int kmod_loop_msg;
63979
63980 - va_start(args, fmt);
63981 - ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
63982 - va_end(args);
63983 + ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
63984 if (ret >= MODULE_NAME_LEN)
63985 return -ENAMETOOLONG;
63986
63987 @@ -98,6 +95,20 @@ int __request_module(bool wait, const char *fmt, ...)
63988 if (ret)
63989 return ret;
63990
63991 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
63992 + if (!current_uid()) {
63993 + /* hack to workaround consolekit/udisks stupidity */
63994 + read_lock(&tasklist_lock);
63995 + if (!strcmp(current->comm, "mount") &&
63996 + current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
63997 + read_unlock(&tasklist_lock);
63998 + printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
63999 + return -EPERM;
64000 + }
64001 + read_unlock(&tasklist_lock);
64002 + }
64003 +#endif
64004 +
64005 /* If modprobe needs a service that is in a module, we get a recursive
64006 * loop. Limit the number of running kmod threads to max_threads/2 or
64007 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
64008 @@ -133,6 +144,47 @@ int __request_module(bool wait, const char *fmt, ...)
64009 atomic_dec(&kmod_concurrent);
64010 return ret;
64011 }
64012 +
64013 +int ___request_module(bool wait, char *module_param, const char *fmt, ...)
64014 +{
64015 + va_list args;
64016 + int ret;
64017 +
64018 + va_start(args, fmt);
64019 + ret = ____request_module(wait, module_param, fmt, args);
64020 + va_end(args);
64021 +
64022 + return ret;
64023 +}
64024 +
64025 +int __request_module(bool wait, const char *fmt, ...)
64026 +{
64027 + va_list args;
64028 + int ret;
64029 +
64030 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
64031 + if (current_uid()) {
64032 + char module_param[MODULE_NAME_LEN];
64033 +
64034 + memset(module_param, 0, sizeof(module_param));
64035 +
64036 + snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", current_uid());
64037 +
64038 + va_start(args, fmt);
64039 + ret = ____request_module(wait, module_param, fmt, args);
64040 + va_end(args);
64041 +
64042 + return ret;
64043 + }
64044 +#endif
64045 +
64046 + va_start(args, fmt);
64047 + ret = ____request_module(wait, NULL, fmt, args);
64048 + va_end(args);
64049 +
64050 + return ret;
64051 +}
64052 +
64053 EXPORT_SYMBOL(__request_module);
64054 #endif /* CONFIG_MODULES */
64055
64056 @@ -222,7 +274,7 @@ static int wait_for_helper(void *data)
64057 *
64058 * Thus the __user pointer cast is valid here.
64059 */
64060 - sys_wait4(pid, (int __user *)&ret, 0, NULL);
64061 + sys_wait4(pid, (int __force_user *)&ret, 0, NULL);
64062
64063 /*
64064 * If ret is 0, either ____call_usermodehelper failed and the
64065 diff --git a/kernel/kprobes.c b/kernel/kprobes.c
64066 index faa39d1..d7ad37e 100644
64067 --- a/kernel/kprobes.c
64068 +++ b/kernel/kprobes.c
64069 @@ -185,7 +185,7 @@ static kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c)
64070 * kernel image and loaded module images reside. This is required
64071 * so x86_64 can correctly handle the %rip-relative fixups.
64072 */
64073 - kip->insns = module_alloc(PAGE_SIZE);
64074 + kip->insns = module_alloc_exec(PAGE_SIZE);
64075 if (!kip->insns) {
64076 kfree(kip);
64077 return NULL;
64078 @@ -225,7 +225,7 @@ static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
64079 */
64080 if (!list_is_singular(&kip->list)) {
64081 list_del(&kip->list);
64082 - module_free(NULL, kip->insns);
64083 + module_free_exec(NULL, kip->insns);
64084 kfree(kip);
64085 }
64086 return 1;
64087 @@ -1953,7 +1953,7 @@ static int __init init_kprobes(void)
64088 {
64089 int i, err = 0;
64090 unsigned long offset = 0, size = 0;
64091 - char *modname, namebuf[128];
64092 + char *modname, namebuf[KSYM_NAME_LEN];
64093 const char *symbol_name;
64094 void *addr;
64095 struct kprobe_blackpoint *kb;
64096 @@ -2079,7 +2079,7 @@ static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
64097 const char *sym = NULL;
64098 unsigned int i = *(loff_t *) v;
64099 unsigned long offset = 0;
64100 - char *modname, namebuf[128];
64101 + char *modname, namebuf[KSYM_NAME_LEN];
64102
64103 head = &kprobe_table[i];
64104 preempt_disable();
64105 diff --git a/kernel/lockdep.c b/kernel/lockdep.c
64106 index b2e08c9..01d8049 100644
64107 --- a/kernel/lockdep.c
64108 +++ b/kernel/lockdep.c
64109 @@ -592,6 +592,10 @@ static int static_obj(void *obj)
64110 end = (unsigned long) &_end,
64111 addr = (unsigned long) obj;
64112
64113 +#ifdef CONFIG_PAX_KERNEXEC
64114 + start = ktla_ktva(start);
64115 +#endif
64116 +
64117 /*
64118 * static variable?
64119 */
64120 @@ -731,6 +735,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
64121 if (!static_obj(lock->key)) {
64122 debug_locks_off();
64123 printk("INFO: trying to register non-static key.\n");
64124 + printk("lock:%pS key:%pS.\n", lock, lock->key);
64125 printk("the code is fine but needs lockdep annotation.\n");
64126 printk("turning off the locking correctness validator.\n");
64127 dump_stack();
64128 @@ -3042,7 +3047,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
64129 if (!class)
64130 return 0;
64131 }
64132 - atomic_inc((atomic_t *)&class->ops);
64133 + atomic_inc_unchecked((atomic_unchecked_t *)&class->ops);
64134 if (very_verbose(class)) {
64135 printk("\nacquire class [%p] %s", class->key, class->name);
64136 if (class->name_version > 1)
64137 diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
64138 index 91c32a0..b2c71c5 100644
64139 --- a/kernel/lockdep_proc.c
64140 +++ b/kernel/lockdep_proc.c
64141 @@ -39,7 +39,7 @@ static void l_stop(struct seq_file *m, void *v)
64142
64143 static void print_name(struct seq_file *m, struct lock_class *class)
64144 {
64145 - char str[128];
64146 + char str[KSYM_NAME_LEN];
64147 const char *name = class->name;
64148
64149 if (!name) {
64150 diff --git a/kernel/module.c b/kernel/module.c
64151 index 178333c..04e3408 100644
64152 --- a/kernel/module.c
64153 +++ b/kernel/module.c
64154 @@ -58,6 +58,7 @@
64155 #include <linux/jump_label.h>
64156 #include <linux/pfn.h>
64157 #include <linux/bsearch.h>
64158 +#include <linux/grsecurity.h>
64159
64160 #define CREATE_TRACE_POINTS
64161 #include <trace/events/module.h>
64162 @@ -119,7 +120,8 @@ static BLOCKING_NOTIFIER_HEAD(module_notify_list);
64163
64164 /* Bounds of module allocation, for speeding __module_address.
64165 * Protected by module_mutex. */
64166 -static unsigned long module_addr_min = -1UL, module_addr_max = 0;
64167 +static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
64168 +static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
64169
64170 int register_module_notifier(struct notifier_block * nb)
64171 {
64172 @@ -284,7 +286,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
64173 return true;
64174
64175 list_for_each_entry_rcu(mod, &modules, list) {
64176 - struct symsearch arr[] = {
64177 + struct symsearch modarr[] = {
64178 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
64179 NOT_GPL_ONLY, false },
64180 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
64181 @@ -306,7 +308,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
64182 #endif
64183 };
64184
64185 - if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
64186 + if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
64187 return true;
64188 }
64189 return false;
64190 @@ -438,7 +440,7 @@ static inline void __percpu *mod_percpu(struct module *mod)
64191 static int percpu_modalloc(struct module *mod,
64192 unsigned long size, unsigned long align)
64193 {
64194 - if (align > PAGE_SIZE) {
64195 + if (align-1 >= PAGE_SIZE) {
64196 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
64197 mod->name, align, PAGE_SIZE);
64198 align = PAGE_SIZE;
64199 @@ -1183,7 +1185,7 @@ resolve_symbol_wait(struct module *mod,
64200 */
64201 #ifdef CONFIG_SYSFS
64202
64203 -#ifdef CONFIG_KALLSYMS
64204 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
64205 static inline bool sect_empty(const Elf_Shdr *sect)
64206 {
64207 return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
64208 @@ -1649,21 +1651,21 @@ static void set_section_ro_nx(void *base,
64209
64210 static void unset_module_core_ro_nx(struct module *mod)
64211 {
64212 - set_page_attributes(mod->module_core + mod->core_text_size,
64213 - mod->module_core + mod->core_size,
64214 + set_page_attributes(mod->module_core_rw,
64215 + mod->module_core_rw + mod->core_size_rw,
64216 set_memory_x);
64217 - set_page_attributes(mod->module_core,
64218 - mod->module_core + mod->core_ro_size,
64219 + set_page_attributes(mod->module_core_rx,
64220 + mod->module_core_rx + mod->core_size_rx,
64221 set_memory_rw);
64222 }
64223
64224 static void unset_module_init_ro_nx(struct module *mod)
64225 {
64226 - set_page_attributes(mod->module_init + mod->init_text_size,
64227 - mod->module_init + mod->init_size,
64228 + set_page_attributes(mod->module_init_rw,
64229 + mod->module_init_rw + mod->init_size_rw,
64230 set_memory_x);
64231 - set_page_attributes(mod->module_init,
64232 - mod->module_init + mod->init_ro_size,
64233 + set_page_attributes(mod->module_init_rx,
64234 + mod->module_init_rx + mod->init_size_rx,
64235 set_memory_rw);
64236 }
64237
64238 @@ -1674,14 +1676,14 @@ void set_all_modules_text_rw(void)
64239
64240 mutex_lock(&module_mutex);
64241 list_for_each_entry_rcu(mod, &modules, list) {
64242 - if ((mod->module_core) && (mod->core_text_size)) {
64243 - set_page_attributes(mod->module_core,
64244 - mod->module_core + mod->core_text_size,
64245 + if ((mod->module_core_rx) && (mod->core_size_rx)) {
64246 + set_page_attributes(mod->module_core_rx,
64247 + mod->module_core_rx + mod->core_size_rx,
64248 set_memory_rw);
64249 }
64250 - if ((mod->module_init) && (mod->init_text_size)) {
64251 - set_page_attributes(mod->module_init,
64252 - mod->module_init + mod->init_text_size,
64253 + if ((mod->module_init_rx) && (mod->init_size_rx)) {
64254 + set_page_attributes(mod->module_init_rx,
64255 + mod->module_init_rx + mod->init_size_rx,
64256 set_memory_rw);
64257 }
64258 }
64259 @@ -1695,14 +1697,14 @@ void set_all_modules_text_ro(void)
64260
64261 mutex_lock(&module_mutex);
64262 list_for_each_entry_rcu(mod, &modules, list) {
64263 - if ((mod->module_core) && (mod->core_text_size)) {
64264 - set_page_attributes(mod->module_core,
64265 - mod->module_core + mod->core_text_size,
64266 + if ((mod->module_core_rx) && (mod->core_size_rx)) {
64267 + set_page_attributes(mod->module_core_rx,
64268 + mod->module_core_rx + mod->core_size_rx,
64269 set_memory_ro);
64270 }
64271 - if ((mod->module_init) && (mod->init_text_size)) {
64272 - set_page_attributes(mod->module_init,
64273 - mod->module_init + mod->init_text_size,
64274 + if ((mod->module_init_rx) && (mod->init_size_rx)) {
64275 + set_page_attributes(mod->module_init_rx,
64276 + mod->module_init_rx + mod->init_size_rx,
64277 set_memory_ro);
64278 }
64279 }
64280 @@ -1748,16 +1750,19 @@ static void free_module(struct module *mod)
64281
64282 /* This may be NULL, but that's OK */
64283 unset_module_init_ro_nx(mod);
64284 - module_free(mod, mod->module_init);
64285 + module_free(mod, mod->module_init_rw);
64286 + module_free_exec(mod, mod->module_init_rx);
64287 kfree(mod->args);
64288 percpu_modfree(mod);
64289
64290 /* Free lock-classes: */
64291 - lockdep_free_key_range(mod->module_core, mod->core_size);
64292 + lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
64293 + lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
64294
64295 /* Finally, free the core (containing the module structure) */
64296 unset_module_core_ro_nx(mod);
64297 - module_free(mod, mod->module_core);
64298 + module_free_exec(mod, mod->module_core_rx);
64299 + module_free(mod, mod->module_core_rw);
64300
64301 #ifdef CONFIG_MPU
64302 update_protections(current->mm);
64303 @@ -1826,10 +1831,31 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
64304 unsigned int i;
64305 int ret = 0;
64306 const struct kernel_symbol *ksym;
64307 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
64308 + int is_fs_load = 0;
64309 + int register_filesystem_found = 0;
64310 + char *p;
64311 +
64312 + p = strstr(mod->args, "grsec_modharden_fs");
64313 + if (p) {
64314 + char *endptr = p + strlen("grsec_modharden_fs");
64315 + /* copy \0 as well */
64316 + memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
64317 + is_fs_load = 1;
64318 + }
64319 +#endif
64320
64321 for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
64322 const char *name = info->strtab + sym[i].st_name;
64323
64324 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
64325 + /* it's a real shame this will never get ripped and copied
64326 + upstream! ;(
64327 + */
64328 + if (is_fs_load && !strcmp(name, "register_filesystem"))
64329 + register_filesystem_found = 1;
64330 +#endif
64331 +
64332 switch (sym[i].st_shndx) {
64333 case SHN_COMMON:
64334 /* We compiled with -fno-common. These are not
64335 @@ -1850,7 +1876,9 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
64336 ksym = resolve_symbol_wait(mod, info, name);
64337 /* Ok if resolved. */
64338 if (ksym && !IS_ERR(ksym)) {
64339 + pax_open_kernel();
64340 sym[i].st_value = ksym->value;
64341 + pax_close_kernel();
64342 break;
64343 }
64344
64345 @@ -1869,11 +1897,20 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
64346 secbase = (unsigned long)mod_percpu(mod);
64347 else
64348 secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
64349 + pax_open_kernel();
64350 sym[i].st_value += secbase;
64351 + pax_close_kernel();
64352 break;
64353 }
64354 }
64355
64356 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
64357 + if (is_fs_load && !register_filesystem_found) {
64358 + printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
64359 + ret = -EPERM;
64360 + }
64361 +#endif
64362 +
64363 return ret;
64364 }
64365
64366 @@ -1977,22 +2014,12 @@ static void layout_sections(struct module *mod, struct load_info *info)
64367 || s->sh_entsize != ~0UL
64368 || strstarts(sname, ".init"))
64369 continue;
64370 - s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
64371 + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
64372 + s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
64373 + else
64374 + s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
64375 DEBUGP("\t%s\n", name);
64376 }
64377 - switch (m) {
64378 - case 0: /* executable */
64379 - mod->core_size = debug_align(mod->core_size);
64380 - mod->core_text_size = mod->core_size;
64381 - break;
64382 - case 1: /* RO: text and ro-data */
64383 - mod->core_size = debug_align(mod->core_size);
64384 - mod->core_ro_size = mod->core_size;
64385 - break;
64386 - case 3: /* whole core */
64387 - mod->core_size = debug_align(mod->core_size);
64388 - break;
64389 - }
64390 }
64391
64392 DEBUGP("Init section allocation order:\n");
64393 @@ -2006,23 +2033,13 @@ static void layout_sections(struct module *mod, struct load_info *info)
64394 || s->sh_entsize != ~0UL
64395 || !strstarts(sname, ".init"))
64396 continue;
64397 - s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
64398 - | INIT_OFFSET_MASK);
64399 + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
64400 + s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
64401 + else
64402 + s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
64403 + s->sh_entsize |= INIT_OFFSET_MASK;
64404 DEBUGP("\t%s\n", sname);
64405 }
64406 - switch (m) {
64407 - case 0: /* executable */
64408 - mod->init_size = debug_align(mod->init_size);
64409 - mod->init_text_size = mod->init_size;
64410 - break;
64411 - case 1: /* RO: text and ro-data */
64412 - mod->init_size = debug_align(mod->init_size);
64413 - mod->init_ro_size = mod->init_size;
64414 - break;
64415 - case 3: /* whole init */
64416 - mod->init_size = debug_align(mod->init_size);
64417 - break;
64418 - }
64419 }
64420 }
64421
64422 @@ -2187,7 +2204,7 @@ static void layout_symtab(struct module *mod, struct load_info *info)
64423
64424 /* Put symbol section at end of init part of module. */
64425 symsect->sh_flags |= SHF_ALLOC;
64426 - symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
64427 + symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
64428 info->index.sym) | INIT_OFFSET_MASK;
64429 DEBUGP("\t%s\n", info->secstrings + symsect->sh_name);
64430
64431 @@ -2204,19 +2221,19 @@ static void layout_symtab(struct module *mod, struct load_info *info)
64432 }
64433
64434 /* Append room for core symbols at end of core part. */
64435 - info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
64436 - mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
64437 + info->symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
64438 + mod->core_size_rx = info->symoffs + ndst * sizeof(Elf_Sym);
64439
64440 /* Put string table section at end of init part of module. */
64441 strsect->sh_flags |= SHF_ALLOC;
64442 - strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
64443 + strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
64444 info->index.str) | INIT_OFFSET_MASK;
64445 DEBUGP("\t%s\n", info->secstrings + strsect->sh_name);
64446
64447 /* Append room for core symbols' strings at end of core part. */
64448 - info->stroffs = mod->core_size;
64449 + info->stroffs = mod->core_size_rx;
64450 __set_bit(0, info->strmap);
64451 - mod->core_size += bitmap_weight(info->strmap, strsect->sh_size);
64452 + mod->core_size_rx += bitmap_weight(info->strmap, strsect->sh_size);
64453 }
64454
64455 static void add_kallsyms(struct module *mod, const struct load_info *info)
64456 @@ -2232,11 +2249,13 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
64457 /* Make sure we get permanent strtab: don't use info->strtab. */
64458 mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
64459
64460 + pax_open_kernel();
64461 +
64462 /* Set types up while we still have access to sections. */
64463 for (i = 0; i < mod->num_symtab; i++)
64464 mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
64465
64466 - mod->core_symtab = dst = mod->module_core + info->symoffs;
64467 + mod->core_symtab = dst = mod->module_core_rx + info->symoffs;
64468 src = mod->symtab;
64469 *dst = *src;
64470 for (ndst = i = 1; i < mod->num_symtab; ++i, ++src) {
64471 @@ -2249,10 +2268,12 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
64472 }
64473 mod->core_num_syms = ndst;
64474
64475 - mod->core_strtab = s = mod->module_core + info->stroffs;
64476 + mod->core_strtab = s = mod->module_core_rx + info->stroffs;
64477 for (*s = 0, i = 1; i < info->sechdrs[info->index.str].sh_size; ++i)
64478 if (test_bit(i, info->strmap))
64479 *++s = mod->strtab[i];
64480 +
64481 + pax_close_kernel();
64482 }
64483 #else
64484 static inline void layout_symtab(struct module *mod, struct load_info *info)
64485 @@ -2286,17 +2307,33 @@ void * __weak module_alloc(unsigned long size)
64486 return size == 0 ? NULL : vmalloc_exec(size);
64487 }
64488
64489 -static void *module_alloc_update_bounds(unsigned long size)
64490 +static void *module_alloc_update_bounds_rw(unsigned long size)
64491 {
64492 void *ret = module_alloc(size);
64493
64494 if (ret) {
64495 mutex_lock(&module_mutex);
64496 /* Update module bounds. */
64497 - if ((unsigned long)ret < module_addr_min)
64498 - module_addr_min = (unsigned long)ret;
64499 - if ((unsigned long)ret + size > module_addr_max)
64500 - module_addr_max = (unsigned long)ret + size;
64501 + if ((unsigned long)ret < module_addr_min_rw)
64502 + module_addr_min_rw = (unsigned long)ret;
64503 + if ((unsigned long)ret + size > module_addr_max_rw)
64504 + module_addr_max_rw = (unsigned long)ret + size;
64505 + mutex_unlock(&module_mutex);
64506 + }
64507 + return ret;
64508 +}
64509 +
64510 +static void *module_alloc_update_bounds_rx(unsigned long size)
64511 +{
64512 + void *ret = module_alloc_exec(size);
64513 +
64514 + if (ret) {
64515 + mutex_lock(&module_mutex);
64516 + /* Update module bounds. */
64517 + if ((unsigned long)ret < module_addr_min_rx)
64518 + module_addr_min_rx = (unsigned long)ret;
64519 + if ((unsigned long)ret + size > module_addr_max_rx)
64520 + module_addr_max_rx = (unsigned long)ret + size;
64521 mutex_unlock(&module_mutex);
64522 }
64523 return ret;
64524 @@ -2474,8 +2511,14 @@ static struct module *setup_load_info(struct load_info *info)
64525 static int check_modinfo(struct module *mod, struct load_info *info)
64526 {
64527 const char *modmagic = get_modinfo(info, "vermagic");
64528 + const char *license = get_modinfo(info, "license");
64529 int err;
64530
64531 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
64532 + if (!license || !license_is_gpl_compatible(license))
64533 + return -ENOEXEC;
64534 +#endif
64535 +
64536 /* This is allowed: modprobe --force will invalidate it. */
64537 if (!modmagic) {
64538 err = try_to_force_load(mod, "bad vermagic");
64539 @@ -2498,7 +2541,7 @@ static int check_modinfo(struct module *mod, struct load_info *info)
64540 }
64541
64542 /* Set up license info based on the info section */
64543 - set_license(mod, get_modinfo(info, "license"));
64544 + set_license(mod, license);
64545
64546 return 0;
64547 }
64548 @@ -2592,7 +2635,7 @@ static int move_module(struct module *mod, struct load_info *info)
64549 void *ptr;
64550
64551 /* Do the allocs. */
64552 - ptr = module_alloc_update_bounds(mod->core_size);
64553 + ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
64554 /*
64555 * The pointer to this block is stored in the module structure
64556 * which is inside the block. Just mark it as not being a
64557 @@ -2602,23 +2645,50 @@ static int move_module(struct module *mod, struct load_info *info)
64558 if (!ptr)
64559 return -ENOMEM;
64560
64561 - memset(ptr, 0, mod->core_size);
64562 - mod->module_core = ptr;
64563 + memset(ptr, 0, mod->core_size_rw);
64564 + mod->module_core_rw = ptr;
64565
64566 - ptr = module_alloc_update_bounds(mod->init_size);
64567 + ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
64568 /*
64569 * The pointer to this block is stored in the module structure
64570 * which is inside the block. This block doesn't need to be
64571 * scanned as it contains data and code that will be freed
64572 * after the module is initialized.
64573 */
64574 - kmemleak_ignore(ptr);
64575 - if (!ptr && mod->init_size) {
64576 - module_free(mod, mod->module_core);
64577 + kmemleak_not_leak(ptr);
64578 + if (!ptr && mod->init_size_rw) {
64579 + module_free(mod, mod->module_core_rw);
64580 return -ENOMEM;
64581 }
64582 - memset(ptr, 0, mod->init_size);
64583 - mod->module_init = ptr;
64584 + memset(ptr, 0, mod->init_size_rw);
64585 + mod->module_init_rw = ptr;
64586 +
64587 + ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
64588 + kmemleak_not_leak(ptr);
64589 + if (!ptr) {
64590 + module_free(mod, mod->module_init_rw);
64591 + module_free(mod, mod->module_core_rw);
64592 + return -ENOMEM;
64593 + }
64594 +
64595 + pax_open_kernel();
64596 + memset(ptr, 0, mod->core_size_rx);
64597 + pax_close_kernel();
64598 + mod->module_core_rx = ptr;
64599 +
64600 + ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
64601 + kmemleak_not_leak(ptr);
64602 + if (!ptr && mod->init_size_rx) {
64603 + module_free_exec(mod, mod->module_core_rx);
64604 + module_free(mod, mod->module_init_rw);
64605 + module_free(mod, mod->module_core_rw);
64606 + return -ENOMEM;
64607 + }
64608 +
64609 + pax_open_kernel();
64610 + memset(ptr, 0, mod->init_size_rx);
64611 + pax_close_kernel();
64612 + mod->module_init_rx = ptr;
64613
64614 /* Transfer each section which specifies SHF_ALLOC */
64615 DEBUGP("final section addresses:\n");
64616 @@ -2629,16 +2699,45 @@ static int move_module(struct module *mod, struct load_info *info)
64617 if (!(shdr->sh_flags & SHF_ALLOC))
64618 continue;
64619
64620 - if (shdr->sh_entsize & INIT_OFFSET_MASK)
64621 - dest = mod->module_init
64622 - + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
64623 - else
64624 - dest = mod->module_core + shdr->sh_entsize;
64625 + if (shdr->sh_entsize & INIT_OFFSET_MASK) {
64626 + if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
64627 + dest = mod->module_init_rw
64628 + + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
64629 + else
64630 + dest = mod->module_init_rx
64631 + + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
64632 + } else {
64633 + if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
64634 + dest = mod->module_core_rw + shdr->sh_entsize;
64635 + else
64636 + dest = mod->module_core_rx + shdr->sh_entsize;
64637 + }
64638 +
64639 + if (shdr->sh_type != SHT_NOBITS) {
64640 +
64641 +#ifdef CONFIG_PAX_KERNEXEC
64642 +#ifdef CONFIG_X86_64
64643 + if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR))
64644 + set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT);
64645 +#endif
64646 + if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) {
64647 + pax_open_kernel();
64648 + memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
64649 + pax_close_kernel();
64650 + } else
64651 +#endif
64652
64653 - if (shdr->sh_type != SHT_NOBITS)
64654 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
64655 + }
64656 /* Update sh_addr to point to copy in image. */
64657 - shdr->sh_addr = (unsigned long)dest;
64658 +
64659 +#ifdef CONFIG_PAX_KERNEXEC
64660 + if (shdr->sh_flags & SHF_EXECINSTR)
64661 + shdr->sh_addr = ktva_ktla((unsigned long)dest);
64662 + else
64663 +#endif
64664 +
64665 + shdr->sh_addr = (unsigned long)dest;
64666 DEBUGP("\t0x%lx %s\n",
64667 shdr->sh_addr, info->secstrings + shdr->sh_name);
64668 }
64669 @@ -2689,12 +2788,12 @@ static void flush_module_icache(const struct module *mod)
64670 * Do it before processing of module parameters, so the module
64671 * can provide parameter accessor functions of its own.
64672 */
64673 - if (mod->module_init)
64674 - flush_icache_range((unsigned long)mod->module_init,
64675 - (unsigned long)mod->module_init
64676 - + mod->init_size);
64677 - flush_icache_range((unsigned long)mod->module_core,
64678 - (unsigned long)mod->module_core + mod->core_size);
64679 + if (mod->module_init_rx)
64680 + flush_icache_range((unsigned long)mod->module_init_rx,
64681 + (unsigned long)mod->module_init_rx
64682 + + mod->init_size_rx);
64683 + flush_icache_range((unsigned long)mod->module_core_rx,
64684 + (unsigned long)mod->module_core_rx + mod->core_size_rx);
64685
64686 set_fs(old_fs);
64687 }
64688 @@ -2774,8 +2873,10 @@ static void module_deallocate(struct module *mod, struct load_info *info)
64689 {
64690 kfree(info->strmap);
64691 percpu_modfree(mod);
64692 - module_free(mod, mod->module_init);
64693 - module_free(mod, mod->module_core);
64694 + module_free_exec(mod, mod->module_init_rx);
64695 + module_free_exec(mod, mod->module_core_rx);
64696 + module_free(mod, mod->module_init_rw);
64697 + module_free(mod, mod->module_core_rw);
64698 }
64699
64700 int __weak module_finalize(const Elf_Ehdr *hdr,
64701 @@ -2839,9 +2940,38 @@ static struct module *load_module(void __user *umod,
64702 if (err)
64703 goto free_unload;
64704
64705 + /* Now copy in args */
64706 + mod->args = strndup_user(uargs, ~0UL >> 1);
64707 + if (IS_ERR(mod->args)) {
64708 + err = PTR_ERR(mod->args);
64709 + goto free_unload;
64710 + }
64711 +
64712 /* Set up MODINFO_ATTR fields */
64713 setup_modinfo(mod, &info);
64714
64715 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
64716 + {
64717 + char *p, *p2;
64718 +
64719 + if (strstr(mod->args, "grsec_modharden_netdev")) {
64720 + printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
64721 + err = -EPERM;
64722 + goto free_modinfo;
64723 + } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
64724 + p += strlen("grsec_modharden_normal");
64725 + p2 = strstr(p, "_");
64726 + if (p2) {
64727 + *p2 = '\0';
64728 + printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
64729 + *p2 = '_';
64730 + }
64731 + err = -EPERM;
64732 + goto free_modinfo;
64733 + }
64734 + }
64735 +#endif
64736 +
64737 /* Fix up syms, so that st_value is a pointer to location. */
64738 err = simplify_symbols(mod, &info);
64739 if (err < 0)
64740 @@ -2857,13 +2987,6 @@ static struct module *load_module(void __user *umod,
64741
64742 flush_module_icache(mod);
64743
64744 - /* Now copy in args */
64745 - mod->args = strndup_user(uargs, ~0UL >> 1);
64746 - if (IS_ERR(mod->args)) {
64747 - err = PTR_ERR(mod->args);
64748 - goto free_arch_cleanup;
64749 - }
64750 -
64751 /* Mark state as coming so strong_try_module_get() ignores us. */
64752 mod->state = MODULE_STATE_COMING;
64753
64754 @@ -2921,11 +3044,10 @@ static struct module *load_module(void __user *umod,
64755 unlock:
64756 mutex_unlock(&module_mutex);
64757 synchronize_sched();
64758 - kfree(mod->args);
64759 - free_arch_cleanup:
64760 module_arch_cleanup(mod);
64761 free_modinfo:
64762 free_modinfo(mod);
64763 + kfree(mod->args);
64764 free_unload:
64765 module_unload_free(mod);
64766 free_module:
64767 @@ -2966,16 +3088,16 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
64768 MODULE_STATE_COMING, mod);
64769
64770 /* Set RO and NX regions for core */
64771 - set_section_ro_nx(mod->module_core,
64772 - mod->core_text_size,
64773 - mod->core_ro_size,
64774 - mod->core_size);
64775 + set_section_ro_nx(mod->module_core_rx,
64776 + mod->core_size_rx,
64777 + mod->core_size_rx,
64778 + mod->core_size_rx);
64779
64780 /* Set RO and NX regions for init */
64781 - set_section_ro_nx(mod->module_init,
64782 - mod->init_text_size,
64783 - mod->init_ro_size,
64784 - mod->init_size);
64785 + set_section_ro_nx(mod->module_init_rx,
64786 + mod->init_size_rx,
64787 + mod->init_size_rx,
64788 + mod->init_size_rx);
64789
64790 do_mod_ctors(mod);
64791 /* Start the module */
64792 @@ -3021,11 +3143,12 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
64793 mod->strtab = mod->core_strtab;
64794 #endif
64795 unset_module_init_ro_nx(mod);
64796 - module_free(mod, mod->module_init);
64797 - mod->module_init = NULL;
64798 - mod->init_size = 0;
64799 - mod->init_ro_size = 0;
64800 - mod->init_text_size = 0;
64801 + module_free(mod, mod->module_init_rw);
64802 + module_free_exec(mod, mod->module_init_rx);
64803 + mod->module_init_rw = NULL;
64804 + mod->module_init_rx = NULL;
64805 + mod->init_size_rw = 0;
64806 + mod->init_size_rx = 0;
64807 mutex_unlock(&module_mutex);
64808
64809 return 0;
64810 @@ -3056,10 +3179,16 @@ static const char *get_ksymbol(struct module *mod,
64811 unsigned long nextval;
64812
64813 /* At worse, next value is at end of module */
64814 - if (within_module_init(addr, mod))
64815 - nextval = (unsigned long)mod->module_init+mod->init_text_size;
64816 + if (within_module_init_rx(addr, mod))
64817 + nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
64818 + else if (within_module_init_rw(addr, mod))
64819 + nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
64820 + else if (within_module_core_rx(addr, mod))
64821 + nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
64822 + else if (within_module_core_rw(addr, mod))
64823 + nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
64824 else
64825 - nextval = (unsigned long)mod->module_core+mod->core_text_size;
64826 + return NULL;
64827
64828 /* Scan for closest preceding symbol, and next symbol. (ELF
64829 starts real symbols at 1). */
64830 @@ -3307,7 +3436,7 @@ static int m_show(struct seq_file *m, void *p)
64831 char buf[8];
64832
64833 seq_printf(m, "%s %u",
64834 - mod->name, mod->init_size + mod->core_size);
64835 + mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
64836 print_unload_info(m, mod);
64837
64838 /* Informative for users. */
64839 @@ -3316,7 +3445,7 @@ static int m_show(struct seq_file *m, void *p)
64840 mod->state == MODULE_STATE_COMING ? "Loading":
64841 "Live");
64842 /* Used by oprofile and other similar tools. */
64843 - seq_printf(m, " 0x%pK", mod->module_core);
64844 + seq_printf(m, " 0x%pK 0x%pK", mod->module_core_rx, mod->module_core_rw);
64845
64846 /* Taints info */
64847 if (mod->taints)
64848 @@ -3352,7 +3481,17 @@ static const struct file_operations proc_modules_operations = {
64849
64850 static int __init proc_modules_init(void)
64851 {
64852 +#ifndef CONFIG_GRKERNSEC_HIDESYM
64853 +#ifdef CONFIG_GRKERNSEC_PROC_USER
64854 + proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
64855 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
64856 + proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
64857 +#else
64858 proc_create("modules", 0, NULL, &proc_modules_operations);
64859 +#endif
64860 +#else
64861 + proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
64862 +#endif
64863 return 0;
64864 }
64865 module_init(proc_modules_init);
64866 @@ -3411,12 +3550,12 @@ struct module *__module_address(unsigned long addr)
64867 {
64868 struct module *mod;
64869
64870 - if (addr < module_addr_min || addr > module_addr_max)
64871 + if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
64872 + (addr < module_addr_min_rw || addr > module_addr_max_rw))
64873 return NULL;
64874
64875 list_for_each_entry_rcu(mod, &modules, list)
64876 - if (within_module_core(addr, mod)
64877 - || within_module_init(addr, mod))
64878 + if (within_module_init(addr, mod) || within_module_core(addr, mod))
64879 return mod;
64880 return NULL;
64881 }
64882 @@ -3450,11 +3589,20 @@ bool is_module_text_address(unsigned long addr)
64883 */
64884 struct module *__module_text_address(unsigned long addr)
64885 {
64886 - struct module *mod = __module_address(addr);
64887 + struct module *mod;
64888 +
64889 +#ifdef CONFIG_X86_32
64890 + addr = ktla_ktva(addr);
64891 +#endif
64892 +
64893 + if (addr < module_addr_min_rx || addr > module_addr_max_rx)
64894 + return NULL;
64895 +
64896 + mod = __module_address(addr);
64897 +
64898 if (mod) {
64899 /* Make sure it's within the text section. */
64900 - if (!within(addr, mod->module_init, mod->init_text_size)
64901 - && !within(addr, mod->module_core, mod->core_text_size))
64902 + if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
64903 mod = NULL;
64904 }
64905 return mod;
64906 diff --git a/kernel/mutex-debug.c b/kernel/mutex-debug.c
64907 index 7e3443f..b2a1e6b 100644
64908 --- a/kernel/mutex-debug.c
64909 +++ b/kernel/mutex-debug.c
64910 @@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter)
64911 }
64912
64913 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
64914 - struct thread_info *ti)
64915 + struct task_struct *task)
64916 {
64917 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
64918
64919 /* Mark the current thread as blocked on the lock: */
64920 - ti->task->blocked_on = waiter;
64921 + task->blocked_on = waiter;
64922 }
64923
64924 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
64925 - struct thread_info *ti)
64926 + struct task_struct *task)
64927 {
64928 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
64929 - DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
64930 - DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
64931 - ti->task->blocked_on = NULL;
64932 + DEBUG_LOCKS_WARN_ON(waiter->task != task);
64933 + DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
64934 + task->blocked_on = NULL;
64935
64936 list_del_init(&waiter->list);
64937 waiter->task = NULL;
64938 diff --git a/kernel/mutex-debug.h b/kernel/mutex-debug.h
64939 index 0799fd3..d06ae3b 100644
64940 --- a/kernel/mutex-debug.h
64941 +++ b/kernel/mutex-debug.h
64942 @@ -20,9 +20,9 @@ extern void debug_mutex_wake_waiter(struct mutex *lock,
64943 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
64944 extern void debug_mutex_add_waiter(struct mutex *lock,
64945 struct mutex_waiter *waiter,
64946 - struct thread_info *ti);
64947 + struct task_struct *task);
64948 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
64949 - struct thread_info *ti);
64950 + struct task_struct *task);
64951 extern void debug_mutex_unlock(struct mutex *lock);
64952 extern void debug_mutex_init(struct mutex *lock, const char *name,
64953 struct lock_class_key *key);
64954 diff --git a/kernel/mutex.c b/kernel/mutex.c
64955 index 89096dd..f91ebc5 100644
64956 --- a/kernel/mutex.c
64957 +++ b/kernel/mutex.c
64958 @@ -198,7 +198,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
64959 spin_lock_mutex(&lock->wait_lock, flags);
64960
64961 debug_mutex_lock_common(lock, &waiter);
64962 - debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
64963 + debug_mutex_add_waiter(lock, &waiter, task);
64964
64965 /* add waiting tasks to the end of the waitqueue (FIFO): */
64966 list_add_tail(&waiter.list, &lock->wait_list);
64967 @@ -227,8 +227,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
64968 * TASK_UNINTERRUPTIBLE case.)
64969 */
64970 if (unlikely(signal_pending_state(state, task))) {
64971 - mutex_remove_waiter(lock, &waiter,
64972 - task_thread_info(task));
64973 + mutex_remove_waiter(lock, &waiter, task);
64974 mutex_release(&lock->dep_map, 1, ip);
64975 spin_unlock_mutex(&lock->wait_lock, flags);
64976
64977 @@ -249,7 +248,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
64978 done:
64979 lock_acquired(&lock->dep_map, ip);
64980 /* got the lock - rejoice! */
64981 - mutex_remove_waiter(lock, &waiter, current_thread_info());
64982 + mutex_remove_waiter(lock, &waiter, task);
64983 mutex_set_owner(lock);
64984
64985 /* set it to 0 if there are no waiters left: */
64986 diff --git a/kernel/padata.c b/kernel/padata.c
64987 index b452599..5d68f4e 100644
64988 --- a/kernel/padata.c
64989 +++ b/kernel/padata.c
64990 @@ -132,10 +132,10 @@ int padata_do_parallel(struct padata_instance *pinst,
64991 padata->pd = pd;
64992 padata->cb_cpu = cb_cpu;
64993
64994 - if (unlikely(atomic_read(&pd->seq_nr) == pd->max_seq_nr))
64995 - atomic_set(&pd->seq_nr, -1);
64996 + if (unlikely(atomic_read_unchecked(&pd->seq_nr) == pd->max_seq_nr))
64997 + atomic_set_unchecked(&pd->seq_nr, -1);
64998
64999 - padata->seq_nr = atomic_inc_return(&pd->seq_nr);
65000 + padata->seq_nr = atomic_inc_return_unchecked(&pd->seq_nr);
65001
65002 target_cpu = padata_cpu_hash(padata);
65003 queue = per_cpu_ptr(pd->pqueue, target_cpu);
65004 @@ -444,7 +444,7 @@ static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst,
65005 padata_init_pqueues(pd);
65006 padata_init_squeues(pd);
65007 setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
65008 - atomic_set(&pd->seq_nr, -1);
65009 + atomic_set_unchecked(&pd->seq_nr, -1);
65010 atomic_set(&pd->reorder_objects, 0);
65011 atomic_set(&pd->refcnt, 0);
65012 pd->pinst = pinst;
65013 diff --git a/kernel/panic.c b/kernel/panic.c
65014 index 3458469..342c500 100644
65015 --- a/kernel/panic.c
65016 +++ b/kernel/panic.c
65017 @@ -78,7 +78,11 @@ NORET_TYPE void panic(const char * fmt, ...)
65018 va_end(args);
65019 printk(KERN_EMERG "Kernel panic - not syncing: %s\n",buf);
65020 #ifdef CONFIG_DEBUG_BUGVERBOSE
65021 - dump_stack();
65022 + /*
65023 + * Avoid nested stack-dumping if a panic occurs during oops processing
65024 + */
65025 + if (!oops_in_progress)
65026 + dump_stack();
65027 #endif
65028
65029 /*
65030 @@ -382,7 +386,7 @@ static void warn_slowpath_common(const char *file, int line, void *caller,
65031 const char *board;
65032
65033 printk(KERN_WARNING "------------[ cut here ]------------\n");
65034 - printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
65035 + printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
65036 board = dmi_get_system_info(DMI_PRODUCT_NAME);
65037 if (board)
65038 printk(KERN_WARNING "Hardware name: %s\n", board);
65039 @@ -437,7 +441,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
65040 */
65041 void __stack_chk_fail(void)
65042 {
65043 - panic("stack-protector: Kernel stack is corrupted in: %p\n",
65044 + dump_stack();
65045 + panic("stack-protector: Kernel stack is corrupted in: %pA\n",
65046 __builtin_return_address(0));
65047 }
65048 EXPORT_SYMBOL(__stack_chk_fail);
65049 diff --git a/kernel/pid.c b/kernel/pid.c
65050 index fa5f722..0c93e57 100644
65051 --- a/kernel/pid.c
65052 +++ b/kernel/pid.c
65053 @@ -33,6 +33,7 @@
65054 #include <linux/rculist.h>
65055 #include <linux/bootmem.h>
65056 #include <linux/hash.h>
65057 +#include <linux/security.h>
65058 #include <linux/pid_namespace.h>
65059 #include <linux/init_task.h>
65060 #include <linux/syscalls.h>
65061 @@ -45,7 +46,7 @@ struct pid init_struct_pid = INIT_STRUCT_PID;
65062
65063 int pid_max = PID_MAX_DEFAULT;
65064
65065 -#define RESERVED_PIDS 300
65066 +#define RESERVED_PIDS 500
65067
65068 int pid_max_min = RESERVED_PIDS + 1;
65069 int pid_max_max = PID_MAX_LIMIT;
65070 @@ -418,10 +419,18 @@ EXPORT_SYMBOL(pid_task);
65071 */
65072 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
65073 {
65074 + struct task_struct *task;
65075 +
65076 rcu_lockdep_assert(rcu_read_lock_held(),
65077 "find_task_by_pid_ns() needs rcu_read_lock()"
65078 " protection");
65079 - return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
65080 +
65081 + task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
65082 +
65083 + if (gr_pid_is_chrooted(task))
65084 + return NULL;
65085 +
65086 + return task;
65087 }
65088
65089 struct task_struct *find_task_by_vpid(pid_t vnr)
65090 @@ -429,6 +438,14 @@ struct task_struct *find_task_by_vpid(pid_t vnr)
65091 return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns);
65092 }
65093
65094 +struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
65095 +{
65096 + rcu_lockdep_assert(rcu_read_lock_held(),
65097 + "find_task_by_pid_ns() needs rcu_read_lock()"
65098 + " protection");
65099 + return pid_task(find_pid_ns(vnr, current->nsproxy->pid_ns), PIDTYPE_PID);
65100 +}
65101 +
65102 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
65103 {
65104 struct pid *pid;
65105 diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
65106 index e7cb76d..75eceb3 100644
65107 --- a/kernel/posix-cpu-timers.c
65108 +++ b/kernel/posix-cpu-timers.c
65109 @@ -6,6 +6,7 @@
65110 #include <linux/posix-timers.h>
65111 #include <linux/errno.h>
65112 #include <linux/math64.h>
65113 +#include <linux/security.h>
65114 #include <asm/uaccess.h>
65115 #include <linux/kernel_stat.h>
65116 #include <trace/events/timer.h>
65117 @@ -1606,14 +1607,14 @@ struct k_clock clock_posix_cpu = {
65118
65119 static __init int init_posix_cpu_timers(void)
65120 {
65121 - struct k_clock process = {
65122 + static struct k_clock process = {
65123 .clock_getres = process_cpu_clock_getres,
65124 .clock_get = process_cpu_clock_get,
65125 .timer_create = process_cpu_timer_create,
65126 .nsleep = process_cpu_nsleep,
65127 .nsleep_restart = process_cpu_nsleep_restart,
65128 };
65129 - struct k_clock thread = {
65130 + static struct k_clock thread = {
65131 .clock_getres = thread_cpu_clock_getres,
65132 .clock_get = thread_cpu_clock_get,
65133 .timer_create = thread_cpu_timer_create,
65134 diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
65135 index 69185ae..cc2847a 100644
65136 --- a/kernel/posix-timers.c
65137 +++ b/kernel/posix-timers.c
65138 @@ -43,6 +43,7 @@
65139 #include <linux/idr.h>
65140 #include <linux/posix-clock.h>
65141 #include <linux/posix-timers.h>
65142 +#include <linux/grsecurity.h>
65143 #include <linux/syscalls.h>
65144 #include <linux/wait.h>
65145 #include <linux/workqueue.h>
65146 @@ -129,7 +130,7 @@ static DEFINE_SPINLOCK(idr_lock);
65147 * which we beg off on and pass to do_sys_settimeofday().
65148 */
65149
65150 -static struct k_clock posix_clocks[MAX_CLOCKS];
65151 +static struct k_clock *posix_clocks[MAX_CLOCKS];
65152
65153 /*
65154 * These ones are defined below.
65155 @@ -227,7 +228,7 @@ static int posix_get_boottime(const clockid_t which_clock, struct timespec *tp)
65156 */
65157 static __init int init_posix_timers(void)
65158 {
65159 - struct k_clock clock_realtime = {
65160 + static struct k_clock clock_realtime = {
65161 .clock_getres = hrtimer_get_res,
65162 .clock_get = posix_clock_realtime_get,
65163 .clock_set = posix_clock_realtime_set,
65164 @@ -239,7 +240,7 @@ static __init int init_posix_timers(void)
65165 .timer_get = common_timer_get,
65166 .timer_del = common_timer_del,
65167 };
65168 - struct k_clock clock_monotonic = {
65169 + static struct k_clock clock_monotonic = {
65170 .clock_getres = hrtimer_get_res,
65171 .clock_get = posix_ktime_get_ts,
65172 .nsleep = common_nsleep,
65173 @@ -249,19 +250,19 @@ static __init int init_posix_timers(void)
65174 .timer_get = common_timer_get,
65175 .timer_del = common_timer_del,
65176 };
65177 - struct k_clock clock_monotonic_raw = {
65178 + static struct k_clock clock_monotonic_raw = {
65179 .clock_getres = hrtimer_get_res,
65180 .clock_get = posix_get_monotonic_raw,
65181 };
65182 - struct k_clock clock_realtime_coarse = {
65183 + static struct k_clock clock_realtime_coarse = {
65184 .clock_getres = posix_get_coarse_res,
65185 .clock_get = posix_get_realtime_coarse,
65186 };
65187 - struct k_clock clock_monotonic_coarse = {
65188 + static struct k_clock clock_monotonic_coarse = {
65189 .clock_getres = posix_get_coarse_res,
65190 .clock_get = posix_get_monotonic_coarse,
65191 };
65192 - struct k_clock clock_boottime = {
65193 + static struct k_clock clock_boottime = {
65194 .clock_getres = hrtimer_get_res,
65195 .clock_get = posix_get_boottime,
65196 .nsleep = common_nsleep,
65197 @@ -473,7 +474,7 @@ void posix_timers_register_clock(const clockid_t clock_id,
65198 return;
65199 }
65200
65201 - posix_clocks[clock_id] = *new_clock;
65202 + posix_clocks[clock_id] = new_clock;
65203 }
65204 EXPORT_SYMBOL_GPL(posix_timers_register_clock);
65205
65206 @@ -519,9 +520,9 @@ static struct k_clock *clockid_to_kclock(const clockid_t id)
65207 return (id & CLOCKFD_MASK) == CLOCKFD ?
65208 &clock_posix_dynamic : &clock_posix_cpu;
65209
65210 - if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
65211 + if (id >= MAX_CLOCKS || !posix_clocks[id] || !posix_clocks[id]->clock_getres)
65212 return NULL;
65213 - return &posix_clocks[id];
65214 + return posix_clocks[id];
65215 }
65216
65217 static int common_timer_create(struct k_itimer *new_timer)
65218 @@ -959,6 +960,13 @@ SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
65219 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
65220 return -EFAULT;
65221
65222 + /* only the CLOCK_REALTIME clock can be set, all other clocks
65223 + have their clock_set fptr set to a nosettime dummy function
65224 + CLOCK_REALTIME has a NULL clock_set fptr which causes it to
65225 + call common_clock_set, which calls do_sys_settimeofday, which
65226 + we hook
65227 + */
65228 +
65229 return kc->clock_set(which_clock, &new_tp);
65230 }
65231
65232 diff --git a/kernel/power/poweroff.c b/kernel/power/poweroff.c
65233 index d523593..68197a4 100644
65234 --- a/kernel/power/poweroff.c
65235 +++ b/kernel/power/poweroff.c
65236 @@ -37,7 +37,7 @@ static struct sysrq_key_op sysrq_poweroff_op = {
65237 .enable_mask = SYSRQ_ENABLE_BOOT,
65238 };
65239
65240 -static int pm_sysrq_init(void)
65241 +static int __init pm_sysrq_init(void)
65242 {
65243 register_sysrq_key('o', &sysrq_poweroff_op);
65244 return 0;
65245 diff --git a/kernel/power/process.c b/kernel/power/process.c
65246 index 3d4b954..11af930 100644
65247 --- a/kernel/power/process.c
65248 +++ b/kernel/power/process.c
65249 @@ -41,6 +41,7 @@ static int try_to_freeze_tasks(bool sig_only)
65250 u64 elapsed_csecs64;
65251 unsigned int elapsed_csecs;
65252 bool wakeup = false;
65253 + bool timedout = false;
65254
65255 do_gettimeofday(&start);
65256
65257 @@ -51,6 +52,8 @@ static int try_to_freeze_tasks(bool sig_only)
65258
65259 while (true) {
65260 todo = 0;
65261 + if (time_after(jiffies, end_time))
65262 + timedout = true;
65263 read_lock(&tasklist_lock);
65264 do_each_thread(g, p) {
65265 if (frozen(p) || !freezable(p))
65266 @@ -71,9 +74,13 @@ static int try_to_freeze_tasks(bool sig_only)
65267 * try_to_stop() after schedule() in ptrace/signal
65268 * stop sees TIF_FREEZE.
65269 */
65270 - if (!task_is_stopped_or_traced(p) &&
65271 - !freezer_should_skip(p))
65272 + if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) {
65273 todo++;
65274 + if (timedout) {
65275 + printk(KERN_ERR "Task refusing to freeze:\n");
65276 + sched_show_task(p);
65277 + }
65278 + }
65279 } while_each_thread(g, p);
65280 read_unlock(&tasklist_lock);
65281
65282 @@ -82,7 +89,7 @@ static int try_to_freeze_tasks(bool sig_only)
65283 todo += wq_busy;
65284 }
65285
65286 - if (!todo || time_after(jiffies, end_time))
65287 + if (!todo || timedout)
65288 break;
65289
65290 if (pm_wakeup_pending()) {
65291 diff --git a/kernel/printk.c b/kernel/printk.c
65292 index 7982a0a..2095fdc 100644
65293 --- a/kernel/printk.c
65294 +++ b/kernel/printk.c
65295 @@ -313,6 +313,11 @@ static int check_syslog_permissions(int type, bool from_file)
65296 if (from_file && type != SYSLOG_ACTION_OPEN)
65297 return 0;
65298
65299 +#ifdef CONFIG_GRKERNSEC_DMESG
65300 + if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN))
65301 + return -EPERM;
65302 +#endif
65303 +
65304 if (syslog_action_restricted(type)) {
65305 if (capable(CAP_SYSLOG))
65306 return 0;
65307 diff --git a/kernel/profile.c b/kernel/profile.c
65308 index 76b8e77..a2930e8 100644
65309 --- a/kernel/profile.c
65310 +++ b/kernel/profile.c
65311 @@ -39,7 +39,7 @@ struct profile_hit {
65312 /* Oprofile timer tick hook */
65313 static int (*timer_hook)(struct pt_regs *) __read_mostly;
65314
65315 -static atomic_t *prof_buffer;
65316 +static atomic_unchecked_t *prof_buffer;
65317 static unsigned long prof_len, prof_shift;
65318
65319 int prof_on __read_mostly;
65320 @@ -281,7 +281,7 @@ static void profile_flip_buffers(void)
65321 hits[i].pc = 0;
65322 continue;
65323 }
65324 - atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
65325 + atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
65326 hits[i].hits = hits[i].pc = 0;
65327 }
65328 }
65329 @@ -342,9 +342,9 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
65330 * Add the current hit(s) and flush the write-queue out
65331 * to the global buffer:
65332 */
65333 - atomic_add(nr_hits, &prof_buffer[pc]);
65334 + atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
65335 for (i = 0; i < NR_PROFILE_HIT; ++i) {
65336 - atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
65337 + atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
65338 hits[i].pc = hits[i].hits = 0;
65339 }
65340 out:
65341 @@ -419,7 +419,7 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
65342 {
65343 unsigned long pc;
65344 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
65345 - atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
65346 + atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
65347 }
65348 #endif /* !CONFIG_SMP */
65349
65350 @@ -517,7 +517,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
65351 return -EFAULT;
65352 buf++; p++; count--; read++;
65353 }
65354 - pnt = (char *)prof_buffer + p - sizeof(atomic_t);
65355 + pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
65356 if (copy_to_user(buf, (void *)pnt, count))
65357 return -EFAULT;
65358 read += count;
65359 @@ -548,7 +548,7 @@ static ssize_t write_profile(struct file *file, const char __user *buf,
65360 }
65361 #endif
65362 profile_discard_flip_buffers();
65363 - memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
65364 + memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
65365 return count;
65366 }
65367
65368 diff --git a/kernel/ptrace.c b/kernel/ptrace.c
65369 index 78ab24a..332c915 100644
65370 --- a/kernel/ptrace.c
65371 +++ b/kernel/ptrace.c
65372 @@ -172,7 +172,8 @@ int ptrace_check_attach(struct task_struct *child, bool ignore_state)
65373 return ret;
65374 }
65375
65376 -int __ptrace_may_access(struct task_struct *task, unsigned int mode)
65377 +static int __ptrace_may_access(struct task_struct *task, unsigned int mode,
65378 + unsigned int log)
65379 {
65380 const struct cred *cred = current_cred(), *tcred;
65381
65382 @@ -198,7 +199,8 @@ int __ptrace_may_access(struct task_struct *task, unsigned int mode)
65383 cred->gid == tcred->sgid &&
65384 cred->gid == tcred->gid))
65385 goto ok;
65386 - if (ns_capable(tcred->user->user_ns, CAP_SYS_PTRACE))
65387 + if ((!log && ns_capable_nolog(tcred->user->user_ns, CAP_SYS_PTRACE)) ||
65388 + (log && ns_capable(tcred->user->user_ns, CAP_SYS_PTRACE)))
65389 goto ok;
65390 rcu_read_unlock();
65391 return -EPERM;
65392 @@ -207,7 +209,9 @@ ok:
65393 smp_rmb();
65394 if (task->mm)
65395 dumpable = get_dumpable(task->mm);
65396 - if (!dumpable && !task_ns_capable(task, CAP_SYS_PTRACE))
65397 + if (!dumpable &&
65398 + ((!log && !task_ns_capable_nolog(task, CAP_SYS_PTRACE)) ||
65399 + (log && !task_ns_capable(task, CAP_SYS_PTRACE))))
65400 return -EPERM;
65401
65402 return security_ptrace_access_check(task, mode);
65403 @@ -217,7 +221,21 @@ bool ptrace_may_access(struct task_struct *task, unsigned int mode)
65404 {
65405 int err;
65406 task_lock(task);
65407 - err = __ptrace_may_access(task, mode);
65408 + err = __ptrace_may_access(task, mode, 0);
65409 + task_unlock(task);
65410 + return !err;
65411 +}
65412 +
65413 +bool ptrace_may_access_nolock(struct task_struct *task, unsigned int mode)
65414 +{
65415 + return __ptrace_may_access(task, mode, 0);
65416 +}
65417 +
65418 +bool ptrace_may_access_log(struct task_struct *task, unsigned int mode)
65419 +{
65420 + int err;
65421 + task_lock(task);
65422 + err = __ptrace_may_access(task, mode, 1);
65423 task_unlock(task);
65424 return !err;
65425 }
65426 @@ -262,7 +280,7 @@ static int ptrace_attach(struct task_struct *task, long request,
65427 goto out;
65428
65429 task_lock(task);
65430 - retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
65431 + retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH, 1);
65432 task_unlock(task);
65433 if (retval)
65434 goto unlock_creds;
65435 @@ -277,7 +295,7 @@ static int ptrace_attach(struct task_struct *task, long request,
65436 task->ptrace = PT_PTRACED;
65437 if (seize)
65438 task->ptrace |= PT_SEIZED;
65439 - if (task_ns_capable(task, CAP_SYS_PTRACE))
65440 + if (task_ns_capable_nolog(task, CAP_SYS_PTRACE))
65441 task->ptrace |= PT_PTRACE_CAP;
65442
65443 __ptrace_link(task, current);
65444 @@ -483,7 +501,7 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
65445 break;
65446 return -EIO;
65447 }
65448 - if (copy_to_user(dst, buf, retval))
65449 + if (retval > sizeof(buf) || copy_to_user(dst, buf, retval))
65450 return -EFAULT;
65451 copied += retval;
65452 src += retval;
65453 @@ -680,7 +698,7 @@ int ptrace_request(struct task_struct *child, long request,
65454 bool seized = child->ptrace & PT_SEIZED;
65455 int ret = -EIO;
65456 siginfo_t siginfo, *si;
65457 - void __user *datavp = (void __user *) data;
65458 + void __user *datavp = (__force void __user *) data;
65459 unsigned long __user *datalp = datavp;
65460 unsigned long flags;
65461
65462 @@ -882,14 +900,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
65463 goto out;
65464 }
65465
65466 + if (gr_handle_ptrace(child, request)) {
65467 + ret = -EPERM;
65468 + goto out_put_task_struct;
65469 + }
65470 +
65471 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
65472 ret = ptrace_attach(child, request, data);
65473 /*
65474 * Some architectures need to do book-keeping after
65475 * a ptrace attach.
65476 */
65477 - if (!ret)
65478 + if (!ret) {
65479 arch_ptrace_attach(child);
65480 + gr_audit_ptrace(child);
65481 + }
65482 goto out_put_task_struct;
65483 }
65484
65485 @@ -915,7 +940,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
65486 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
65487 if (copied != sizeof(tmp))
65488 return -EIO;
65489 - return put_user(tmp, (unsigned long __user *)data);
65490 + return put_user(tmp, (__force unsigned long __user *)data);
65491 }
65492
65493 int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
65494 @@ -1025,14 +1050,21 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
65495 goto out;
65496 }
65497
65498 + if (gr_handle_ptrace(child, request)) {
65499 + ret = -EPERM;
65500 + goto out_put_task_struct;
65501 + }
65502 +
65503 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
65504 ret = ptrace_attach(child, request, data);
65505 /*
65506 * Some architectures need to do book-keeping after
65507 * a ptrace attach.
65508 */
65509 - if (!ret)
65510 + if (!ret) {
65511 arch_ptrace_attach(child);
65512 + gr_audit_ptrace(child);
65513 + }
65514 goto out_put_task_struct;
65515 }
65516
65517 diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
65518 index 764825c..3aa6ac4 100644
65519 --- a/kernel/rcutorture.c
65520 +++ b/kernel/rcutorture.c
65521 @@ -138,12 +138,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) =
65522 { 0 };
65523 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
65524 { 0 };
65525 -static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
65526 -static atomic_t n_rcu_torture_alloc;
65527 -static atomic_t n_rcu_torture_alloc_fail;
65528 -static atomic_t n_rcu_torture_free;
65529 -static atomic_t n_rcu_torture_mberror;
65530 -static atomic_t n_rcu_torture_error;
65531 +static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
65532 +static atomic_unchecked_t n_rcu_torture_alloc;
65533 +static atomic_unchecked_t n_rcu_torture_alloc_fail;
65534 +static atomic_unchecked_t n_rcu_torture_free;
65535 +static atomic_unchecked_t n_rcu_torture_mberror;
65536 +static atomic_unchecked_t n_rcu_torture_error;
65537 static long n_rcu_torture_boost_ktrerror;
65538 static long n_rcu_torture_boost_rterror;
65539 static long n_rcu_torture_boost_failure;
65540 @@ -223,11 +223,11 @@ rcu_torture_alloc(void)
65541
65542 spin_lock_bh(&rcu_torture_lock);
65543 if (list_empty(&rcu_torture_freelist)) {
65544 - atomic_inc(&n_rcu_torture_alloc_fail);
65545 + atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
65546 spin_unlock_bh(&rcu_torture_lock);
65547 return NULL;
65548 }
65549 - atomic_inc(&n_rcu_torture_alloc);
65550 + atomic_inc_unchecked(&n_rcu_torture_alloc);
65551 p = rcu_torture_freelist.next;
65552 list_del_init(p);
65553 spin_unlock_bh(&rcu_torture_lock);
65554 @@ -240,7 +240,7 @@ rcu_torture_alloc(void)
65555 static void
65556 rcu_torture_free(struct rcu_torture *p)
65557 {
65558 - atomic_inc(&n_rcu_torture_free);
65559 + atomic_inc_unchecked(&n_rcu_torture_free);
65560 spin_lock_bh(&rcu_torture_lock);
65561 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
65562 spin_unlock_bh(&rcu_torture_lock);
65563 @@ -360,7 +360,7 @@ rcu_torture_cb(struct rcu_head *p)
65564 i = rp->rtort_pipe_count;
65565 if (i > RCU_TORTURE_PIPE_LEN)
65566 i = RCU_TORTURE_PIPE_LEN;
65567 - atomic_inc(&rcu_torture_wcount[i]);
65568 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
65569 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
65570 rp->rtort_mbtest = 0;
65571 rcu_torture_free(rp);
65572 @@ -407,7 +407,7 @@ static void rcu_sync_torture_deferred_free(struct rcu_torture *p)
65573 i = rp->rtort_pipe_count;
65574 if (i > RCU_TORTURE_PIPE_LEN)
65575 i = RCU_TORTURE_PIPE_LEN;
65576 - atomic_inc(&rcu_torture_wcount[i]);
65577 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
65578 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
65579 rp->rtort_mbtest = 0;
65580 list_del(&rp->rtort_free);
65581 @@ -872,7 +872,7 @@ rcu_torture_writer(void *arg)
65582 i = old_rp->rtort_pipe_count;
65583 if (i > RCU_TORTURE_PIPE_LEN)
65584 i = RCU_TORTURE_PIPE_LEN;
65585 - atomic_inc(&rcu_torture_wcount[i]);
65586 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
65587 old_rp->rtort_pipe_count++;
65588 cur_ops->deferred_free(old_rp);
65589 }
65590 @@ -940,7 +940,7 @@ static void rcu_torture_timer(unsigned long unused)
65591 return;
65592 }
65593 if (p->rtort_mbtest == 0)
65594 - atomic_inc(&n_rcu_torture_mberror);
65595 + atomic_inc_unchecked(&n_rcu_torture_mberror);
65596 spin_lock(&rand_lock);
65597 cur_ops->read_delay(&rand);
65598 n_rcu_torture_timers++;
65599 @@ -1001,7 +1001,7 @@ rcu_torture_reader(void *arg)
65600 continue;
65601 }
65602 if (p->rtort_mbtest == 0)
65603 - atomic_inc(&n_rcu_torture_mberror);
65604 + atomic_inc_unchecked(&n_rcu_torture_mberror);
65605 cur_ops->read_delay(&rand);
65606 preempt_disable();
65607 pipe_count = p->rtort_pipe_count;
65608 @@ -1060,16 +1060,16 @@ rcu_torture_printk(char *page)
65609 rcu_torture_current,
65610 rcu_torture_current_version,
65611 list_empty(&rcu_torture_freelist),
65612 - atomic_read(&n_rcu_torture_alloc),
65613 - atomic_read(&n_rcu_torture_alloc_fail),
65614 - atomic_read(&n_rcu_torture_free),
65615 - atomic_read(&n_rcu_torture_mberror),
65616 + atomic_read_unchecked(&n_rcu_torture_alloc),
65617 + atomic_read_unchecked(&n_rcu_torture_alloc_fail),
65618 + atomic_read_unchecked(&n_rcu_torture_free),
65619 + atomic_read_unchecked(&n_rcu_torture_mberror),
65620 n_rcu_torture_boost_ktrerror,
65621 n_rcu_torture_boost_rterror,
65622 n_rcu_torture_boost_failure,
65623 n_rcu_torture_boosts,
65624 n_rcu_torture_timers);
65625 - if (atomic_read(&n_rcu_torture_mberror) != 0 ||
65626 + if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0 ||
65627 n_rcu_torture_boost_ktrerror != 0 ||
65628 n_rcu_torture_boost_rterror != 0 ||
65629 n_rcu_torture_boost_failure != 0)
65630 @@ -1077,7 +1077,7 @@ rcu_torture_printk(char *page)
65631 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
65632 if (i > 1) {
65633 cnt += sprintf(&page[cnt], "!!! ");
65634 - atomic_inc(&n_rcu_torture_error);
65635 + atomic_inc_unchecked(&n_rcu_torture_error);
65636 WARN_ON_ONCE(1);
65637 }
65638 cnt += sprintf(&page[cnt], "Reader Pipe: ");
65639 @@ -1091,7 +1091,7 @@ rcu_torture_printk(char *page)
65640 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
65641 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
65642 cnt += sprintf(&page[cnt], " %d",
65643 - atomic_read(&rcu_torture_wcount[i]));
65644 + atomic_read_unchecked(&rcu_torture_wcount[i]));
65645 }
65646 cnt += sprintf(&page[cnt], "\n");
65647 if (cur_ops->stats)
65648 @@ -1401,7 +1401,7 @@ rcu_torture_cleanup(void)
65649
65650 if (cur_ops->cleanup)
65651 cur_ops->cleanup();
65652 - if (atomic_read(&n_rcu_torture_error))
65653 + if (atomic_read_unchecked(&n_rcu_torture_error))
65654 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
65655 else
65656 rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
65657 @@ -1465,17 +1465,17 @@ rcu_torture_init(void)
65658
65659 rcu_torture_current = NULL;
65660 rcu_torture_current_version = 0;
65661 - atomic_set(&n_rcu_torture_alloc, 0);
65662 - atomic_set(&n_rcu_torture_alloc_fail, 0);
65663 - atomic_set(&n_rcu_torture_free, 0);
65664 - atomic_set(&n_rcu_torture_mberror, 0);
65665 - atomic_set(&n_rcu_torture_error, 0);
65666 + atomic_set_unchecked(&n_rcu_torture_alloc, 0);
65667 + atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
65668 + atomic_set_unchecked(&n_rcu_torture_free, 0);
65669 + atomic_set_unchecked(&n_rcu_torture_mberror, 0);
65670 + atomic_set_unchecked(&n_rcu_torture_error, 0);
65671 n_rcu_torture_boost_ktrerror = 0;
65672 n_rcu_torture_boost_rterror = 0;
65673 n_rcu_torture_boost_failure = 0;
65674 n_rcu_torture_boosts = 0;
65675 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
65676 - atomic_set(&rcu_torture_wcount[i], 0);
65677 + atomic_set_unchecked(&rcu_torture_wcount[i], 0);
65678 for_each_possible_cpu(cpu) {
65679 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
65680 per_cpu(rcu_torture_count, cpu)[i] = 0;
65681 diff --git a/kernel/rcutree.c b/kernel/rcutree.c
65682 index 6b76d81..7afc1b3 100644
65683 --- a/kernel/rcutree.c
65684 +++ b/kernel/rcutree.c
65685 @@ -367,9 +367,9 @@ void rcu_enter_nohz(void)
65686 trace_rcu_dyntick("Start");
65687 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
65688 smp_mb__before_atomic_inc(); /* See above. */
65689 - atomic_inc(&rdtp->dynticks);
65690 + atomic_inc_unchecked(&rdtp->dynticks);
65691 smp_mb__after_atomic_inc(); /* Force ordering with next sojourn. */
65692 - WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
65693 + WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
65694 local_irq_restore(flags);
65695 }
65696
65697 @@ -391,10 +391,10 @@ void rcu_exit_nohz(void)
65698 return;
65699 }
65700 smp_mb__before_atomic_inc(); /* Force ordering w/previous sojourn. */
65701 - atomic_inc(&rdtp->dynticks);
65702 + atomic_inc_unchecked(&rdtp->dynticks);
65703 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
65704 smp_mb__after_atomic_inc(); /* See above. */
65705 - WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
65706 + WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
65707 trace_rcu_dyntick("End");
65708 local_irq_restore(flags);
65709 }
65710 @@ -411,14 +411,14 @@ void rcu_nmi_enter(void)
65711 struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
65712
65713 if (rdtp->dynticks_nmi_nesting == 0 &&
65714 - (atomic_read(&rdtp->dynticks) & 0x1))
65715 + (atomic_read_unchecked(&rdtp->dynticks) & 0x1))
65716 return;
65717 rdtp->dynticks_nmi_nesting++;
65718 smp_mb__before_atomic_inc(); /* Force delay from prior write. */
65719 - atomic_inc(&rdtp->dynticks);
65720 + atomic_inc_unchecked(&rdtp->dynticks);
65721 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
65722 smp_mb__after_atomic_inc(); /* See above. */
65723 - WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
65724 + WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
65725 }
65726
65727 /**
65728 @@ -437,9 +437,9 @@ void rcu_nmi_exit(void)
65729 return;
65730 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
65731 smp_mb__before_atomic_inc(); /* See above. */
65732 - atomic_inc(&rdtp->dynticks);
65733 + atomic_inc_unchecked(&rdtp->dynticks);
65734 smp_mb__after_atomic_inc(); /* Force delay to next write. */
65735 - WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
65736 + WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
65737 }
65738
65739 /**
65740 @@ -474,7 +474,7 @@ void rcu_irq_exit(void)
65741 */
65742 static int dyntick_save_progress_counter(struct rcu_data *rdp)
65743 {
65744 - rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
65745 + rdp->dynticks_snap = atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
65746 return 0;
65747 }
65748
65749 @@ -489,7 +489,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
65750 unsigned int curr;
65751 unsigned int snap;
65752
65753 - curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks);
65754 + curr = (unsigned int)atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
65755 snap = (unsigned int)rdp->dynticks_snap;
65756
65757 /*
65758 @@ -1552,7 +1552,7 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
65759 /*
65760 * Do RCU core processing for the current CPU.
65761 */
65762 -static void rcu_process_callbacks(struct softirq_action *unused)
65763 +static void rcu_process_callbacks(void)
65764 {
65765 trace_rcu_utilization("Start RCU core");
65766 __rcu_process_callbacks(&rcu_sched_state,
65767 diff --git a/kernel/rcutree.h b/kernel/rcutree.h
65768 index 849ce9e..74bc9de 100644
65769 --- a/kernel/rcutree.h
65770 +++ b/kernel/rcutree.h
65771 @@ -86,7 +86,7 @@
65772 struct rcu_dynticks {
65773 int dynticks_nesting; /* Track irq/process nesting level. */
65774 int dynticks_nmi_nesting; /* Track NMI nesting level. */
65775 - atomic_t dynticks; /* Even value for dynticks-idle, else odd. */
65776 + atomic_unchecked_t dynticks; /* Even value for dynticks-idle, else odd. */
65777 };
65778
65779 /* RCU's kthread states for tracing. */
65780 diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
65781 index 4b9b9f8..2326053 100644
65782 --- a/kernel/rcutree_plugin.h
65783 +++ b/kernel/rcutree_plugin.h
65784 @@ -842,7 +842,7 @@ void synchronize_rcu_expedited(void)
65785
65786 /* Clean up and exit. */
65787 smp_mb(); /* ensure expedited GP seen before counter increment. */
65788 - ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
65789 + ACCESS_ONCE_RW(sync_rcu_preempt_exp_count)++;
65790 unlock_mb_ret:
65791 mutex_unlock(&sync_rcu_preempt_exp_mutex);
65792 mb_ret:
65793 @@ -1815,8 +1815,8 @@ EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
65794
65795 #else /* #ifndef CONFIG_SMP */
65796
65797 -static atomic_t sync_sched_expedited_started = ATOMIC_INIT(0);
65798 -static atomic_t sync_sched_expedited_done = ATOMIC_INIT(0);
65799 +static atomic_unchecked_t sync_sched_expedited_started = ATOMIC_INIT(0);
65800 +static atomic_unchecked_t sync_sched_expedited_done = ATOMIC_INIT(0);
65801
65802 static int synchronize_sched_expedited_cpu_stop(void *data)
65803 {
65804 @@ -1871,7 +1871,7 @@ void synchronize_sched_expedited(void)
65805 int firstsnap, s, snap, trycount = 0;
65806
65807 /* Note that atomic_inc_return() implies full memory barrier. */
65808 - firstsnap = snap = atomic_inc_return(&sync_sched_expedited_started);
65809 + firstsnap = snap = atomic_inc_return_unchecked(&sync_sched_expedited_started);
65810 get_online_cpus();
65811
65812 /*
65813 @@ -1892,7 +1892,7 @@ void synchronize_sched_expedited(void)
65814 }
65815
65816 /* Check to see if someone else did our work for us. */
65817 - s = atomic_read(&sync_sched_expedited_done);
65818 + s = atomic_read_unchecked(&sync_sched_expedited_done);
65819 if (UINT_CMP_GE((unsigned)s, (unsigned)firstsnap)) {
65820 smp_mb(); /* ensure test happens before caller kfree */
65821 return;
65822 @@ -1907,7 +1907,7 @@ void synchronize_sched_expedited(void)
65823 * grace period works for us.
65824 */
65825 get_online_cpus();
65826 - snap = atomic_read(&sync_sched_expedited_started) - 1;
65827 + snap = atomic_read_unchecked(&sync_sched_expedited_started) - 1;
65828 smp_mb(); /* ensure read is before try_stop_cpus(). */
65829 }
65830
65831 @@ -1918,12 +1918,12 @@ void synchronize_sched_expedited(void)
65832 * than we did beat us to the punch.
65833 */
65834 do {
65835 - s = atomic_read(&sync_sched_expedited_done);
65836 + s = atomic_read_unchecked(&sync_sched_expedited_done);
65837 if (UINT_CMP_GE((unsigned)s, (unsigned)snap)) {
65838 smp_mb(); /* ensure test happens before caller kfree */
65839 break;
65840 }
65841 - } while (atomic_cmpxchg(&sync_sched_expedited_done, s, snap) != s);
65842 + } while (atomic_cmpxchg_unchecked(&sync_sched_expedited_done, s, snap) != s);
65843
65844 put_online_cpus();
65845 }
65846 @@ -1985,7 +1985,7 @@ int rcu_needs_cpu(int cpu)
65847 for_each_online_cpu(thatcpu) {
65848 if (thatcpu == cpu)
65849 continue;
65850 - snap = atomic_add_return(0, &per_cpu(rcu_dynticks,
65851 + snap = atomic_add_return_unchecked(0, &per_cpu(rcu_dynticks,
65852 thatcpu).dynticks);
65853 smp_mb(); /* Order sampling of snap with end of grace period. */
65854 if ((snap & 0x1) != 0) {
65855 diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c
65856 index 9feffa4..54058df 100644
65857 --- a/kernel/rcutree_trace.c
65858 +++ b/kernel/rcutree_trace.c
65859 @@ -69,7 +69,7 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
65860 rdp->qs_pending);
65861 #ifdef CONFIG_NO_HZ
65862 seq_printf(m, " dt=%d/%d/%d df=%lu",
65863 - atomic_read(&rdp->dynticks->dynticks),
65864 + atomic_read_unchecked(&rdp->dynticks->dynticks),
65865 rdp->dynticks->dynticks_nesting,
65866 rdp->dynticks->dynticks_nmi_nesting,
65867 rdp->dynticks_fqs);
65868 @@ -143,7 +143,7 @@ static void print_one_rcu_data_csv(struct seq_file *m, struct rcu_data *rdp)
65869 rdp->qs_pending);
65870 #ifdef CONFIG_NO_HZ
65871 seq_printf(m, ",%d,%d,%d,%lu",
65872 - atomic_read(&rdp->dynticks->dynticks),
65873 + atomic_read_unchecked(&rdp->dynticks->dynticks),
65874 rdp->dynticks->dynticks_nesting,
65875 rdp->dynticks->dynticks_nmi_nesting,
65876 rdp->dynticks_fqs);
65877 diff --git a/kernel/resource.c b/kernel/resource.c
65878 index 7640b3a..5879283 100644
65879 --- a/kernel/resource.c
65880 +++ b/kernel/resource.c
65881 @@ -141,8 +141,18 @@ static const struct file_operations proc_iomem_operations = {
65882
65883 static int __init ioresources_init(void)
65884 {
65885 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
65886 +#ifdef CONFIG_GRKERNSEC_PROC_USER
65887 + proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
65888 + proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
65889 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
65890 + proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
65891 + proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
65892 +#endif
65893 +#else
65894 proc_create("ioports", 0, NULL, &proc_ioports_operations);
65895 proc_create("iomem", 0, NULL, &proc_iomem_operations);
65896 +#endif
65897 return 0;
65898 }
65899 __initcall(ioresources_init);
65900 diff --git a/kernel/rtmutex-tester.c b/kernel/rtmutex-tester.c
65901 index 3d9f31c..7fefc9e 100644
65902 --- a/kernel/rtmutex-tester.c
65903 +++ b/kernel/rtmutex-tester.c
65904 @@ -20,7 +20,7 @@
65905 #define MAX_RT_TEST_MUTEXES 8
65906
65907 static spinlock_t rttest_lock;
65908 -static atomic_t rttest_event;
65909 +static atomic_unchecked_t rttest_event;
65910
65911 struct test_thread_data {
65912 int opcode;
65913 @@ -61,7 +61,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
65914
65915 case RTTEST_LOCKCONT:
65916 td->mutexes[td->opdata] = 1;
65917 - td->event = atomic_add_return(1, &rttest_event);
65918 + td->event = atomic_add_return_unchecked(1, &rttest_event);
65919 return 0;
65920
65921 case RTTEST_RESET:
65922 @@ -74,7 +74,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
65923 return 0;
65924
65925 case RTTEST_RESETEVENT:
65926 - atomic_set(&rttest_event, 0);
65927 + atomic_set_unchecked(&rttest_event, 0);
65928 return 0;
65929
65930 default:
65931 @@ -91,9 +91,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
65932 return ret;
65933
65934 td->mutexes[id] = 1;
65935 - td->event = atomic_add_return(1, &rttest_event);
65936 + td->event = atomic_add_return_unchecked(1, &rttest_event);
65937 rt_mutex_lock(&mutexes[id]);
65938 - td->event = atomic_add_return(1, &rttest_event);
65939 + td->event = atomic_add_return_unchecked(1, &rttest_event);
65940 td->mutexes[id] = 4;
65941 return 0;
65942
65943 @@ -104,9 +104,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
65944 return ret;
65945
65946 td->mutexes[id] = 1;
65947 - td->event = atomic_add_return(1, &rttest_event);
65948 + td->event = atomic_add_return_unchecked(1, &rttest_event);
65949 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
65950 - td->event = atomic_add_return(1, &rttest_event);
65951 + td->event = atomic_add_return_unchecked(1, &rttest_event);
65952 td->mutexes[id] = ret ? 0 : 4;
65953 return ret ? -EINTR : 0;
65954
65955 @@ -115,9 +115,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
65956 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
65957 return ret;
65958
65959 - td->event = atomic_add_return(1, &rttest_event);
65960 + td->event = atomic_add_return_unchecked(1, &rttest_event);
65961 rt_mutex_unlock(&mutexes[id]);
65962 - td->event = atomic_add_return(1, &rttest_event);
65963 + td->event = atomic_add_return_unchecked(1, &rttest_event);
65964 td->mutexes[id] = 0;
65965 return 0;
65966
65967 @@ -164,7 +164,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
65968 break;
65969
65970 td->mutexes[dat] = 2;
65971 - td->event = atomic_add_return(1, &rttest_event);
65972 + td->event = atomic_add_return_unchecked(1, &rttest_event);
65973 break;
65974
65975 default:
65976 @@ -184,7 +184,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
65977 return;
65978
65979 td->mutexes[dat] = 3;
65980 - td->event = atomic_add_return(1, &rttest_event);
65981 + td->event = atomic_add_return_unchecked(1, &rttest_event);
65982 break;
65983
65984 case RTTEST_LOCKNOWAIT:
65985 @@ -196,7 +196,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
65986 return;
65987
65988 td->mutexes[dat] = 1;
65989 - td->event = atomic_add_return(1, &rttest_event);
65990 + td->event = atomic_add_return_unchecked(1, &rttest_event);
65991 return;
65992
65993 default:
65994 diff --git a/kernel/sched.c b/kernel/sched.c
65995 index d6b149c..896cbb8 100644
65996 --- a/kernel/sched.c
65997 +++ b/kernel/sched.c
65998 @@ -4389,6 +4389,19 @@ pick_next_task(struct rq *rq)
65999 BUG(); /* the idle class will always have a runnable task */
66000 }
66001
66002 +#ifdef CONFIG_GRKERNSEC_SETXID
66003 +extern void gr_delayed_cred_worker(void);
66004 +static inline void gr_cred_schedule(void)
66005 +{
66006 + if (unlikely(current->delayed_cred))
66007 + gr_delayed_cred_worker();
66008 +}
66009 +#else
66010 +static inline void gr_cred_schedule(void)
66011 +{
66012 +}
66013 +#endif
66014 +
66015 /*
66016 * __schedule() is the main scheduler function.
66017 */
66018 @@ -4408,6 +4421,8 @@ need_resched:
66019
66020 schedule_debug(prev);
66021
66022 + gr_cred_schedule();
66023 +
66024 if (sched_feat(HRTICK))
66025 hrtick_clear(rq);
66026
66027 @@ -5098,6 +5113,8 @@ int can_nice(const struct task_struct *p, const int nice)
66028 /* convert nice value [19,-20] to rlimit style value [1,40] */
66029 int nice_rlim = 20 - nice;
66030
66031 + gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
66032 +
66033 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
66034 capable(CAP_SYS_NICE));
66035 }
66036 @@ -5131,7 +5148,8 @@ SYSCALL_DEFINE1(nice, int, increment)
66037 if (nice > 19)
66038 nice = 19;
66039
66040 - if (increment < 0 && !can_nice(current, nice))
66041 + if (increment < 0 && (!can_nice(current, nice) ||
66042 + gr_handle_chroot_nice()))
66043 return -EPERM;
66044
66045 retval = security_task_setnice(current, nice);
66046 @@ -5288,6 +5306,7 @@ recheck:
66047 unsigned long rlim_rtprio =
66048 task_rlimit(p, RLIMIT_RTPRIO);
66049
66050 + gr_learn_resource(p, RLIMIT_RTPRIO, param->sched_priority, 1);
66051 /* can't set/change the rt policy */
66052 if (policy != p->policy && !rlim_rtprio)
66053 return -EPERM;
66054 diff --git a/kernel/sched_autogroup.c b/kernel/sched_autogroup.c
66055 index 429242f..d7cca82 100644
66056 --- a/kernel/sched_autogroup.c
66057 +++ b/kernel/sched_autogroup.c
66058 @@ -7,7 +7,7 @@
66059
66060 unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
66061 static struct autogroup autogroup_default;
66062 -static atomic_t autogroup_seq_nr;
66063 +static atomic_unchecked_t autogroup_seq_nr;
66064
66065 static void __init autogroup_init(struct task_struct *init_task)
66066 {
66067 @@ -78,7 +78,7 @@ static inline struct autogroup *autogroup_create(void)
66068
66069 kref_init(&ag->kref);
66070 init_rwsem(&ag->lock);
66071 - ag->id = atomic_inc_return(&autogroup_seq_nr);
66072 + ag->id = atomic_inc_return_unchecked(&autogroup_seq_nr);
66073 ag->tg = tg;
66074 #ifdef CONFIG_RT_GROUP_SCHED
66075 /*
66076 diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
66077 index 8a39fa3..34f3dbc 100644
66078 --- a/kernel/sched_fair.c
66079 +++ b/kernel/sched_fair.c
66080 @@ -4801,7 +4801,7 @@ static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { }
66081 * run_rebalance_domains is triggered when needed from the scheduler tick.
66082 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
66083 */
66084 -static void run_rebalance_domains(struct softirq_action *h)
66085 +static void run_rebalance_domains(void)
66086 {
66087 int this_cpu = smp_processor_id();
66088 struct rq *this_rq = cpu_rq(this_cpu);
66089 diff --git a/kernel/signal.c b/kernel/signal.c
66090 index 2065515..aed2987 100644
66091 --- a/kernel/signal.c
66092 +++ b/kernel/signal.c
66093 @@ -45,12 +45,12 @@ static struct kmem_cache *sigqueue_cachep;
66094
66095 int print_fatal_signals __read_mostly;
66096
66097 -static void __user *sig_handler(struct task_struct *t, int sig)
66098 +static __sighandler_t sig_handler(struct task_struct *t, int sig)
66099 {
66100 return t->sighand->action[sig - 1].sa.sa_handler;
66101 }
66102
66103 -static int sig_handler_ignored(void __user *handler, int sig)
66104 +static int sig_handler_ignored(__sighandler_t handler, int sig)
66105 {
66106 /* Is it explicitly or implicitly ignored? */
66107 return handler == SIG_IGN ||
66108 @@ -60,7 +60,7 @@ static int sig_handler_ignored(void __user *handler, int sig)
66109 static int sig_task_ignored(struct task_struct *t, int sig,
66110 int from_ancestor_ns)
66111 {
66112 - void __user *handler;
66113 + __sighandler_t handler;
66114
66115 handler = sig_handler(t, sig);
66116
66117 @@ -364,6 +364,9 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
66118 atomic_inc(&user->sigpending);
66119 rcu_read_unlock();
66120
66121 + if (!override_rlimit)
66122 + gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
66123 +
66124 if (override_rlimit ||
66125 atomic_read(&user->sigpending) <=
66126 task_rlimit(t, RLIMIT_SIGPENDING)) {
66127 @@ -488,7 +491,7 @@ flush_signal_handlers(struct task_struct *t, int force_default)
66128
66129 int unhandled_signal(struct task_struct *tsk, int sig)
66130 {
66131 - void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
66132 + __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
66133 if (is_global_init(tsk))
66134 return 1;
66135 if (handler != SIG_IGN && handler != SIG_DFL)
66136 @@ -815,6 +818,13 @@ static int check_kill_permission(int sig, struct siginfo *info,
66137 }
66138 }
66139
66140 + /* allow glibc communication via tgkill to other threads in our
66141 + thread group */
66142 + if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
66143 + sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
66144 + && gr_handle_signal(t, sig))
66145 + return -EPERM;
66146 +
66147 return security_task_kill(t, info, sig, 0);
66148 }
66149
66150 @@ -1165,7 +1175,7 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
66151 return send_signal(sig, info, p, 1);
66152 }
66153
66154 -static int
66155 +int
66156 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
66157 {
66158 return send_signal(sig, info, t, 0);
66159 @@ -1202,6 +1212,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
66160 unsigned long int flags;
66161 int ret, blocked, ignored;
66162 struct k_sigaction *action;
66163 + int is_unhandled = 0;
66164
66165 spin_lock_irqsave(&t->sighand->siglock, flags);
66166 action = &t->sighand->action[sig-1];
66167 @@ -1216,9 +1227,18 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
66168 }
66169 if (action->sa.sa_handler == SIG_DFL)
66170 t->signal->flags &= ~SIGNAL_UNKILLABLE;
66171 + if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
66172 + is_unhandled = 1;
66173 ret = specific_send_sig_info(sig, info, t);
66174 spin_unlock_irqrestore(&t->sighand->siglock, flags);
66175
66176 + /* only deal with unhandled signals, java etc trigger SIGSEGV during
66177 + normal operation */
66178 + if (is_unhandled) {
66179 + gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
66180 + gr_handle_crash(t, sig);
66181 + }
66182 +
66183 return ret;
66184 }
66185
66186 @@ -1285,8 +1305,11 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
66187 ret = check_kill_permission(sig, info, p);
66188 rcu_read_unlock();
66189
66190 - if (!ret && sig)
66191 + if (!ret && sig) {
66192 ret = do_send_sig_info(sig, info, p, true);
66193 + if (!ret)
66194 + gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
66195 + }
66196
66197 return ret;
66198 }
66199 @@ -2754,7 +2777,15 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
66200 int error = -ESRCH;
66201
66202 rcu_read_lock();
66203 - p = find_task_by_vpid(pid);
66204 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
66205 + /* allow glibc communication via tgkill to other threads in our
66206 + thread group */
66207 + if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
66208 + sig == (SIGRTMIN+1) && tgid == info->si_pid)
66209 + p = find_task_by_vpid_unrestricted(pid);
66210 + else
66211 +#endif
66212 + p = find_task_by_vpid(pid);
66213 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
66214 error = check_kill_permission(sig, info, p);
66215 /*
66216 diff --git a/kernel/smp.c b/kernel/smp.c
66217 index db197d6..17aef0b 100644
66218 --- a/kernel/smp.c
66219 +++ b/kernel/smp.c
66220 @@ -580,22 +580,22 @@ int smp_call_function(smp_call_func_t func, void *info, int wait)
66221 }
66222 EXPORT_SYMBOL(smp_call_function);
66223
66224 -void ipi_call_lock(void)
66225 +void ipi_call_lock(void) __acquires(call_function.lock)
66226 {
66227 raw_spin_lock(&call_function.lock);
66228 }
66229
66230 -void ipi_call_unlock(void)
66231 +void ipi_call_unlock(void) __releases(call_function.lock)
66232 {
66233 raw_spin_unlock(&call_function.lock);
66234 }
66235
66236 -void ipi_call_lock_irq(void)
66237 +void ipi_call_lock_irq(void) __acquires(call_function.lock)
66238 {
66239 raw_spin_lock_irq(&call_function.lock);
66240 }
66241
66242 -void ipi_call_unlock_irq(void)
66243 +void ipi_call_unlock_irq(void) __releases(call_function.lock)
66244 {
66245 raw_spin_unlock_irq(&call_function.lock);
66246 }
66247 diff --git a/kernel/softirq.c b/kernel/softirq.c
66248 index 2c71d91..1021f81 100644
66249 --- a/kernel/softirq.c
66250 +++ b/kernel/softirq.c
66251 @@ -56,7 +56,7 @@ static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp
66252
66253 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
66254
66255 -char *softirq_to_name[NR_SOFTIRQS] = {
66256 +const char * const softirq_to_name[NR_SOFTIRQS] = {
66257 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
66258 "TASKLET", "SCHED", "HRTIMER", "RCU"
66259 };
66260 @@ -235,7 +235,7 @@ restart:
66261 kstat_incr_softirqs_this_cpu(vec_nr);
66262
66263 trace_softirq_entry(vec_nr);
66264 - h->action(h);
66265 + h->action();
66266 trace_softirq_exit(vec_nr);
66267 if (unlikely(prev_count != preempt_count())) {
66268 printk(KERN_ERR "huh, entered softirq %u %s %p"
66269 @@ -385,9 +385,11 @@ void raise_softirq(unsigned int nr)
66270 local_irq_restore(flags);
66271 }
66272
66273 -void open_softirq(int nr, void (*action)(struct softirq_action *))
66274 +void open_softirq(int nr, void (*action)(void))
66275 {
66276 - softirq_vec[nr].action = action;
66277 + pax_open_kernel();
66278 + *(void **)&softirq_vec[nr].action = action;
66279 + pax_close_kernel();
66280 }
66281
66282 /*
66283 @@ -441,7 +443,7 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
66284
66285 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
66286
66287 -static void tasklet_action(struct softirq_action *a)
66288 +static void tasklet_action(void)
66289 {
66290 struct tasklet_struct *list;
66291
66292 @@ -476,7 +478,7 @@ static void tasklet_action(struct softirq_action *a)
66293 }
66294 }
66295
66296 -static void tasklet_hi_action(struct softirq_action *a)
66297 +static void tasklet_hi_action(void)
66298 {
66299 struct tasklet_struct *list;
66300
66301 diff --git a/kernel/sys.c b/kernel/sys.c
66302 index 481611f..0754d86 100644
66303 --- a/kernel/sys.c
66304 +++ b/kernel/sys.c
66305 @@ -158,6 +158,12 @@ static int set_one_prio(struct task_struct *p, int niceval, int error)
66306 error = -EACCES;
66307 goto out;
66308 }
66309 +
66310 + if (gr_handle_chroot_setpriority(p, niceval)) {
66311 + error = -EACCES;
66312 + goto out;
66313 + }
66314 +
66315 no_nice = security_task_setnice(p, niceval);
66316 if (no_nice) {
66317 error = no_nice;
66318 @@ -572,6 +578,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
66319 goto error;
66320 }
66321
66322 + if (gr_check_group_change(new->gid, new->egid, -1))
66323 + goto error;
66324 +
66325 if (rgid != (gid_t) -1 ||
66326 (egid != (gid_t) -1 && egid != old->gid))
66327 new->sgid = new->egid;
66328 @@ -601,6 +610,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
66329 old = current_cred();
66330
66331 retval = -EPERM;
66332 +
66333 + if (gr_check_group_change(gid, gid, gid))
66334 + goto error;
66335 +
66336 if (nsown_capable(CAP_SETGID))
66337 new->gid = new->egid = new->sgid = new->fsgid = gid;
66338 else if (gid == old->gid || gid == old->sgid)
66339 @@ -618,7 +631,7 @@ error:
66340 /*
66341 * change the user struct in a credentials set to match the new UID
66342 */
66343 -static int set_user(struct cred *new)
66344 +int set_user(struct cred *new)
66345 {
66346 struct user_struct *new_user;
66347
66348 @@ -688,6 +701,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
66349 goto error;
66350 }
66351
66352 + if (gr_check_user_change(new->uid, new->euid, -1))
66353 + goto error;
66354 +
66355 if (new->uid != old->uid) {
66356 retval = set_user(new);
66357 if (retval < 0)
66358 @@ -732,6 +748,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
66359 old = current_cred();
66360
66361 retval = -EPERM;
66362 +
66363 + if (gr_check_crash_uid(uid))
66364 + goto error;
66365 + if (gr_check_user_change(uid, uid, uid))
66366 + goto error;
66367 +
66368 if (nsown_capable(CAP_SETUID)) {
66369 new->suid = new->uid = uid;
66370 if (uid != old->uid) {
66371 @@ -786,6 +808,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
66372 goto error;
66373 }
66374
66375 + if (gr_check_user_change(ruid, euid, -1))
66376 + goto error;
66377 +
66378 if (ruid != (uid_t) -1) {
66379 new->uid = ruid;
66380 if (ruid != old->uid) {
66381 @@ -850,6 +875,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
66382 goto error;
66383 }
66384
66385 + if (gr_check_group_change(rgid, egid, -1))
66386 + goto error;
66387 +
66388 if (rgid != (gid_t) -1)
66389 new->gid = rgid;
66390 if (egid != (gid_t) -1)
66391 @@ -896,6 +924,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
66392 old = current_cred();
66393 old_fsuid = old->fsuid;
66394
66395 + if (gr_check_user_change(-1, -1, uid))
66396 + goto error;
66397 +
66398 if (uid == old->uid || uid == old->euid ||
66399 uid == old->suid || uid == old->fsuid ||
66400 nsown_capable(CAP_SETUID)) {
66401 @@ -906,6 +937,7 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
66402 }
66403 }
66404
66405 +error:
66406 abort_creds(new);
66407 return old_fsuid;
66408
66409 @@ -932,12 +964,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
66410 if (gid == old->gid || gid == old->egid ||
66411 gid == old->sgid || gid == old->fsgid ||
66412 nsown_capable(CAP_SETGID)) {
66413 + if (gr_check_group_change(-1, -1, gid))
66414 + goto error;
66415 +
66416 if (gid != old_fsgid) {
66417 new->fsgid = gid;
66418 goto change_okay;
66419 }
66420 }
66421
66422 +error:
66423 abort_creds(new);
66424 return old_fsgid;
66425
66426 @@ -1189,7 +1225,10 @@ static int override_release(char __user *release, int len)
66427 }
66428 v = ((LINUX_VERSION_CODE >> 8) & 0xff) + 40;
66429 snprintf(buf, len, "2.6.%u%s", v, rest);
66430 - ret = copy_to_user(release, buf, len);
66431 + if (len > sizeof(buf))
66432 + ret = -EFAULT;
66433 + else
66434 + ret = copy_to_user(release, buf, len);
66435 }
66436 return ret;
66437 }
66438 @@ -1243,19 +1282,19 @@ SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
66439 return -EFAULT;
66440
66441 down_read(&uts_sem);
66442 - error = __copy_to_user(&name->sysname, &utsname()->sysname,
66443 + error = __copy_to_user(name->sysname, &utsname()->sysname,
66444 __OLD_UTS_LEN);
66445 error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
66446 - error |= __copy_to_user(&name->nodename, &utsname()->nodename,
66447 + error |= __copy_to_user(name->nodename, &utsname()->nodename,
66448 __OLD_UTS_LEN);
66449 error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
66450 - error |= __copy_to_user(&name->release, &utsname()->release,
66451 + error |= __copy_to_user(name->release, &utsname()->release,
66452 __OLD_UTS_LEN);
66453 error |= __put_user(0, name->release + __OLD_UTS_LEN);
66454 - error |= __copy_to_user(&name->version, &utsname()->version,
66455 + error |= __copy_to_user(name->version, &utsname()->version,
66456 __OLD_UTS_LEN);
66457 error |= __put_user(0, name->version + __OLD_UTS_LEN);
66458 - error |= __copy_to_user(&name->machine, &utsname()->machine,
66459 + error |= __copy_to_user(name->machine, &utsname()->machine,
66460 __OLD_UTS_LEN);
66461 error |= __put_user(0, name->machine + __OLD_UTS_LEN);
66462 up_read(&uts_sem);
66463 @@ -1720,7 +1759,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
66464 error = get_dumpable(me->mm);
66465 break;
66466 case PR_SET_DUMPABLE:
66467 - if (arg2 < 0 || arg2 > 1) {
66468 + if (arg2 > 1) {
66469 error = -EINVAL;
66470 break;
66471 }
66472 diff --git a/kernel/sysctl.c b/kernel/sysctl.c
66473 index ae27196..7506d69 100644
66474 --- a/kernel/sysctl.c
66475 +++ b/kernel/sysctl.c
66476 @@ -86,6 +86,13 @@
66477
66478
66479 #if defined(CONFIG_SYSCTL)
66480 +#include <linux/grsecurity.h>
66481 +#include <linux/grinternal.h>
66482 +
66483 +extern __u32 gr_handle_sysctl(const ctl_table *table, const int op);
66484 +extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
66485 + const int op);
66486 +extern int gr_handle_chroot_sysctl(const int op);
66487
66488 /* External variables not in a header file. */
66489 extern int sysctl_overcommit_memory;
66490 @@ -191,6 +198,7 @@ static int sysrq_sysctl_handler(ctl_table *table, int write,
66491 }
66492
66493 #endif
66494 +extern struct ctl_table grsecurity_table[];
66495
66496 static struct ctl_table root_table[];
66497 static struct ctl_table_root sysctl_table_root;
66498 @@ -220,6 +228,20 @@ extern struct ctl_table epoll_table[];
66499 int sysctl_legacy_va_layout;
66500 #endif
66501
66502 +#ifdef CONFIG_PAX_SOFTMODE
66503 +static ctl_table pax_table[] = {
66504 + {
66505 + .procname = "softmode",
66506 + .data = &pax_softmode,
66507 + .maxlen = sizeof(unsigned int),
66508 + .mode = 0600,
66509 + .proc_handler = &proc_dointvec,
66510 + },
66511 +
66512 + { }
66513 +};
66514 +#endif
66515 +
66516 /* The default sysctl tables: */
66517
66518 static struct ctl_table root_table[] = {
66519 @@ -266,6 +288,22 @@ static int max_extfrag_threshold = 1000;
66520 #endif
66521
66522 static struct ctl_table kern_table[] = {
66523 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
66524 + {
66525 + .procname = "grsecurity",
66526 + .mode = 0500,
66527 + .child = grsecurity_table,
66528 + },
66529 +#endif
66530 +
66531 +#ifdef CONFIG_PAX_SOFTMODE
66532 + {
66533 + .procname = "pax",
66534 + .mode = 0500,
66535 + .child = pax_table,
66536 + },
66537 +#endif
66538 +
66539 {
66540 .procname = "sched_child_runs_first",
66541 .data = &sysctl_sched_child_runs_first,
66542 @@ -550,7 +588,7 @@ static struct ctl_table kern_table[] = {
66543 .data = &modprobe_path,
66544 .maxlen = KMOD_PATH_LEN,
66545 .mode = 0644,
66546 - .proc_handler = proc_dostring,
66547 + .proc_handler = proc_dostring_modpriv,
66548 },
66549 {
66550 .procname = "modules_disabled",
66551 @@ -717,16 +755,20 @@ static struct ctl_table kern_table[] = {
66552 .extra1 = &zero,
66553 .extra2 = &one,
66554 },
66555 +#endif
66556 {
66557 .procname = "kptr_restrict",
66558 .data = &kptr_restrict,
66559 .maxlen = sizeof(int),
66560 .mode = 0644,
66561 .proc_handler = proc_dmesg_restrict,
66562 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66563 + .extra1 = &two,
66564 +#else
66565 .extra1 = &zero,
66566 +#endif
66567 .extra2 = &two,
66568 },
66569 -#endif
66570 {
66571 .procname = "ngroups_max",
66572 .data = &ngroups_max,
66573 @@ -1216,6 +1258,13 @@ static struct ctl_table vm_table[] = {
66574 .proc_handler = proc_dointvec_minmax,
66575 .extra1 = &zero,
66576 },
66577 + {
66578 + .procname = "heap_stack_gap",
66579 + .data = &sysctl_heap_stack_gap,
66580 + .maxlen = sizeof(sysctl_heap_stack_gap),
66581 + .mode = 0644,
66582 + .proc_handler = proc_doulongvec_minmax,
66583 + },
66584 #else
66585 {
66586 .procname = "nr_trim_pages",
66587 @@ -1720,6 +1769,17 @@ static int test_perm(int mode, int op)
66588 int sysctl_perm(struct ctl_table_root *root, struct ctl_table *table, int op)
66589 {
66590 int mode;
66591 + int error;
66592 +
66593 + if (table->parent != NULL && table->parent->procname != NULL &&
66594 + table->procname != NULL &&
66595 + gr_handle_sysctl_mod(table->parent->procname, table->procname, op))
66596 + return -EACCES;
66597 + if (gr_handle_chroot_sysctl(op))
66598 + return -EACCES;
66599 + error = gr_handle_sysctl(table, op);
66600 + if (error)
66601 + return error;
66602
66603 if (root->permissions)
66604 mode = root->permissions(root, current->nsproxy, table);
66605 @@ -2124,6 +2184,16 @@ int proc_dostring(struct ctl_table *table, int write,
66606 buffer, lenp, ppos);
66607 }
66608
66609 +int proc_dostring_modpriv(struct ctl_table *table, int write,
66610 + void __user *buffer, size_t *lenp, loff_t *ppos)
66611 +{
66612 + if (write && !capable(CAP_SYS_MODULE))
66613 + return -EPERM;
66614 +
66615 + return _proc_do_string(table->data, table->maxlen, write,
66616 + buffer, lenp, ppos);
66617 +}
66618 +
66619 static size_t proc_skip_spaces(char **buf)
66620 {
66621 size_t ret;
66622 @@ -2229,6 +2299,8 @@ static int proc_put_long(void __user **buf, size_t *size, unsigned long val,
66623 len = strlen(tmp);
66624 if (len > *size)
66625 len = *size;
66626 + if (len > sizeof(tmp))
66627 + len = sizeof(tmp);
66628 if (copy_to_user(*buf, tmp, len))
66629 return -EFAULT;
66630 *size -= len;
66631 @@ -2545,8 +2617,11 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int
66632 *i = val;
66633 } else {
66634 val = convdiv * (*i) / convmul;
66635 - if (!first)
66636 + if (!first) {
66637 err = proc_put_char(&buffer, &left, '\t');
66638 + if (err)
66639 + break;
66640 + }
66641 err = proc_put_long(&buffer, &left, val, false);
66642 if (err)
66643 break;
66644 @@ -2941,6 +3016,12 @@ int proc_dostring(struct ctl_table *table, int write,
66645 return -ENOSYS;
66646 }
66647
66648 +int proc_dostring_modpriv(struct ctl_table *table, int write,
66649 + void __user *buffer, size_t *lenp, loff_t *ppos)
66650 +{
66651 + return -ENOSYS;
66652 +}
66653 +
66654 int proc_dointvec(struct ctl_table *table, int write,
66655 void __user *buffer, size_t *lenp, loff_t *ppos)
66656 {
66657 @@ -2997,6 +3078,7 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
66658 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
66659 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
66660 EXPORT_SYMBOL(proc_dostring);
66661 +EXPORT_SYMBOL(proc_dostring_modpriv);
66662 EXPORT_SYMBOL(proc_doulongvec_minmax);
66663 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
66664 EXPORT_SYMBOL(register_sysctl_table);
66665 diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c
66666 index a650694..aaeeb20 100644
66667 --- a/kernel/sysctl_binary.c
66668 +++ b/kernel/sysctl_binary.c
66669 @@ -989,7 +989,7 @@ static ssize_t bin_intvec(struct file *file,
66670 int i;
66671
66672 set_fs(KERNEL_DS);
66673 - result = vfs_read(file, buffer, BUFSZ - 1, &pos);
66674 + result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
66675 set_fs(old_fs);
66676 if (result < 0)
66677 goto out_kfree;
66678 @@ -1034,7 +1034,7 @@ static ssize_t bin_intvec(struct file *file,
66679 }
66680
66681 set_fs(KERNEL_DS);
66682 - result = vfs_write(file, buffer, str - buffer, &pos);
66683 + result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
66684 set_fs(old_fs);
66685 if (result < 0)
66686 goto out_kfree;
66687 @@ -1067,7 +1067,7 @@ static ssize_t bin_ulongvec(struct file *file,
66688 int i;
66689
66690 set_fs(KERNEL_DS);
66691 - result = vfs_read(file, buffer, BUFSZ - 1, &pos);
66692 + result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
66693 set_fs(old_fs);
66694 if (result < 0)
66695 goto out_kfree;
66696 @@ -1112,7 +1112,7 @@ static ssize_t bin_ulongvec(struct file *file,
66697 }
66698
66699 set_fs(KERNEL_DS);
66700 - result = vfs_write(file, buffer, str - buffer, &pos);
66701 + result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
66702 set_fs(old_fs);
66703 if (result < 0)
66704 goto out_kfree;
66705 @@ -1138,7 +1138,7 @@ static ssize_t bin_uuid(struct file *file,
66706 int i;
66707
66708 set_fs(KERNEL_DS);
66709 - result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
66710 + result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
66711 set_fs(old_fs);
66712 if (result < 0)
66713 goto out;
66714 @@ -1185,7 +1185,7 @@ static ssize_t bin_dn_node_address(struct file *file,
66715 __le16 dnaddr;
66716
66717 set_fs(KERNEL_DS);
66718 - result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
66719 + result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
66720 set_fs(old_fs);
66721 if (result < 0)
66722 goto out;
66723 @@ -1233,7 +1233,7 @@ static ssize_t bin_dn_node_address(struct file *file,
66724 le16_to_cpu(dnaddr) & 0x3ff);
66725
66726 set_fs(KERNEL_DS);
66727 - result = vfs_write(file, buf, len, &pos);
66728 + result = vfs_write(file, (const char __force_user *)buf, len, &pos);
66729 set_fs(old_fs);
66730 if (result < 0)
66731 goto out;
66732 diff --git a/kernel/sysctl_check.c b/kernel/sysctl_check.c
66733 index 362da65..ab8ef8c 100644
66734 --- a/kernel/sysctl_check.c
66735 +++ b/kernel/sysctl_check.c
66736 @@ -129,6 +129,7 @@ int sysctl_check_table(struct nsproxy *namespaces, struct ctl_table *table)
66737 set_fail(&fail, table, "Directory with extra2");
66738 } else {
66739 if ((table->proc_handler == proc_dostring) ||
66740 + (table->proc_handler == proc_dostring_modpriv) ||
66741 (table->proc_handler == proc_dointvec) ||
66742 (table->proc_handler == proc_dointvec_minmax) ||
66743 (table->proc_handler == proc_dointvec_jiffies) ||
66744 diff --git a/kernel/taskstats.c b/kernel/taskstats.c
66745 index e660464..c8b9e67 100644
66746 --- a/kernel/taskstats.c
66747 +++ b/kernel/taskstats.c
66748 @@ -27,9 +27,12 @@
66749 #include <linux/cgroup.h>
66750 #include <linux/fs.h>
66751 #include <linux/file.h>
66752 +#include <linux/grsecurity.h>
66753 #include <net/genetlink.h>
66754 #include <linux/atomic.h>
66755
66756 +extern int gr_is_taskstats_denied(int pid);
66757 +
66758 /*
66759 * Maximum length of a cpumask that can be specified in
66760 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
66761 @@ -556,6 +559,9 @@ err:
66762
66763 static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
66764 {
66765 + if (gr_is_taskstats_denied(current->pid))
66766 + return -EACCES;
66767 +
66768 if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
66769 return cmd_attr_register_cpumask(info);
66770 else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
66771 diff --git a/kernel/time.c b/kernel/time.c
66772 index 73e416d..cfc6f69 100644
66773 --- a/kernel/time.c
66774 +++ b/kernel/time.c
66775 @@ -163,6 +163,11 @@ int do_sys_settimeofday(const struct timespec *tv, const struct timezone *tz)
66776 return error;
66777
66778 if (tz) {
66779 + /* we log in do_settimeofday called below, so don't log twice
66780 + */
66781 + if (!tv)
66782 + gr_log_timechange();
66783 +
66784 /* SMP safe, global irq locking makes it work. */
66785 sys_tz = *tz;
66786 update_vsyscall_tz();
66787 diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
66788 index 8a46f5d..bbe6f9c 100644
66789 --- a/kernel/time/alarmtimer.c
66790 +++ b/kernel/time/alarmtimer.c
66791 @@ -773,7 +773,7 @@ static int __init alarmtimer_init(void)
66792 struct platform_device *pdev;
66793 int error = 0;
66794 int i;
66795 - struct k_clock alarm_clock = {
66796 + static struct k_clock alarm_clock = {
66797 .clock_getres = alarm_clock_getres,
66798 .clock_get = alarm_clock_get,
66799 .timer_create = alarm_timer_create,
66800 diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
66801 index fd4a7b1..fae5c2a 100644
66802 --- a/kernel/time/tick-broadcast.c
66803 +++ b/kernel/time/tick-broadcast.c
66804 @@ -115,7 +115,7 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
66805 * then clear the broadcast bit.
66806 */
66807 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
66808 - int cpu = smp_processor_id();
66809 + cpu = smp_processor_id();
66810
66811 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
66812 tick_broadcast_clear_oneshot(cpu);
66813 diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
66814 index 2378413..be455fd 100644
66815 --- a/kernel/time/timekeeping.c
66816 +++ b/kernel/time/timekeeping.c
66817 @@ -14,6 +14,7 @@
66818 #include <linux/init.h>
66819 #include <linux/mm.h>
66820 #include <linux/sched.h>
66821 +#include <linux/grsecurity.h>
66822 #include <linux/syscore_ops.h>
66823 #include <linux/clocksource.h>
66824 #include <linux/jiffies.h>
66825 @@ -365,6 +366,8 @@ int do_settimeofday(const struct timespec *tv)
66826 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
66827 return -EINVAL;
66828
66829 + gr_log_timechange();
66830 +
66831 write_seqlock_irqsave(&xtime_lock, flags);
66832
66833 timekeeping_forward_now();
66834 diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
66835 index 3258455..f35227d 100644
66836 --- a/kernel/time/timer_list.c
66837 +++ b/kernel/time/timer_list.c
66838 @@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
66839
66840 static void print_name_offset(struct seq_file *m, void *sym)
66841 {
66842 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66843 + SEQ_printf(m, "<%p>", NULL);
66844 +#else
66845 char symname[KSYM_NAME_LEN];
66846
66847 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
66848 SEQ_printf(m, "<%pK>", sym);
66849 else
66850 SEQ_printf(m, "%s", symname);
66851 +#endif
66852 }
66853
66854 static void
66855 @@ -112,7 +116,11 @@ next_one:
66856 static void
66857 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
66858 {
66859 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66860 + SEQ_printf(m, " .base: %p\n", NULL);
66861 +#else
66862 SEQ_printf(m, " .base: %pK\n", base);
66863 +#endif
66864 SEQ_printf(m, " .index: %d\n",
66865 base->index);
66866 SEQ_printf(m, " .resolution: %Lu nsecs\n",
66867 @@ -293,7 +301,11 @@ static int __init init_timer_list_procfs(void)
66868 {
66869 struct proc_dir_entry *pe;
66870
66871 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
66872 + pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
66873 +#else
66874 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
66875 +#endif
66876 if (!pe)
66877 return -ENOMEM;
66878 return 0;
66879 diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
66880 index 0b537f2..9e71eca 100644
66881 --- a/kernel/time/timer_stats.c
66882 +++ b/kernel/time/timer_stats.c
66883 @@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
66884 static unsigned long nr_entries;
66885 static struct entry entries[MAX_ENTRIES];
66886
66887 -static atomic_t overflow_count;
66888 +static atomic_unchecked_t overflow_count;
66889
66890 /*
66891 * The entries are in a hash-table, for fast lookup:
66892 @@ -140,7 +140,7 @@ static void reset_entries(void)
66893 nr_entries = 0;
66894 memset(entries, 0, sizeof(entries));
66895 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
66896 - atomic_set(&overflow_count, 0);
66897 + atomic_set_unchecked(&overflow_count, 0);
66898 }
66899
66900 static struct entry *alloc_entry(void)
66901 @@ -261,7 +261,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
66902 if (likely(entry))
66903 entry->count++;
66904 else
66905 - atomic_inc(&overflow_count);
66906 + atomic_inc_unchecked(&overflow_count);
66907
66908 out_unlock:
66909 raw_spin_unlock_irqrestore(lock, flags);
66910 @@ -269,12 +269,16 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
66911
66912 static void print_name_offset(struct seq_file *m, unsigned long addr)
66913 {
66914 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66915 + seq_printf(m, "<%p>", NULL);
66916 +#else
66917 char symname[KSYM_NAME_LEN];
66918
66919 if (lookup_symbol_name(addr, symname) < 0)
66920 seq_printf(m, "<%p>", (void *)addr);
66921 else
66922 seq_printf(m, "%s", symname);
66923 +#endif
66924 }
66925
66926 static int tstats_show(struct seq_file *m, void *v)
66927 @@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *m, void *v)
66928
66929 seq_puts(m, "Timer Stats Version: v0.2\n");
66930 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
66931 - if (atomic_read(&overflow_count))
66932 + if (atomic_read_unchecked(&overflow_count))
66933 seq_printf(m, "Overflow: %d entries\n",
66934 - atomic_read(&overflow_count));
66935 + atomic_read_unchecked(&overflow_count));
66936
66937 for (i = 0; i < nr_entries; i++) {
66938 entry = entries + i;
66939 @@ -417,7 +421,11 @@ static int __init init_tstats_procfs(void)
66940 {
66941 struct proc_dir_entry *pe;
66942
66943 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
66944 + pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
66945 +#else
66946 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
66947 +#endif
66948 if (!pe)
66949 return -ENOMEM;
66950 return 0;
66951 diff --git a/kernel/timer.c b/kernel/timer.c
66952 index 9c3c62b..441690e 100644
66953 --- a/kernel/timer.c
66954 +++ b/kernel/timer.c
66955 @@ -1304,7 +1304,7 @@ void update_process_times(int user_tick)
66956 /*
66957 * This function runs timers and the timer-tq in bottom half context.
66958 */
66959 -static void run_timer_softirq(struct softirq_action *h)
66960 +static void run_timer_softirq(void)
66961 {
66962 struct tvec_base *base = __this_cpu_read(tvec_bases);
66963
66964 diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
66965 index 16fc34a..efd8bb8 100644
66966 --- a/kernel/trace/blktrace.c
66967 +++ b/kernel/trace/blktrace.c
66968 @@ -324,7 +324,7 @@ static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
66969 struct blk_trace *bt = filp->private_data;
66970 char buf[16];
66971
66972 - snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
66973 + snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
66974
66975 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
66976 }
66977 @@ -389,7 +389,7 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
66978 return 1;
66979
66980 bt = buf->chan->private_data;
66981 - atomic_inc(&bt->dropped);
66982 + atomic_inc_unchecked(&bt->dropped);
66983 return 0;
66984 }
66985
66986 @@ -490,7 +490,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
66987
66988 bt->dir = dir;
66989 bt->dev = dev;
66990 - atomic_set(&bt->dropped, 0);
66991 + atomic_set_unchecked(&bt->dropped, 0);
66992
66993 ret = -EIO;
66994 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
66995 diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
66996 index 25b4f4d..6f4772d 100644
66997 --- a/kernel/trace/ftrace.c
66998 +++ b/kernel/trace/ftrace.c
66999 @@ -1587,12 +1587,17 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
67000 if (unlikely(ftrace_disabled))
67001 return 0;
67002
67003 + ret = ftrace_arch_code_modify_prepare();
67004 + FTRACE_WARN_ON(ret);
67005 + if (ret)
67006 + return 0;
67007 +
67008 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
67009 + FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
67010 if (ret) {
67011 ftrace_bug(ret, ip);
67012 - return 0;
67013 }
67014 - return 1;
67015 + return ret ? 0 : 1;
67016 }
67017
67018 /*
67019 @@ -2608,7 +2613,7 @@ static void ftrace_free_entry_rcu(struct rcu_head *rhp)
67020
67021 int
67022 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
67023 - void *data)
67024 + void *data)
67025 {
67026 struct ftrace_func_probe *entry;
67027 struct ftrace_page *pg;
67028 diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
67029 index f2bd275..adaf3a2 100644
67030 --- a/kernel/trace/trace.c
67031 +++ b/kernel/trace/trace.c
67032 @@ -4201,10 +4201,9 @@ static const struct file_operations tracing_dyn_info_fops = {
67033 };
67034 #endif
67035
67036 -static struct dentry *d_tracer;
67037 -
67038 struct dentry *tracing_init_dentry(void)
67039 {
67040 + static struct dentry *d_tracer;
67041 static int once;
67042
67043 if (d_tracer)
67044 @@ -4224,10 +4223,9 @@ struct dentry *tracing_init_dentry(void)
67045 return d_tracer;
67046 }
67047
67048 -static struct dentry *d_percpu;
67049 -
67050 struct dentry *tracing_dentry_percpu(void)
67051 {
67052 + static struct dentry *d_percpu;
67053 static int once;
67054 struct dentry *d_tracer;
67055
67056 diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
67057 index c212a7f..7b02394 100644
67058 --- a/kernel/trace/trace_events.c
67059 +++ b/kernel/trace/trace_events.c
67060 @@ -1299,10 +1299,6 @@ static LIST_HEAD(ftrace_module_file_list);
67061 struct ftrace_module_file_ops {
67062 struct list_head list;
67063 struct module *mod;
67064 - struct file_operations id;
67065 - struct file_operations enable;
67066 - struct file_operations format;
67067 - struct file_operations filter;
67068 };
67069
67070 static struct ftrace_module_file_ops *
67071 @@ -1323,17 +1319,12 @@ trace_create_file_ops(struct module *mod)
67072
67073 file_ops->mod = mod;
67074
67075 - file_ops->id = ftrace_event_id_fops;
67076 - file_ops->id.owner = mod;
67077 -
67078 - file_ops->enable = ftrace_enable_fops;
67079 - file_ops->enable.owner = mod;
67080 -
67081 - file_ops->filter = ftrace_event_filter_fops;
67082 - file_ops->filter.owner = mod;
67083 -
67084 - file_ops->format = ftrace_event_format_fops;
67085 - file_ops->format.owner = mod;
67086 + pax_open_kernel();
67087 + *(void **)&mod->trace_id.owner = mod;
67088 + *(void **)&mod->trace_enable.owner = mod;
67089 + *(void **)&mod->trace_filter.owner = mod;
67090 + *(void **)&mod->trace_format.owner = mod;
67091 + pax_close_kernel();
67092
67093 list_add(&file_ops->list, &ftrace_module_file_list);
67094
67095 @@ -1357,8 +1348,8 @@ static void trace_module_add_events(struct module *mod)
67096
67097 for_each_event(call, start, end) {
67098 __trace_add_event_call(*call, mod,
67099 - &file_ops->id, &file_ops->enable,
67100 - &file_ops->filter, &file_ops->format);
67101 + &mod->trace_id, &mod->trace_enable,
67102 + &mod->trace_filter, &mod->trace_format);
67103 }
67104 }
67105
67106 diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
67107 index 00d527c..7c5b1a3 100644
67108 --- a/kernel/trace/trace_kprobe.c
67109 +++ b/kernel/trace/trace_kprobe.c
67110 @@ -217,7 +217,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
67111 long ret;
67112 int maxlen = get_rloc_len(*(u32 *)dest);
67113 u8 *dst = get_rloc_data(dest);
67114 - u8 *src = addr;
67115 + const u8 __user *src = (const u8 __force_user *)addr;
67116 mm_segment_t old_fs = get_fs();
67117 if (!maxlen)
67118 return;
67119 @@ -229,7 +229,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
67120 pagefault_disable();
67121 do
67122 ret = __copy_from_user_inatomic(dst++, src++, 1);
67123 - while (dst[-1] && ret == 0 && src - (u8 *)addr < maxlen);
67124 + while (dst[-1] && ret == 0 && src - (const u8 __force_user *)addr < maxlen);
67125 dst[-1] = '\0';
67126 pagefault_enable();
67127 set_fs(old_fs);
67128 @@ -238,7 +238,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
67129 ((u8 *)get_rloc_data(dest))[0] = '\0';
67130 *(u32 *)dest = make_data_rloc(0, get_rloc_offs(*(u32 *)dest));
67131 } else
67132 - *(u32 *)dest = make_data_rloc(src - (u8 *)addr,
67133 + *(u32 *)dest = make_data_rloc(src - (const u8 __force_user *)addr,
67134 get_rloc_offs(*(u32 *)dest));
67135 }
67136 /* Return the length of string -- including null terminal byte */
67137 @@ -252,7 +252,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string_size)(struct pt_regs *regs,
67138 set_fs(KERNEL_DS);
67139 pagefault_disable();
67140 do {
67141 - ret = __copy_from_user_inatomic(&c, (u8 *)addr + len, 1);
67142 + ret = __copy_from_user_inatomic(&c, (const u8 __force_user *)addr + len, 1);
67143 len++;
67144 } while (c && ret == 0 && len < MAX_STRING_SIZE);
67145 pagefault_enable();
67146 diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
67147 index fd3c8aa..5f324a6 100644
67148 --- a/kernel/trace/trace_mmiotrace.c
67149 +++ b/kernel/trace/trace_mmiotrace.c
67150 @@ -24,7 +24,7 @@ struct header_iter {
67151 static struct trace_array *mmio_trace_array;
67152 static bool overrun_detected;
67153 static unsigned long prev_overruns;
67154 -static atomic_t dropped_count;
67155 +static atomic_unchecked_t dropped_count;
67156
67157 static void mmio_reset_data(struct trace_array *tr)
67158 {
67159 @@ -127,7 +127,7 @@ static void mmio_close(struct trace_iterator *iter)
67160
67161 static unsigned long count_overruns(struct trace_iterator *iter)
67162 {
67163 - unsigned long cnt = atomic_xchg(&dropped_count, 0);
67164 + unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
67165 unsigned long over = ring_buffer_overruns(iter->tr->buffer);
67166
67167 if (over > prev_overruns)
67168 @@ -317,7 +317,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
67169 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
67170 sizeof(*entry), 0, pc);
67171 if (!event) {
67172 - atomic_inc(&dropped_count);
67173 + atomic_inc_unchecked(&dropped_count);
67174 return;
67175 }
67176 entry = ring_buffer_event_data(event);
67177 @@ -347,7 +347,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
67178 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
67179 sizeof(*entry), 0, pc);
67180 if (!event) {
67181 - atomic_inc(&dropped_count);
67182 + atomic_inc_unchecked(&dropped_count);
67183 return;
67184 }
67185 entry = ring_buffer_event_data(event);
67186 diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
67187 index 5199930..26c73a0 100644
67188 --- a/kernel/trace/trace_output.c
67189 +++ b/kernel/trace/trace_output.c
67190 @@ -278,7 +278,7 @@ int trace_seq_path(struct trace_seq *s, struct path *path)
67191
67192 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
67193 if (!IS_ERR(p)) {
67194 - p = mangle_path(s->buffer + s->len, p, "\n");
67195 + p = mangle_path(s->buffer + s->len, p, "\n\\");
67196 if (p) {
67197 s->len = p - s->buffer;
67198 return 1;
67199 diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
67200 index 77575b3..6e623d1 100644
67201 --- a/kernel/trace/trace_stack.c
67202 +++ b/kernel/trace/trace_stack.c
67203 @@ -50,7 +50,7 @@ static inline void check_stack(void)
67204 return;
67205
67206 /* we do not handle interrupt stacks yet */
67207 - if (!object_is_on_stack(&this_size))
67208 + if (!object_starts_on_stack(&this_size))
67209 return;
67210
67211 local_irq_save(flags);
67212 diff --git a/kernel/trace/trace_workqueue.c b/kernel/trace/trace_workqueue.c
67213 index 209b379..7f76423 100644
67214 --- a/kernel/trace/trace_workqueue.c
67215 +++ b/kernel/trace/trace_workqueue.c
67216 @@ -22,7 +22,7 @@ struct cpu_workqueue_stats {
67217 int cpu;
67218 pid_t pid;
67219 /* Can be inserted from interrupt or user context, need to be atomic */
67220 - atomic_t inserted;
67221 + atomic_unchecked_t inserted;
67222 /*
67223 * Don't need to be atomic, works are serialized in a single workqueue thread
67224 * on a single CPU.
67225 @@ -60,7 +60,7 @@ probe_workqueue_insertion(void *ignore,
67226 spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
67227 list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
67228 if (node->pid == wq_thread->pid) {
67229 - atomic_inc(&node->inserted);
67230 + atomic_inc_unchecked(&node->inserted);
67231 goto found;
67232 }
67233 }
67234 @@ -210,7 +210,7 @@ static int workqueue_stat_show(struct seq_file *s, void *p)
67235 tsk = get_pid_task(pid, PIDTYPE_PID);
67236 if (tsk) {
67237 seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
67238 - atomic_read(&cws->inserted), cws->executed,
67239 + atomic_read_unchecked(&cws->inserted), cws->executed,
67240 tsk->comm);
67241 put_task_struct(tsk);
67242 }
67243 diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
67244 index 82928f5..92da771 100644
67245 --- a/lib/Kconfig.debug
67246 +++ b/lib/Kconfig.debug
67247 @@ -1103,6 +1103,7 @@ config LATENCYTOP
67248 depends on DEBUG_KERNEL
67249 depends on STACKTRACE_SUPPORT
67250 depends on PROC_FS
67251 + depends on !GRKERNSEC_HIDESYM
67252 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND
67253 select KALLSYMS
67254 select KALLSYMS_ALL
67255 diff --git a/lib/bitmap.c b/lib/bitmap.c
67256 index 0d4a127..33a06c7 100644
67257 --- a/lib/bitmap.c
67258 +++ b/lib/bitmap.c
67259 @@ -419,7 +419,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
67260 {
67261 int c, old_c, totaldigits, ndigits, nchunks, nbits;
67262 u32 chunk;
67263 - const char __user __force *ubuf = (const char __user __force *)buf;
67264 + const char __user *ubuf = (const char __force_user *)buf;
67265
67266 bitmap_zero(maskp, nmaskbits);
67267
67268 @@ -504,7 +504,7 @@ int bitmap_parse_user(const char __user *ubuf,
67269 {
67270 if (!access_ok(VERIFY_READ, ubuf, ulen))
67271 return -EFAULT;
67272 - return __bitmap_parse((const char __force *)ubuf,
67273 + return __bitmap_parse((const char __force_kernel *)ubuf,
67274 ulen, 1, maskp, nmaskbits);
67275
67276 }
67277 @@ -596,7 +596,7 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
67278 {
67279 unsigned a, b;
67280 int c, old_c, totaldigits;
67281 - const char __user __force *ubuf = (const char __user __force *)buf;
67282 + const char __user *ubuf = (const char __force_user *)buf;
67283 int exp_digit, in_range;
67284
67285 totaldigits = c = 0;
67286 @@ -696,7 +696,7 @@ int bitmap_parselist_user(const char __user *ubuf,
67287 {
67288 if (!access_ok(VERIFY_READ, ubuf, ulen))
67289 return -EFAULT;
67290 - return __bitmap_parselist((const char __force *)ubuf,
67291 + return __bitmap_parselist((const char __force_kernel *)ubuf,
67292 ulen, 1, maskp, nmaskbits);
67293 }
67294 EXPORT_SYMBOL(bitmap_parselist_user);
67295 diff --git a/lib/bug.c b/lib/bug.c
67296 index 1955209..cbbb2ad 100644
67297 --- a/lib/bug.c
67298 +++ b/lib/bug.c
67299 @@ -133,6 +133,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
67300 return BUG_TRAP_TYPE_NONE;
67301
67302 bug = find_bug(bugaddr);
67303 + if (!bug)
67304 + return BUG_TRAP_TYPE_NONE;
67305
67306 file = NULL;
67307 line = 0;
67308 diff --git a/lib/debugobjects.c b/lib/debugobjects.c
67309 index a78b7c6..2c73084 100644
67310 --- a/lib/debugobjects.c
67311 +++ b/lib/debugobjects.c
67312 @@ -284,7 +284,7 @@ static void debug_object_is_on_stack(void *addr, int onstack)
67313 if (limit > 4)
67314 return;
67315
67316 - is_on_stack = object_is_on_stack(addr);
67317 + is_on_stack = object_starts_on_stack(addr);
67318 if (is_on_stack == onstack)
67319 return;
67320
67321 diff --git a/lib/devres.c b/lib/devres.c
67322 index 7c0e953..f642b5c 100644
67323 --- a/lib/devres.c
67324 +++ b/lib/devres.c
67325 @@ -80,7 +80,7 @@ EXPORT_SYMBOL(devm_ioremap_nocache);
67326 void devm_iounmap(struct device *dev, void __iomem *addr)
67327 {
67328 WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
67329 - (void *)addr));
67330 + (void __force *)addr));
67331 iounmap(addr);
67332 }
67333 EXPORT_SYMBOL(devm_iounmap);
67334 @@ -141,7 +141,7 @@ void devm_ioport_unmap(struct device *dev, void __iomem *addr)
67335 {
67336 ioport_unmap(addr);
67337 WARN_ON(devres_destroy(dev, devm_ioport_map_release,
67338 - devm_ioport_map_match, (void *)addr));
67339 + devm_ioport_map_match, (void __force *)addr));
67340 }
67341 EXPORT_SYMBOL(devm_ioport_unmap);
67342
67343 diff --git a/lib/dma-debug.c b/lib/dma-debug.c
67344 index fea790a..ebb0e82 100644
67345 --- a/lib/dma-debug.c
67346 +++ b/lib/dma-debug.c
67347 @@ -925,7 +925,7 @@ out:
67348
67349 static void check_for_stack(struct device *dev, void *addr)
67350 {
67351 - if (object_is_on_stack(addr))
67352 + if (object_starts_on_stack(addr))
67353 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
67354 "stack [addr=%p]\n", addr);
67355 }
67356 diff --git a/lib/extable.c b/lib/extable.c
67357 index 4cac81e..63e9b8f 100644
67358 --- a/lib/extable.c
67359 +++ b/lib/extable.c
67360 @@ -13,6 +13,7 @@
67361 #include <linux/init.h>
67362 #include <linux/sort.h>
67363 #include <asm/uaccess.h>
67364 +#include <asm/pgtable.h>
67365
67366 #ifndef ARCH_HAS_SORT_EXTABLE
67367 /*
67368 @@ -36,8 +37,10 @@ static int cmp_ex(const void *a, const void *b)
67369 void sort_extable(struct exception_table_entry *start,
67370 struct exception_table_entry *finish)
67371 {
67372 + pax_open_kernel();
67373 sort(start, finish - start, sizeof(struct exception_table_entry),
67374 cmp_ex, NULL);
67375 + pax_close_kernel();
67376 }
67377
67378 #ifdef CONFIG_MODULES
67379 diff --git a/lib/inflate.c b/lib/inflate.c
67380 index 013a761..c28f3fc 100644
67381 --- a/lib/inflate.c
67382 +++ b/lib/inflate.c
67383 @@ -269,7 +269,7 @@ static void free(void *where)
67384 malloc_ptr = free_mem_ptr;
67385 }
67386 #else
67387 -#define malloc(a) kmalloc(a, GFP_KERNEL)
67388 +#define malloc(a) kmalloc((a), GFP_KERNEL)
67389 #define free(a) kfree(a)
67390 #endif
67391
67392 diff --git a/lib/is_single_threaded.c b/lib/is_single_threaded.c
67393 index bd2bea9..6b3c95e 100644
67394 --- a/lib/is_single_threaded.c
67395 +++ b/lib/is_single_threaded.c
67396 @@ -22,6 +22,9 @@ bool current_is_single_threaded(void)
67397 struct task_struct *p, *t;
67398 bool ret;
67399
67400 + if (!mm)
67401 + return true;
67402 +
67403 if (atomic_read(&task->signal->live) != 1)
67404 return false;
67405
67406 diff --git a/lib/kref.c b/lib/kref.c
67407 index 3efb882..8492f4c 100644
67408 --- a/lib/kref.c
67409 +++ b/lib/kref.c
67410 @@ -52,7 +52,7 @@ void kref_get(struct kref *kref)
67411 */
67412 int kref_put(struct kref *kref, void (*release)(struct kref *kref))
67413 {
67414 - WARN_ON(release == NULL);
67415 + BUG_ON(release == NULL);
67416 WARN_ON(release == (void (*)(struct kref *))kfree);
67417
67418 if (atomic_dec_and_test(&kref->refcount)) {
67419 diff --git a/lib/radix-tree.c b/lib/radix-tree.c
67420 index d9df745..e73c2fe 100644
67421 --- a/lib/radix-tree.c
67422 +++ b/lib/radix-tree.c
67423 @@ -80,7 +80,7 @@ struct radix_tree_preload {
67424 int nr;
67425 struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH];
67426 };
67427 -static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
67428 +static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
67429
67430 static inline void *ptr_to_indirect(void *ptr)
67431 {
67432 diff --git a/lib/vsprintf.c b/lib/vsprintf.c
67433 index 993599e..84dc70e 100644
67434 --- a/lib/vsprintf.c
67435 +++ b/lib/vsprintf.c
67436 @@ -16,6 +16,9 @@
67437 * - scnprintf and vscnprintf
67438 */
67439
67440 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67441 +#define __INCLUDED_BY_HIDESYM 1
67442 +#endif
67443 #include <stdarg.h>
67444 #include <linux/module.h>
67445 #include <linux/types.h>
67446 @@ -413,7 +416,7 @@ char *symbol_string(char *buf, char *end, void *ptr,
67447 char sym[KSYM_SYMBOL_LEN];
67448 if (ext == 'B')
67449 sprint_backtrace(sym, value);
67450 - else if (ext != 'f' && ext != 's')
67451 + else if (ext != 'f' && ext != 's' && ext != 'a')
67452 sprint_symbol(sym, value);
67453 else
67454 kallsyms_lookup(value, NULL, NULL, NULL, sym);
67455 @@ -777,7 +780,11 @@ char *uuid_string(char *buf, char *end, const u8 *addr,
67456 return string(buf, end, uuid, spec);
67457 }
67458
67459 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67460 +int kptr_restrict __read_mostly = 2;
67461 +#else
67462 int kptr_restrict __read_mostly;
67463 +#endif
67464
67465 /*
67466 * Show a '%p' thing. A kernel extension is that the '%p' is followed
67467 @@ -791,6 +798,8 @@ int kptr_restrict __read_mostly;
67468 * - 'S' For symbolic direct pointers with offset
67469 * - 's' For symbolic direct pointers without offset
67470 * - 'B' For backtraced symbolic direct pointers with offset
67471 + * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
67472 + * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM
67473 * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
67474 * - 'r' For raw struct resource, e.g., [mem 0x0-0x1f flags 0x201]
67475 * - 'M' For a 6-byte MAC address, it prints the address in the
67476 @@ -835,12 +844,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
67477 {
67478 if (!ptr && *fmt != 'K') {
67479 /*
67480 - * Print (null) with the same width as a pointer so it makes
67481 + * Print (nil) with the same width as a pointer so it makes
67482 * tabular output look nice.
67483 */
67484 if (spec.field_width == -1)
67485 spec.field_width = 2 * sizeof(void *);
67486 - return string(buf, end, "(null)", spec);
67487 + return string(buf, end, "(nil)", spec);
67488 }
67489
67490 switch (*fmt) {
67491 @@ -850,6 +859,13 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
67492 /* Fallthrough */
67493 case 'S':
67494 case 's':
67495 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67496 + break;
67497 +#else
67498 + return symbol_string(buf, end, ptr, spec, *fmt);
67499 +#endif
67500 + case 'A':
67501 + case 'a':
67502 case 'B':
67503 return symbol_string(buf, end, ptr, spec, *fmt);
67504 case 'R':
67505 @@ -1608,11 +1624,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
67506 typeof(type) value; \
67507 if (sizeof(type) == 8) { \
67508 args = PTR_ALIGN(args, sizeof(u32)); \
67509 - *(u32 *)&value = *(u32 *)args; \
67510 - *((u32 *)&value + 1) = *(u32 *)(args + 4); \
67511 + *(u32 *)&value = *(const u32 *)args; \
67512 + *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
67513 } else { \
67514 args = PTR_ALIGN(args, sizeof(type)); \
67515 - value = *(typeof(type) *)args; \
67516 + value = *(const typeof(type) *)args; \
67517 } \
67518 args += sizeof(type); \
67519 value; \
67520 @@ -1675,7 +1691,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
67521 case FORMAT_TYPE_STR: {
67522 const char *str_arg = args;
67523 args += strlen(str_arg) + 1;
67524 - str = string(str, end, (char *)str_arg, spec);
67525 + str = string(str, end, str_arg, spec);
67526 break;
67527 }
67528
67529 diff --git a/localversion-grsec b/localversion-grsec
67530 new file mode 100644
67531 index 0000000..7cd6065
67532 --- /dev/null
67533 +++ b/localversion-grsec
67534 @@ -0,0 +1 @@
67535 +-grsec
67536 diff --git a/mm/Kconfig b/mm/Kconfig
67537 index 011b110..b492af2 100644
67538 --- a/mm/Kconfig
67539 +++ b/mm/Kconfig
67540 @@ -241,10 +241,10 @@ config KSM
67541 root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
67542
67543 config DEFAULT_MMAP_MIN_ADDR
67544 - int "Low address space to protect from user allocation"
67545 + int "Low address space to protect from user allocation"
67546 depends on MMU
67547 - default 4096
67548 - help
67549 + default 65536
67550 + help
67551 This is the portion of low virtual memory which should be protected
67552 from userspace allocation. Keeping a user from writing to low pages
67553 can help reduce the impact of kernel NULL pointer bugs.
67554 diff --git a/mm/filemap.c b/mm/filemap.c
67555 index 03c5b0e..a01e793 100644
67556 --- a/mm/filemap.c
67557 +++ b/mm/filemap.c
67558 @@ -1770,7 +1770,7 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
67559 struct address_space *mapping = file->f_mapping;
67560
67561 if (!mapping->a_ops->readpage)
67562 - return -ENOEXEC;
67563 + return -ENODEV;
67564 file_accessed(file);
67565 vma->vm_ops = &generic_file_vm_ops;
67566 vma->vm_flags |= VM_CAN_NONLINEAR;
67567 @@ -2176,6 +2176,7 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i
67568 *pos = i_size_read(inode);
67569
67570 if (limit != RLIM_INFINITY) {
67571 + gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
67572 if (*pos >= limit) {
67573 send_sig(SIGXFSZ, current, 0);
67574 return -EFBIG;
67575 diff --git a/mm/fremap.c b/mm/fremap.c
67576 index 9ed4fd4..c42648d 100644
67577 --- a/mm/fremap.c
67578 +++ b/mm/fremap.c
67579 @@ -155,6 +155,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
67580 retry:
67581 vma = find_vma(mm, start);
67582
67583 +#ifdef CONFIG_PAX_SEGMEXEC
67584 + if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
67585 + goto out;
67586 +#endif
67587 +
67588 /*
67589 * Make sure the vma is shared, that it supports prefaulting,
67590 * and that the remapped range is valid and fully within
67591 diff --git a/mm/highmem.c b/mm/highmem.c
67592 index 57d82c6..e9e0552 100644
67593 --- a/mm/highmem.c
67594 +++ b/mm/highmem.c
67595 @@ -125,9 +125,10 @@ static void flush_all_zero_pkmaps(void)
67596 * So no dangers, even with speculative execution.
67597 */
67598 page = pte_page(pkmap_page_table[i]);
67599 + pax_open_kernel();
67600 pte_clear(&init_mm, (unsigned long)page_address(page),
67601 &pkmap_page_table[i]);
67602 -
67603 + pax_close_kernel();
67604 set_page_address(page, NULL);
67605 need_flush = 1;
67606 }
67607 @@ -186,9 +187,11 @@ start:
67608 }
67609 }
67610 vaddr = PKMAP_ADDR(last_pkmap_nr);
67611 +
67612 + pax_open_kernel();
67613 set_pte_at(&init_mm, vaddr,
67614 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
67615 -
67616 + pax_close_kernel();
67617 pkmap_count[last_pkmap_nr] = 1;
67618 set_page_address(page, (void *)vaddr);
67619
67620 diff --git a/mm/huge_memory.c b/mm/huge_memory.c
67621 index 33141f5..e56bef9 100644
67622 --- a/mm/huge_memory.c
67623 +++ b/mm/huge_memory.c
67624 @@ -703,7 +703,7 @@ out:
67625 * run pte_offset_map on the pmd, if an huge pmd could
67626 * materialize from under us from a different thread.
67627 */
67628 - if (unlikely(__pte_alloc(mm, vma, pmd, address)))
67629 + if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
67630 return VM_FAULT_OOM;
67631 /* if an huge pmd materialized from under us just retry later */
67632 if (unlikely(pmd_trans_huge(*pmd)))
67633 diff --git a/mm/hugetlb.c b/mm/hugetlb.c
67634 index 2316840..b418671 100644
67635 --- a/mm/hugetlb.c
67636 +++ b/mm/hugetlb.c
67637 @@ -2347,6 +2347,27 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
67638 return 1;
67639 }
67640
67641 +#ifdef CONFIG_PAX_SEGMEXEC
67642 +static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
67643 +{
67644 + struct mm_struct *mm = vma->vm_mm;
67645 + struct vm_area_struct *vma_m;
67646 + unsigned long address_m;
67647 + pte_t *ptep_m;
67648 +
67649 + vma_m = pax_find_mirror_vma(vma);
67650 + if (!vma_m)
67651 + return;
67652 +
67653 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
67654 + address_m = address + SEGMEXEC_TASK_SIZE;
67655 + ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
67656 + get_page(page_m);
67657 + hugepage_add_anon_rmap(page_m, vma_m, address_m);
67658 + set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
67659 +}
67660 +#endif
67661 +
67662 /*
67663 * Hugetlb_cow() should be called with page lock of the original hugepage held.
67664 */
67665 @@ -2450,6 +2471,11 @@ retry_avoidcopy:
67666 make_huge_pte(vma, new_page, 1));
67667 page_remove_rmap(old_page);
67668 hugepage_add_new_anon_rmap(new_page, vma, address);
67669 +
67670 +#ifdef CONFIG_PAX_SEGMEXEC
67671 + pax_mirror_huge_pte(vma, address, new_page);
67672 +#endif
67673 +
67674 /* Make the old page be freed below */
67675 new_page = old_page;
67676 mmu_notifier_invalidate_range_end(mm,
67677 @@ -2601,6 +2627,10 @@ retry:
67678 && (vma->vm_flags & VM_SHARED)));
67679 set_huge_pte_at(mm, address, ptep, new_pte);
67680
67681 +#ifdef CONFIG_PAX_SEGMEXEC
67682 + pax_mirror_huge_pte(vma, address, page);
67683 +#endif
67684 +
67685 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
67686 /* Optimization, do the COW without a second fault */
67687 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
67688 @@ -2630,6 +2660,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
67689 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
67690 struct hstate *h = hstate_vma(vma);
67691
67692 +#ifdef CONFIG_PAX_SEGMEXEC
67693 + struct vm_area_struct *vma_m;
67694 +#endif
67695 +
67696 ptep = huge_pte_offset(mm, address);
67697 if (ptep) {
67698 entry = huge_ptep_get(ptep);
67699 @@ -2641,6 +2675,26 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
67700 VM_FAULT_SET_HINDEX(h - hstates);
67701 }
67702
67703 +#ifdef CONFIG_PAX_SEGMEXEC
67704 + vma_m = pax_find_mirror_vma(vma);
67705 + if (vma_m) {
67706 + unsigned long address_m;
67707 +
67708 + if (vma->vm_start > vma_m->vm_start) {
67709 + address_m = address;
67710 + address -= SEGMEXEC_TASK_SIZE;
67711 + vma = vma_m;
67712 + h = hstate_vma(vma);
67713 + } else
67714 + address_m = address + SEGMEXEC_TASK_SIZE;
67715 +
67716 + if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
67717 + return VM_FAULT_OOM;
67718 + address_m &= HPAGE_MASK;
67719 + unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
67720 + }
67721 +#endif
67722 +
67723 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
67724 if (!ptep)
67725 return VM_FAULT_OOM;
67726 diff --git a/mm/internal.h b/mm/internal.h
67727 index 2189af4..f2ca332 100644
67728 --- a/mm/internal.h
67729 +++ b/mm/internal.h
67730 @@ -95,6 +95,7 @@ extern void putback_lru_page(struct page *page);
67731 * in mm/page_alloc.c
67732 */
67733 extern void __free_pages_bootmem(struct page *page, unsigned int order);
67734 +extern void free_compound_page(struct page *page);
67735 extern void prep_compound_page(struct page *page, unsigned long order);
67736 #ifdef CONFIG_MEMORY_FAILURE
67737 extern bool is_free_buddy_page(struct page *page);
67738 diff --git a/mm/kmemleak.c b/mm/kmemleak.c
67739 index f3b2a00..61da94d 100644
67740 --- a/mm/kmemleak.c
67741 +++ b/mm/kmemleak.c
67742 @@ -357,7 +357,7 @@ static void print_unreferenced(struct seq_file *seq,
67743
67744 for (i = 0; i < object->trace_len; i++) {
67745 void *ptr = (void *)object->trace[i];
67746 - seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
67747 + seq_printf(seq, " [<%p>] %pA\n", ptr, ptr);
67748 }
67749 }
67750
67751 diff --git a/mm/maccess.c b/mm/maccess.c
67752 index d53adf9..03a24bf 100644
67753 --- a/mm/maccess.c
67754 +++ b/mm/maccess.c
67755 @@ -26,7 +26,7 @@ long __probe_kernel_read(void *dst, const void *src, size_t size)
67756 set_fs(KERNEL_DS);
67757 pagefault_disable();
67758 ret = __copy_from_user_inatomic(dst,
67759 - (__force const void __user *)src, size);
67760 + (const void __force_user *)src, size);
67761 pagefault_enable();
67762 set_fs(old_fs);
67763
67764 @@ -53,7 +53,7 @@ long __probe_kernel_write(void *dst, const void *src, size_t size)
67765
67766 set_fs(KERNEL_DS);
67767 pagefault_disable();
67768 - ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
67769 + ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
67770 pagefault_enable();
67771 set_fs(old_fs);
67772
67773 diff --git a/mm/madvise.c b/mm/madvise.c
67774 index 74bf193..feb6fd3 100644
67775 --- a/mm/madvise.c
67776 +++ b/mm/madvise.c
67777 @@ -45,6 +45,10 @@ static long madvise_behavior(struct vm_area_struct * vma,
67778 pgoff_t pgoff;
67779 unsigned long new_flags = vma->vm_flags;
67780
67781 +#ifdef CONFIG_PAX_SEGMEXEC
67782 + struct vm_area_struct *vma_m;
67783 +#endif
67784 +
67785 switch (behavior) {
67786 case MADV_NORMAL:
67787 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
67788 @@ -110,6 +114,13 @@ success:
67789 /*
67790 * vm_flags is protected by the mmap_sem held in write mode.
67791 */
67792 +
67793 +#ifdef CONFIG_PAX_SEGMEXEC
67794 + vma_m = pax_find_mirror_vma(vma);
67795 + if (vma_m)
67796 + vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
67797 +#endif
67798 +
67799 vma->vm_flags = new_flags;
67800
67801 out:
67802 @@ -168,6 +179,11 @@ static long madvise_dontneed(struct vm_area_struct * vma,
67803 struct vm_area_struct ** prev,
67804 unsigned long start, unsigned long end)
67805 {
67806 +
67807 +#ifdef CONFIG_PAX_SEGMEXEC
67808 + struct vm_area_struct *vma_m;
67809 +#endif
67810 +
67811 *prev = vma;
67812 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
67813 return -EINVAL;
67814 @@ -180,6 +196,21 @@ static long madvise_dontneed(struct vm_area_struct * vma,
67815 zap_page_range(vma, start, end - start, &details);
67816 } else
67817 zap_page_range(vma, start, end - start, NULL);
67818 +
67819 +#ifdef CONFIG_PAX_SEGMEXEC
67820 + vma_m = pax_find_mirror_vma(vma);
67821 + if (vma_m) {
67822 + if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
67823 + struct zap_details details = {
67824 + .nonlinear_vma = vma_m,
67825 + .last_index = ULONG_MAX,
67826 + };
67827 + zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
67828 + } else
67829 + zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
67830 + }
67831 +#endif
67832 +
67833 return 0;
67834 }
67835
67836 @@ -376,6 +407,16 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
67837 if (end < start)
67838 goto out;
67839
67840 +#ifdef CONFIG_PAX_SEGMEXEC
67841 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
67842 + if (end > SEGMEXEC_TASK_SIZE)
67843 + goto out;
67844 + } else
67845 +#endif
67846 +
67847 + if (end > TASK_SIZE)
67848 + goto out;
67849 +
67850 error = 0;
67851 if (end == start)
67852 goto out;
67853 diff --git a/mm/memory-failure.c b/mm/memory-failure.c
67854 index 06d3479..0778eef 100644
67855 --- a/mm/memory-failure.c
67856 +++ b/mm/memory-failure.c
67857 @@ -61,7 +61,7 @@ int sysctl_memory_failure_early_kill __read_mostly = 0;
67858
67859 int sysctl_memory_failure_recovery __read_mostly = 1;
67860
67861 -atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
67862 +atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
67863
67864 #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
67865
67866 @@ -202,7 +202,7 @@ static int kill_proc_ao(struct task_struct *t, unsigned long addr, int trapno,
67867 si.si_signo = SIGBUS;
67868 si.si_errno = 0;
67869 si.si_code = BUS_MCEERR_AO;
67870 - si.si_addr = (void *)addr;
67871 + si.si_addr = (void __user *)addr;
67872 #ifdef __ARCH_SI_TRAPNO
67873 si.si_trapno = trapno;
67874 #endif
67875 @@ -1010,7 +1010,7 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
67876 }
67877
67878 nr_pages = 1 << compound_trans_order(hpage);
67879 - atomic_long_add(nr_pages, &mce_bad_pages);
67880 + atomic_long_add_unchecked(nr_pages, &mce_bad_pages);
67881
67882 /*
67883 * We need/can do nothing about count=0 pages.
67884 @@ -1040,7 +1040,7 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
67885 if (!PageHWPoison(hpage)
67886 || (hwpoison_filter(p) && TestClearPageHWPoison(p))
67887 || (p != hpage && TestSetPageHWPoison(hpage))) {
67888 - atomic_long_sub(nr_pages, &mce_bad_pages);
67889 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
67890 return 0;
67891 }
67892 set_page_hwpoison_huge_page(hpage);
67893 @@ -1098,7 +1098,7 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
67894 }
67895 if (hwpoison_filter(p)) {
67896 if (TestClearPageHWPoison(p))
67897 - atomic_long_sub(nr_pages, &mce_bad_pages);
67898 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
67899 unlock_page(hpage);
67900 put_page(hpage);
67901 return 0;
67902 @@ -1315,7 +1315,7 @@ int unpoison_memory(unsigned long pfn)
67903 return 0;
67904 }
67905 if (TestClearPageHWPoison(p))
67906 - atomic_long_sub(nr_pages, &mce_bad_pages);
67907 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
67908 pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
67909 return 0;
67910 }
67911 @@ -1329,7 +1329,7 @@ int unpoison_memory(unsigned long pfn)
67912 */
67913 if (TestClearPageHWPoison(page)) {
67914 pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
67915 - atomic_long_sub(nr_pages, &mce_bad_pages);
67916 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
67917 freeit = 1;
67918 if (PageHuge(page))
67919 clear_page_hwpoison_huge_page(page);
67920 @@ -1442,7 +1442,7 @@ static int soft_offline_huge_page(struct page *page, int flags)
67921 }
67922 done:
67923 if (!PageHWPoison(hpage))
67924 - atomic_long_add(1 << compound_trans_order(hpage), &mce_bad_pages);
67925 + atomic_long_add_unchecked(1 << compound_trans_order(hpage), &mce_bad_pages);
67926 set_page_hwpoison_huge_page(hpage);
67927 dequeue_hwpoisoned_huge_page(hpage);
67928 /* keep elevated page count for bad page */
67929 @@ -1573,7 +1573,7 @@ int soft_offline_page(struct page *page, int flags)
67930 return ret;
67931
67932 done:
67933 - atomic_long_add(1, &mce_bad_pages);
67934 + atomic_long_add_unchecked(1, &mce_bad_pages);
67935 SetPageHWPoison(page);
67936 /* keep elevated page count for bad page */
67937 return ret;
67938 diff --git a/mm/memory.c b/mm/memory.c
67939 index 829d437..3d3926a 100644
67940 --- a/mm/memory.c
67941 +++ b/mm/memory.c
67942 @@ -457,8 +457,12 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
67943 return;
67944
67945 pmd = pmd_offset(pud, start);
67946 +
67947 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
67948 pud_clear(pud);
67949 pmd_free_tlb(tlb, pmd, start);
67950 +#endif
67951 +
67952 }
67953
67954 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
67955 @@ -489,9 +493,12 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
67956 if (end - 1 > ceiling - 1)
67957 return;
67958
67959 +#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
67960 pud = pud_offset(pgd, start);
67961 pgd_clear(pgd);
67962 pud_free_tlb(tlb, pud, start);
67963 +#endif
67964 +
67965 }
67966
67967 /*
67968 @@ -1566,12 +1573,6 @@ no_page_table:
67969 return page;
67970 }
67971
67972 -static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
67973 -{
67974 - return stack_guard_page_start(vma, addr) ||
67975 - stack_guard_page_end(vma, addr+PAGE_SIZE);
67976 -}
67977 -
67978 /**
67979 * __get_user_pages() - pin user pages in memory
67980 * @tsk: task_struct of target task
67981 @@ -1644,10 +1645,10 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
67982 (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
67983 i = 0;
67984
67985 - do {
67986 + while (nr_pages) {
67987 struct vm_area_struct *vma;
67988
67989 - vma = find_extend_vma(mm, start);
67990 + vma = find_vma(mm, start);
67991 if (!vma && in_gate_area(mm, start)) {
67992 unsigned long pg = start & PAGE_MASK;
67993 pgd_t *pgd;
67994 @@ -1695,7 +1696,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
67995 goto next_page;
67996 }
67997
67998 - if (!vma ||
67999 + if (!vma || start < vma->vm_start ||
68000 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
68001 !(vm_flags & vma->vm_flags))
68002 return i ? : -EFAULT;
68003 @@ -1722,11 +1723,6 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
68004 int ret;
68005 unsigned int fault_flags = 0;
68006
68007 - /* For mlock, just skip the stack guard page. */
68008 - if (foll_flags & FOLL_MLOCK) {
68009 - if (stack_guard_page(vma, start))
68010 - goto next_page;
68011 - }
68012 if (foll_flags & FOLL_WRITE)
68013 fault_flags |= FAULT_FLAG_WRITE;
68014 if (nonblocking)
68015 @@ -1800,7 +1796,7 @@ next_page:
68016 start += PAGE_SIZE;
68017 nr_pages--;
68018 } while (nr_pages && start < vma->vm_end);
68019 - } while (nr_pages);
68020 + }
68021 return i;
68022 }
68023 EXPORT_SYMBOL(__get_user_pages);
68024 @@ -2007,6 +2003,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
68025 page_add_file_rmap(page);
68026 set_pte_at(mm, addr, pte, mk_pte(page, prot));
68027
68028 +#ifdef CONFIG_PAX_SEGMEXEC
68029 + pax_mirror_file_pte(vma, addr, page, ptl);
68030 +#endif
68031 +
68032 retval = 0;
68033 pte_unmap_unlock(pte, ptl);
68034 return retval;
68035 @@ -2041,10 +2041,22 @@ out:
68036 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
68037 struct page *page)
68038 {
68039 +
68040 +#ifdef CONFIG_PAX_SEGMEXEC
68041 + struct vm_area_struct *vma_m;
68042 +#endif
68043 +
68044 if (addr < vma->vm_start || addr >= vma->vm_end)
68045 return -EFAULT;
68046 if (!page_count(page))
68047 return -EINVAL;
68048 +
68049 +#ifdef CONFIG_PAX_SEGMEXEC
68050 + vma_m = pax_find_mirror_vma(vma);
68051 + if (vma_m)
68052 + vma_m->vm_flags |= VM_INSERTPAGE;
68053 +#endif
68054 +
68055 vma->vm_flags |= VM_INSERTPAGE;
68056 return insert_page(vma, addr, page, vma->vm_page_prot);
68057 }
68058 @@ -2130,6 +2142,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
68059 unsigned long pfn)
68060 {
68061 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
68062 + BUG_ON(vma->vm_mirror);
68063
68064 if (addr < vma->vm_start || addr >= vma->vm_end)
68065 return -EFAULT;
68066 @@ -2445,6 +2458,186 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
68067 copy_user_highpage(dst, src, va, vma);
68068 }
68069
68070 +#ifdef CONFIG_PAX_SEGMEXEC
68071 +static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
68072 +{
68073 + struct mm_struct *mm = vma->vm_mm;
68074 + spinlock_t *ptl;
68075 + pte_t *pte, entry;
68076 +
68077 + pte = pte_offset_map_lock(mm, pmd, address, &ptl);
68078 + entry = *pte;
68079 + if (!pte_present(entry)) {
68080 + if (!pte_none(entry)) {
68081 + BUG_ON(pte_file(entry));
68082 + free_swap_and_cache(pte_to_swp_entry(entry));
68083 + pte_clear_not_present_full(mm, address, pte, 0);
68084 + }
68085 + } else {
68086 + struct page *page;
68087 +
68088 + flush_cache_page(vma, address, pte_pfn(entry));
68089 + entry = ptep_clear_flush(vma, address, pte);
68090 + BUG_ON(pte_dirty(entry));
68091 + page = vm_normal_page(vma, address, entry);
68092 + if (page) {
68093 + update_hiwater_rss(mm);
68094 + if (PageAnon(page))
68095 + dec_mm_counter_fast(mm, MM_ANONPAGES);
68096 + else
68097 + dec_mm_counter_fast(mm, MM_FILEPAGES);
68098 + page_remove_rmap(page);
68099 + page_cache_release(page);
68100 + }
68101 + }
68102 + pte_unmap_unlock(pte, ptl);
68103 +}
68104 +
68105 +/* PaX: if vma is mirrored, synchronize the mirror's PTE
68106 + *
68107 + * the ptl of the lower mapped page is held on entry and is not released on exit
68108 + * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
68109 + */
68110 +static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
68111 +{
68112 + struct mm_struct *mm = vma->vm_mm;
68113 + unsigned long address_m;
68114 + spinlock_t *ptl_m;
68115 + struct vm_area_struct *vma_m;
68116 + pmd_t *pmd_m;
68117 + pte_t *pte_m, entry_m;
68118 +
68119 + BUG_ON(!page_m || !PageAnon(page_m));
68120 +
68121 + vma_m = pax_find_mirror_vma(vma);
68122 + if (!vma_m)
68123 + return;
68124 +
68125 + BUG_ON(!PageLocked(page_m));
68126 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
68127 + address_m = address + SEGMEXEC_TASK_SIZE;
68128 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
68129 + pte_m = pte_offset_map(pmd_m, address_m);
68130 + ptl_m = pte_lockptr(mm, pmd_m);
68131 + if (ptl != ptl_m) {
68132 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
68133 + if (!pte_none(*pte_m))
68134 + goto out;
68135 + }
68136 +
68137 + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
68138 + page_cache_get(page_m);
68139 + page_add_anon_rmap(page_m, vma_m, address_m);
68140 + inc_mm_counter_fast(mm, MM_ANONPAGES);
68141 + set_pte_at(mm, address_m, pte_m, entry_m);
68142 + update_mmu_cache(vma_m, address_m, entry_m);
68143 +out:
68144 + if (ptl != ptl_m)
68145 + spin_unlock(ptl_m);
68146 + pte_unmap(pte_m);
68147 + unlock_page(page_m);
68148 +}
68149 +
68150 +void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
68151 +{
68152 + struct mm_struct *mm = vma->vm_mm;
68153 + unsigned long address_m;
68154 + spinlock_t *ptl_m;
68155 + struct vm_area_struct *vma_m;
68156 + pmd_t *pmd_m;
68157 + pte_t *pte_m, entry_m;
68158 +
68159 + BUG_ON(!page_m || PageAnon(page_m));
68160 +
68161 + vma_m = pax_find_mirror_vma(vma);
68162 + if (!vma_m)
68163 + return;
68164 +
68165 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
68166 + address_m = address + SEGMEXEC_TASK_SIZE;
68167 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
68168 + pte_m = pte_offset_map(pmd_m, address_m);
68169 + ptl_m = pte_lockptr(mm, pmd_m);
68170 + if (ptl != ptl_m) {
68171 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
68172 + if (!pte_none(*pte_m))
68173 + goto out;
68174 + }
68175 +
68176 + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
68177 + page_cache_get(page_m);
68178 + page_add_file_rmap(page_m);
68179 + inc_mm_counter_fast(mm, MM_FILEPAGES);
68180 + set_pte_at(mm, address_m, pte_m, entry_m);
68181 + update_mmu_cache(vma_m, address_m, entry_m);
68182 +out:
68183 + if (ptl != ptl_m)
68184 + spin_unlock(ptl_m);
68185 + pte_unmap(pte_m);
68186 +}
68187 +
68188 +static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
68189 +{
68190 + struct mm_struct *mm = vma->vm_mm;
68191 + unsigned long address_m;
68192 + spinlock_t *ptl_m;
68193 + struct vm_area_struct *vma_m;
68194 + pmd_t *pmd_m;
68195 + pte_t *pte_m, entry_m;
68196 +
68197 + vma_m = pax_find_mirror_vma(vma);
68198 + if (!vma_m)
68199 + return;
68200 +
68201 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
68202 + address_m = address + SEGMEXEC_TASK_SIZE;
68203 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
68204 + pte_m = pte_offset_map(pmd_m, address_m);
68205 + ptl_m = pte_lockptr(mm, pmd_m);
68206 + if (ptl != ptl_m) {
68207 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
68208 + if (!pte_none(*pte_m))
68209 + goto out;
68210 + }
68211 +
68212 + entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
68213 + set_pte_at(mm, address_m, pte_m, entry_m);
68214 +out:
68215 + if (ptl != ptl_m)
68216 + spin_unlock(ptl_m);
68217 + pte_unmap(pte_m);
68218 +}
68219 +
68220 +static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
68221 +{
68222 + struct page *page_m;
68223 + pte_t entry;
68224 +
68225 + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
68226 + goto out;
68227 +
68228 + entry = *pte;
68229 + page_m = vm_normal_page(vma, address, entry);
68230 + if (!page_m)
68231 + pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
68232 + else if (PageAnon(page_m)) {
68233 + if (pax_find_mirror_vma(vma)) {
68234 + pte_unmap_unlock(pte, ptl);
68235 + lock_page(page_m);
68236 + pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
68237 + if (pte_same(entry, *pte))
68238 + pax_mirror_anon_pte(vma, address, page_m, ptl);
68239 + else
68240 + unlock_page(page_m);
68241 + }
68242 + } else
68243 + pax_mirror_file_pte(vma, address, page_m, ptl);
68244 +
68245 +out:
68246 + pte_unmap_unlock(pte, ptl);
68247 +}
68248 +#endif
68249 +
68250 /*
68251 * This routine handles present pages, when users try to write
68252 * to a shared page. It is done by copying the page to a new address
68253 @@ -2656,6 +2849,12 @@ gotten:
68254 */
68255 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
68256 if (likely(pte_same(*page_table, orig_pte))) {
68257 +
68258 +#ifdef CONFIG_PAX_SEGMEXEC
68259 + if (pax_find_mirror_vma(vma))
68260 + BUG_ON(!trylock_page(new_page));
68261 +#endif
68262 +
68263 if (old_page) {
68264 if (!PageAnon(old_page)) {
68265 dec_mm_counter_fast(mm, MM_FILEPAGES);
68266 @@ -2707,6 +2906,10 @@ gotten:
68267 page_remove_rmap(old_page);
68268 }
68269
68270 +#ifdef CONFIG_PAX_SEGMEXEC
68271 + pax_mirror_anon_pte(vma, address, new_page, ptl);
68272 +#endif
68273 +
68274 /* Free the old page.. */
68275 new_page = old_page;
68276 ret |= VM_FAULT_WRITE;
68277 @@ -2986,6 +3189,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
68278 swap_free(entry);
68279 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
68280 try_to_free_swap(page);
68281 +
68282 +#ifdef CONFIG_PAX_SEGMEXEC
68283 + if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
68284 +#endif
68285 +
68286 unlock_page(page);
68287 if (swapcache) {
68288 /*
68289 @@ -3009,6 +3217,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
68290
68291 /* No need to invalidate - it was non-present before */
68292 update_mmu_cache(vma, address, page_table);
68293 +
68294 +#ifdef CONFIG_PAX_SEGMEXEC
68295 + pax_mirror_anon_pte(vma, address, page, ptl);
68296 +#endif
68297 +
68298 unlock:
68299 pte_unmap_unlock(page_table, ptl);
68300 out:
68301 @@ -3028,40 +3241,6 @@ out_release:
68302 }
68303
68304 /*
68305 - * This is like a special single-page "expand_{down|up}wards()",
68306 - * except we must first make sure that 'address{-|+}PAGE_SIZE'
68307 - * doesn't hit another vma.
68308 - */
68309 -static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
68310 -{
68311 - address &= PAGE_MASK;
68312 - if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
68313 - struct vm_area_struct *prev = vma->vm_prev;
68314 -
68315 - /*
68316 - * Is there a mapping abutting this one below?
68317 - *
68318 - * That's only ok if it's the same stack mapping
68319 - * that has gotten split..
68320 - */
68321 - if (prev && prev->vm_end == address)
68322 - return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
68323 -
68324 - expand_downwards(vma, address - PAGE_SIZE);
68325 - }
68326 - if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
68327 - struct vm_area_struct *next = vma->vm_next;
68328 -
68329 - /* As VM_GROWSDOWN but s/below/above/ */
68330 - if (next && next->vm_start == address + PAGE_SIZE)
68331 - return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
68332 -
68333 - expand_upwards(vma, address + PAGE_SIZE);
68334 - }
68335 - return 0;
68336 -}
68337 -
68338 -/*
68339 * We enter with non-exclusive mmap_sem (to exclude vma changes,
68340 * but allow concurrent faults), and pte mapped but not yet locked.
68341 * We return with mmap_sem still held, but pte unmapped and unlocked.
68342 @@ -3070,27 +3249,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
68343 unsigned long address, pte_t *page_table, pmd_t *pmd,
68344 unsigned int flags)
68345 {
68346 - struct page *page;
68347 + struct page *page = NULL;
68348 spinlock_t *ptl;
68349 pte_t entry;
68350
68351 - pte_unmap(page_table);
68352 -
68353 - /* Check if we need to add a guard page to the stack */
68354 - if (check_stack_guard_page(vma, address) < 0)
68355 - return VM_FAULT_SIGBUS;
68356 -
68357 - /* Use the zero-page for reads */
68358 if (!(flags & FAULT_FLAG_WRITE)) {
68359 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
68360 vma->vm_page_prot));
68361 - page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
68362 + ptl = pte_lockptr(mm, pmd);
68363 + spin_lock(ptl);
68364 if (!pte_none(*page_table))
68365 goto unlock;
68366 goto setpte;
68367 }
68368
68369 /* Allocate our own private page. */
68370 + pte_unmap(page_table);
68371 +
68372 if (unlikely(anon_vma_prepare(vma)))
68373 goto oom;
68374 page = alloc_zeroed_user_highpage_movable(vma, address);
68375 @@ -3109,6 +3284,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
68376 if (!pte_none(*page_table))
68377 goto release;
68378
68379 +#ifdef CONFIG_PAX_SEGMEXEC
68380 + if (pax_find_mirror_vma(vma))
68381 + BUG_ON(!trylock_page(page));
68382 +#endif
68383 +
68384 inc_mm_counter_fast(mm, MM_ANONPAGES);
68385 page_add_new_anon_rmap(page, vma, address);
68386 setpte:
68387 @@ -3116,6 +3296,12 @@ setpte:
68388
68389 /* No need to invalidate - it was non-present before */
68390 update_mmu_cache(vma, address, page_table);
68391 +
68392 +#ifdef CONFIG_PAX_SEGMEXEC
68393 + if (page)
68394 + pax_mirror_anon_pte(vma, address, page, ptl);
68395 +#endif
68396 +
68397 unlock:
68398 pte_unmap_unlock(page_table, ptl);
68399 return 0;
68400 @@ -3259,6 +3445,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
68401 */
68402 /* Only go through if we didn't race with anybody else... */
68403 if (likely(pte_same(*page_table, orig_pte))) {
68404 +
68405 +#ifdef CONFIG_PAX_SEGMEXEC
68406 + if (anon && pax_find_mirror_vma(vma))
68407 + BUG_ON(!trylock_page(page));
68408 +#endif
68409 +
68410 flush_icache_page(vma, page);
68411 entry = mk_pte(page, vma->vm_page_prot);
68412 if (flags & FAULT_FLAG_WRITE)
68413 @@ -3278,6 +3470,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
68414
68415 /* no need to invalidate: a not-present page won't be cached */
68416 update_mmu_cache(vma, address, page_table);
68417 +
68418 +#ifdef CONFIG_PAX_SEGMEXEC
68419 + if (anon)
68420 + pax_mirror_anon_pte(vma, address, page, ptl);
68421 + else
68422 + pax_mirror_file_pte(vma, address, page, ptl);
68423 +#endif
68424 +
68425 } else {
68426 if (cow_page)
68427 mem_cgroup_uncharge_page(cow_page);
68428 @@ -3431,6 +3631,12 @@ int handle_pte_fault(struct mm_struct *mm,
68429 if (flags & FAULT_FLAG_WRITE)
68430 flush_tlb_fix_spurious_fault(vma, address);
68431 }
68432 +
68433 +#ifdef CONFIG_PAX_SEGMEXEC
68434 + pax_mirror_pte(vma, address, pte, pmd, ptl);
68435 + return 0;
68436 +#endif
68437 +
68438 unlock:
68439 pte_unmap_unlock(pte, ptl);
68440 return 0;
68441 @@ -3447,6 +3653,10 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
68442 pmd_t *pmd;
68443 pte_t *pte;
68444
68445 +#ifdef CONFIG_PAX_SEGMEXEC
68446 + struct vm_area_struct *vma_m;
68447 +#endif
68448 +
68449 __set_current_state(TASK_RUNNING);
68450
68451 count_vm_event(PGFAULT);
68452 @@ -3458,6 +3668,34 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
68453 if (unlikely(is_vm_hugetlb_page(vma)))
68454 return hugetlb_fault(mm, vma, address, flags);
68455
68456 +#ifdef CONFIG_PAX_SEGMEXEC
68457 + vma_m = pax_find_mirror_vma(vma);
68458 + if (vma_m) {
68459 + unsigned long address_m;
68460 + pgd_t *pgd_m;
68461 + pud_t *pud_m;
68462 + pmd_t *pmd_m;
68463 +
68464 + if (vma->vm_start > vma_m->vm_start) {
68465 + address_m = address;
68466 + address -= SEGMEXEC_TASK_SIZE;
68467 + vma = vma_m;
68468 + } else
68469 + address_m = address + SEGMEXEC_TASK_SIZE;
68470 +
68471 + pgd_m = pgd_offset(mm, address_m);
68472 + pud_m = pud_alloc(mm, pgd_m, address_m);
68473 + if (!pud_m)
68474 + return VM_FAULT_OOM;
68475 + pmd_m = pmd_alloc(mm, pud_m, address_m);
68476 + if (!pmd_m)
68477 + return VM_FAULT_OOM;
68478 + if (!pmd_present(*pmd_m) && __pte_alloc(mm, vma_m, pmd_m, address_m))
68479 + return VM_FAULT_OOM;
68480 + pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
68481 + }
68482 +#endif
68483 +
68484 pgd = pgd_offset(mm, address);
68485 pud = pud_alloc(mm, pgd, address);
68486 if (!pud)
68487 @@ -3487,7 +3725,7 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
68488 * run pte_offset_map on the pmd, if an huge pmd could
68489 * materialize from under us from a different thread.
68490 */
68491 - if (unlikely(pmd_none(*pmd)) && __pte_alloc(mm, vma, pmd, address))
68492 + if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
68493 return VM_FAULT_OOM;
68494 /* if an huge pmd materialized from under us just retry later */
68495 if (unlikely(pmd_trans_huge(*pmd)))
68496 @@ -3591,7 +3829,7 @@ static int __init gate_vma_init(void)
68497 gate_vma.vm_start = FIXADDR_USER_START;
68498 gate_vma.vm_end = FIXADDR_USER_END;
68499 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
68500 - gate_vma.vm_page_prot = __P101;
68501 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
68502 /*
68503 * Make sure the vDSO gets into every core dump.
68504 * Dumping its contents makes post-mortem fully interpretable later
68505 diff --git a/mm/mempolicy.c b/mm/mempolicy.c
68506 index c3fdbcb..2e8ef90 100644
68507 --- a/mm/mempolicy.c
68508 +++ b/mm/mempolicy.c
68509 @@ -640,6 +640,10 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
68510 unsigned long vmstart;
68511 unsigned long vmend;
68512
68513 +#ifdef CONFIG_PAX_SEGMEXEC
68514 + struct vm_area_struct *vma_m;
68515 +#endif
68516 +
68517 vma = find_vma_prev(mm, start, &prev);
68518 if (!vma || vma->vm_start > start)
68519 return -EFAULT;
68520 @@ -678,6 +682,16 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
68521 err = policy_vma(vma, new_pol);
68522 if (err)
68523 goto out;
68524 +
68525 +#ifdef CONFIG_PAX_SEGMEXEC
68526 + vma_m = pax_find_mirror_vma(vma);
68527 + if (vma_m) {
68528 + err = policy_vma(vma_m, new_pol);
68529 + if (err)
68530 + goto out;
68531 + }
68532 +#endif
68533 +
68534 }
68535
68536 out:
68537 @@ -1111,6 +1125,17 @@ static long do_mbind(unsigned long start, unsigned long len,
68538
68539 if (end < start)
68540 return -EINVAL;
68541 +
68542 +#ifdef CONFIG_PAX_SEGMEXEC
68543 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
68544 + if (end > SEGMEXEC_TASK_SIZE)
68545 + return -EINVAL;
68546 + } else
68547 +#endif
68548 +
68549 + if (end > TASK_SIZE)
68550 + return -EINVAL;
68551 +
68552 if (end == start)
68553 return 0;
68554
68555 @@ -1329,6 +1354,14 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
68556 if (!mm)
68557 goto out;
68558
68559 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
68560 + if (mm != current->mm &&
68561 + (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
68562 + err = -EPERM;
68563 + goto out;
68564 + }
68565 +#endif
68566 +
68567 /*
68568 * Check if this process has the right to modify the specified
68569 * process. The right exists if the process has administrative
68570 @@ -1338,8 +1371,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
68571 rcu_read_lock();
68572 tcred = __task_cred(task);
68573 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
68574 - cred->uid != tcred->suid && cred->uid != tcred->uid &&
68575 - !capable(CAP_SYS_NICE)) {
68576 + cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
68577 rcu_read_unlock();
68578 err = -EPERM;
68579 goto out;
68580 diff --git a/mm/migrate.c b/mm/migrate.c
68581 index 177aca4..ab3a744 100644
68582 --- a/mm/migrate.c
68583 +++ b/mm/migrate.c
68584 @@ -1313,6 +1313,14 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
68585 if (!mm)
68586 return -EINVAL;
68587
68588 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
68589 + if (mm != current->mm &&
68590 + (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
68591 + err = -EPERM;
68592 + goto out;
68593 + }
68594 +#endif
68595 +
68596 /*
68597 * Check if this process has the right to modify the specified
68598 * process. The right exists if the process has administrative
68599 @@ -1322,8 +1330,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
68600 rcu_read_lock();
68601 tcred = __task_cred(task);
68602 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
68603 - cred->uid != tcred->suid && cred->uid != tcred->uid &&
68604 - !capable(CAP_SYS_NICE)) {
68605 + cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
68606 rcu_read_unlock();
68607 err = -EPERM;
68608 goto out;
68609 diff --git a/mm/mlock.c b/mm/mlock.c
68610 index 4f4f53b..9511904 100644
68611 --- a/mm/mlock.c
68612 +++ b/mm/mlock.c
68613 @@ -13,6 +13,7 @@
68614 #include <linux/pagemap.h>
68615 #include <linux/mempolicy.h>
68616 #include <linux/syscalls.h>
68617 +#include <linux/security.h>
68618 #include <linux/sched.h>
68619 #include <linux/export.h>
68620 #include <linux/rmap.h>
68621 @@ -385,6 +386,9 @@ static int do_mlock(unsigned long start, size_t len, int on)
68622 return -EINVAL;
68623 if (end == start)
68624 return 0;
68625 + if (end > TASK_SIZE)
68626 + return -EINVAL;
68627 +
68628 vma = find_vma_prev(current->mm, start, &prev);
68629 if (!vma || vma->vm_start > start)
68630 return -ENOMEM;
68631 @@ -395,6 +399,11 @@ static int do_mlock(unsigned long start, size_t len, int on)
68632 for (nstart = start ; ; ) {
68633 vm_flags_t newflags;
68634
68635 +#ifdef CONFIG_PAX_SEGMEXEC
68636 + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
68637 + break;
68638 +#endif
68639 +
68640 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
68641
68642 newflags = vma->vm_flags | VM_LOCKED;
68643 @@ -500,6 +509,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
68644 lock_limit >>= PAGE_SHIFT;
68645
68646 /* check against resource limits */
68647 + gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
68648 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
68649 error = do_mlock(start, len, 1);
68650 up_write(&current->mm->mmap_sem);
68651 @@ -523,17 +533,23 @@ SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
68652 static int do_mlockall(int flags)
68653 {
68654 struct vm_area_struct * vma, * prev = NULL;
68655 - unsigned int def_flags = 0;
68656
68657 if (flags & MCL_FUTURE)
68658 - def_flags = VM_LOCKED;
68659 - current->mm->def_flags = def_flags;
68660 + current->mm->def_flags |= VM_LOCKED;
68661 + else
68662 + current->mm->def_flags &= ~VM_LOCKED;
68663 if (flags == MCL_FUTURE)
68664 goto out;
68665
68666 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
68667 vm_flags_t newflags;
68668
68669 +#ifdef CONFIG_PAX_SEGMEXEC
68670 + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
68671 + break;
68672 +#endif
68673 +
68674 + BUG_ON(vma->vm_end > TASK_SIZE);
68675 newflags = vma->vm_flags | VM_LOCKED;
68676 if (!(flags & MCL_CURRENT))
68677 newflags &= ~VM_LOCKED;
68678 @@ -566,6 +582,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
68679 lock_limit >>= PAGE_SHIFT;
68680
68681 ret = -ENOMEM;
68682 + gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
68683 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
68684 capable(CAP_IPC_LOCK))
68685 ret = do_mlockall(flags);
68686 diff --git a/mm/mmap.c b/mm/mmap.c
68687 index eae90af..44552cf 100644
68688 --- a/mm/mmap.c
68689 +++ b/mm/mmap.c
68690 @@ -46,6 +46,16 @@
68691 #define arch_rebalance_pgtables(addr, len) (addr)
68692 #endif
68693
68694 +static inline void verify_mm_writelocked(struct mm_struct *mm)
68695 +{
68696 +#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
68697 + if (unlikely(down_read_trylock(&mm->mmap_sem))) {
68698 + up_read(&mm->mmap_sem);
68699 + BUG();
68700 + }
68701 +#endif
68702 +}
68703 +
68704 static void unmap_region(struct mm_struct *mm,
68705 struct vm_area_struct *vma, struct vm_area_struct *prev,
68706 unsigned long start, unsigned long end);
68707 @@ -71,22 +81,32 @@ static void unmap_region(struct mm_struct *mm,
68708 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
68709 *
68710 */
68711 -pgprot_t protection_map[16] = {
68712 +pgprot_t protection_map[16] __read_only = {
68713 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
68714 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
68715 };
68716
68717 -pgprot_t vm_get_page_prot(unsigned long vm_flags)
68718 +pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
68719 {
68720 - return __pgprot(pgprot_val(protection_map[vm_flags &
68721 + pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
68722 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
68723 pgprot_val(arch_vm_get_page_prot(vm_flags)));
68724 +
68725 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
68726 + if (!(__supported_pte_mask & _PAGE_NX) &&
68727 + (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
68728 + (vm_flags & (VM_READ | VM_WRITE)))
68729 + prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
68730 +#endif
68731 +
68732 + return prot;
68733 }
68734 EXPORT_SYMBOL(vm_get_page_prot);
68735
68736 int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS; /* heuristic overcommit */
68737 int sysctl_overcommit_ratio __read_mostly = 50; /* default is 50% */
68738 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
68739 +unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
68740 /*
68741 * Make sure vm_committed_as in one cacheline and not cacheline shared with
68742 * other variables. It can be updated by several CPUs frequently.
68743 @@ -228,6 +248,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
68744 struct vm_area_struct *next = vma->vm_next;
68745
68746 might_sleep();
68747 + BUG_ON(vma->vm_mirror);
68748 if (vma->vm_ops && vma->vm_ops->close)
68749 vma->vm_ops->close(vma);
68750 if (vma->vm_file) {
68751 @@ -272,6 +293,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
68752 * not page aligned -Ram Gupta
68753 */
68754 rlim = rlimit(RLIMIT_DATA);
68755 + gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
68756 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
68757 (mm->end_data - mm->start_data) > rlim)
68758 goto out;
68759 @@ -689,6 +711,12 @@ static int
68760 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
68761 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
68762 {
68763 +
68764 +#ifdef CONFIG_PAX_SEGMEXEC
68765 + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
68766 + return 0;
68767 +#endif
68768 +
68769 if (is_mergeable_vma(vma, file, vm_flags) &&
68770 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
68771 if (vma->vm_pgoff == vm_pgoff)
68772 @@ -708,6 +736,12 @@ static int
68773 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
68774 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
68775 {
68776 +
68777 +#ifdef CONFIG_PAX_SEGMEXEC
68778 + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
68779 + return 0;
68780 +#endif
68781 +
68782 if (is_mergeable_vma(vma, file, vm_flags) &&
68783 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
68784 pgoff_t vm_pglen;
68785 @@ -750,13 +784,20 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
68786 struct vm_area_struct *vma_merge(struct mm_struct *mm,
68787 struct vm_area_struct *prev, unsigned long addr,
68788 unsigned long end, unsigned long vm_flags,
68789 - struct anon_vma *anon_vma, struct file *file,
68790 + struct anon_vma *anon_vma, struct file *file,
68791 pgoff_t pgoff, struct mempolicy *policy)
68792 {
68793 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
68794 struct vm_area_struct *area, *next;
68795 int err;
68796
68797 +#ifdef CONFIG_PAX_SEGMEXEC
68798 + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
68799 + struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
68800 +
68801 + BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
68802 +#endif
68803 +
68804 /*
68805 * We later require that vma->vm_flags == vm_flags,
68806 * so this tests vma->vm_flags & VM_SPECIAL, too.
68807 @@ -772,6 +813,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
68808 if (next && next->vm_end == end) /* cases 6, 7, 8 */
68809 next = next->vm_next;
68810
68811 +#ifdef CONFIG_PAX_SEGMEXEC
68812 + if (prev)
68813 + prev_m = pax_find_mirror_vma(prev);
68814 + if (area)
68815 + area_m = pax_find_mirror_vma(area);
68816 + if (next)
68817 + next_m = pax_find_mirror_vma(next);
68818 +#endif
68819 +
68820 /*
68821 * Can it merge with the predecessor?
68822 */
68823 @@ -791,9 +841,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
68824 /* cases 1, 6 */
68825 err = vma_adjust(prev, prev->vm_start,
68826 next->vm_end, prev->vm_pgoff, NULL);
68827 - } else /* cases 2, 5, 7 */
68828 +
68829 +#ifdef CONFIG_PAX_SEGMEXEC
68830 + if (!err && prev_m)
68831 + err = vma_adjust(prev_m, prev_m->vm_start,
68832 + next_m->vm_end, prev_m->vm_pgoff, NULL);
68833 +#endif
68834 +
68835 + } else { /* cases 2, 5, 7 */
68836 err = vma_adjust(prev, prev->vm_start,
68837 end, prev->vm_pgoff, NULL);
68838 +
68839 +#ifdef CONFIG_PAX_SEGMEXEC
68840 + if (!err && prev_m)
68841 + err = vma_adjust(prev_m, prev_m->vm_start,
68842 + end_m, prev_m->vm_pgoff, NULL);
68843 +#endif
68844 +
68845 + }
68846 if (err)
68847 return NULL;
68848 khugepaged_enter_vma_merge(prev);
68849 @@ -807,12 +872,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
68850 mpol_equal(policy, vma_policy(next)) &&
68851 can_vma_merge_before(next, vm_flags,
68852 anon_vma, file, pgoff+pglen)) {
68853 - if (prev && addr < prev->vm_end) /* case 4 */
68854 + if (prev && addr < prev->vm_end) { /* case 4 */
68855 err = vma_adjust(prev, prev->vm_start,
68856 addr, prev->vm_pgoff, NULL);
68857 - else /* cases 3, 8 */
68858 +
68859 +#ifdef CONFIG_PAX_SEGMEXEC
68860 + if (!err && prev_m)
68861 + err = vma_adjust(prev_m, prev_m->vm_start,
68862 + addr_m, prev_m->vm_pgoff, NULL);
68863 +#endif
68864 +
68865 + } else { /* cases 3, 8 */
68866 err = vma_adjust(area, addr, next->vm_end,
68867 next->vm_pgoff - pglen, NULL);
68868 +
68869 +#ifdef CONFIG_PAX_SEGMEXEC
68870 + if (!err && area_m)
68871 + err = vma_adjust(area_m, addr_m, next_m->vm_end,
68872 + next_m->vm_pgoff - pglen, NULL);
68873 +#endif
68874 +
68875 + }
68876 if (err)
68877 return NULL;
68878 khugepaged_enter_vma_merge(area);
68879 @@ -921,14 +1001,11 @@ none:
68880 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
68881 struct file *file, long pages)
68882 {
68883 - const unsigned long stack_flags
68884 - = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
68885 -
68886 if (file) {
68887 mm->shared_vm += pages;
68888 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
68889 mm->exec_vm += pages;
68890 - } else if (flags & stack_flags)
68891 + } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
68892 mm->stack_vm += pages;
68893 if (flags & (VM_RESERVED|VM_IO))
68894 mm->reserved_vm += pages;
68895 @@ -955,7 +1032,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
68896 * (the exception is when the underlying filesystem is noexec
68897 * mounted, in which case we dont add PROT_EXEC.)
68898 */
68899 - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
68900 + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
68901 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
68902 prot |= PROT_EXEC;
68903
68904 @@ -981,7 +1058,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
68905 /* Obtain the address to map to. we verify (or select) it and ensure
68906 * that it represents a valid section of the address space.
68907 */
68908 - addr = get_unmapped_area(file, addr, len, pgoff, flags);
68909 + addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
68910 if (addr & ~PAGE_MASK)
68911 return addr;
68912
68913 @@ -992,6 +1069,36 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
68914 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
68915 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
68916
68917 +#ifdef CONFIG_PAX_MPROTECT
68918 + if (mm->pax_flags & MF_PAX_MPROTECT) {
68919 +#ifndef CONFIG_PAX_MPROTECT_COMPAT
68920 + if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
68921 + gr_log_rwxmmap(file);
68922 +
68923 +#ifdef CONFIG_PAX_EMUPLT
68924 + vm_flags &= ~VM_EXEC;
68925 +#else
68926 + return -EPERM;
68927 +#endif
68928 +
68929 + }
68930 +
68931 + if (!(vm_flags & VM_EXEC))
68932 + vm_flags &= ~VM_MAYEXEC;
68933 +#else
68934 + if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
68935 + vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
68936 +#endif
68937 + else
68938 + vm_flags &= ~VM_MAYWRITE;
68939 + }
68940 +#endif
68941 +
68942 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
68943 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
68944 + vm_flags &= ~VM_PAGEEXEC;
68945 +#endif
68946 +
68947 if (flags & MAP_LOCKED)
68948 if (!can_do_mlock())
68949 return -EPERM;
68950 @@ -1003,6 +1110,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
68951 locked += mm->locked_vm;
68952 lock_limit = rlimit(RLIMIT_MEMLOCK);
68953 lock_limit >>= PAGE_SHIFT;
68954 + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
68955 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
68956 return -EAGAIN;
68957 }
68958 @@ -1073,6 +1181,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
68959 if (error)
68960 return error;
68961
68962 + if (!gr_acl_handle_mmap(file, prot))
68963 + return -EACCES;
68964 +
68965 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
68966 }
68967 EXPORT_SYMBOL(do_mmap_pgoff);
68968 @@ -1153,7 +1264,7 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
68969 vm_flags_t vm_flags = vma->vm_flags;
68970
68971 /* If it was private or non-writable, the write bit is already clear */
68972 - if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
68973 + if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
68974 return 0;
68975
68976 /* The backer wishes to know when pages are first written to? */
68977 @@ -1202,14 +1313,24 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
68978 unsigned long charged = 0;
68979 struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
68980
68981 +#ifdef CONFIG_PAX_SEGMEXEC
68982 + struct vm_area_struct *vma_m = NULL;
68983 +#endif
68984 +
68985 + /*
68986 + * mm->mmap_sem is required to protect against another thread
68987 + * changing the mappings in case we sleep.
68988 + */
68989 + verify_mm_writelocked(mm);
68990 +
68991 /* Clear old maps */
68992 error = -ENOMEM;
68993 -munmap_back:
68994 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
68995 if (vma && vma->vm_start < addr + len) {
68996 if (do_munmap(mm, addr, len))
68997 return -ENOMEM;
68998 - goto munmap_back;
68999 + vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
69000 + BUG_ON(vma && vma->vm_start < addr + len);
69001 }
69002
69003 /* Check against address space limit. */
69004 @@ -1258,6 +1379,16 @@ munmap_back:
69005 goto unacct_error;
69006 }
69007
69008 +#ifdef CONFIG_PAX_SEGMEXEC
69009 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
69010 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
69011 + if (!vma_m) {
69012 + error = -ENOMEM;
69013 + goto free_vma;
69014 + }
69015 + }
69016 +#endif
69017 +
69018 vma->vm_mm = mm;
69019 vma->vm_start = addr;
69020 vma->vm_end = addr + len;
69021 @@ -1281,6 +1412,19 @@ munmap_back:
69022 error = file->f_op->mmap(file, vma);
69023 if (error)
69024 goto unmap_and_free_vma;
69025 +
69026 +#ifdef CONFIG_PAX_SEGMEXEC
69027 + if (vma_m && (vm_flags & VM_EXECUTABLE))
69028 + added_exe_file_vma(mm);
69029 +#endif
69030 +
69031 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
69032 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
69033 + vma->vm_flags |= VM_PAGEEXEC;
69034 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
69035 + }
69036 +#endif
69037 +
69038 if (vm_flags & VM_EXECUTABLE)
69039 added_exe_file_vma(mm);
69040
69041 @@ -1316,6 +1460,11 @@ munmap_back:
69042 vma_link(mm, vma, prev, rb_link, rb_parent);
69043 file = vma->vm_file;
69044
69045 +#ifdef CONFIG_PAX_SEGMEXEC
69046 + if (vma_m)
69047 + BUG_ON(pax_mirror_vma(vma_m, vma));
69048 +#endif
69049 +
69050 /* Once vma denies write, undo our temporary denial count */
69051 if (correct_wcount)
69052 atomic_inc(&inode->i_writecount);
69053 @@ -1324,6 +1473,7 @@ out:
69054
69055 mm->total_vm += len >> PAGE_SHIFT;
69056 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
69057 + track_exec_limit(mm, addr, addr + len, vm_flags);
69058 if (vm_flags & VM_LOCKED) {
69059 if (!mlock_vma_pages_range(vma, addr, addr + len))
69060 mm->locked_vm += (len >> PAGE_SHIFT);
69061 @@ -1341,6 +1491,12 @@ unmap_and_free_vma:
69062 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
69063 charged = 0;
69064 free_vma:
69065 +
69066 +#ifdef CONFIG_PAX_SEGMEXEC
69067 + if (vma_m)
69068 + kmem_cache_free(vm_area_cachep, vma_m);
69069 +#endif
69070 +
69071 kmem_cache_free(vm_area_cachep, vma);
69072 unacct_error:
69073 if (charged)
69074 @@ -1348,6 +1504,44 @@ unacct_error:
69075 return error;
69076 }
69077
69078 +bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len)
69079 +{
69080 + if (!vma) {
69081 +#ifdef CONFIG_STACK_GROWSUP
69082 + if (addr > sysctl_heap_stack_gap)
69083 + vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
69084 + else
69085 + vma = find_vma(current->mm, 0);
69086 + if (vma && (vma->vm_flags & VM_GROWSUP))
69087 + return false;
69088 +#endif
69089 + return true;
69090 + }
69091 +
69092 + if (addr + len > vma->vm_start)
69093 + return false;
69094 +
69095 + if (vma->vm_flags & VM_GROWSDOWN)
69096 + return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
69097 +#ifdef CONFIG_STACK_GROWSUP
69098 + else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
69099 + return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
69100 +#endif
69101 +
69102 + return true;
69103 +}
69104 +
69105 +unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len)
69106 +{
69107 + if (vma->vm_start < len)
69108 + return -ENOMEM;
69109 + if (!(vma->vm_flags & VM_GROWSDOWN))
69110 + return vma->vm_start - len;
69111 + if (sysctl_heap_stack_gap <= vma->vm_start - len)
69112 + return vma->vm_start - len - sysctl_heap_stack_gap;
69113 + return -ENOMEM;
69114 +}
69115 +
69116 /* Get an address range which is currently unmapped.
69117 * For shmat() with addr=0.
69118 *
69119 @@ -1374,18 +1568,23 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
69120 if (flags & MAP_FIXED)
69121 return addr;
69122
69123 +#ifdef CONFIG_PAX_RANDMMAP
69124 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
69125 +#endif
69126 +
69127 if (addr) {
69128 addr = PAGE_ALIGN(addr);
69129 - vma = find_vma(mm, addr);
69130 - if (TASK_SIZE - len >= addr &&
69131 - (!vma || addr + len <= vma->vm_start))
69132 - return addr;
69133 + if (TASK_SIZE - len >= addr) {
69134 + vma = find_vma(mm, addr);
69135 + if (check_heap_stack_gap(vma, addr, len))
69136 + return addr;
69137 + }
69138 }
69139 if (len > mm->cached_hole_size) {
69140 - start_addr = addr = mm->free_area_cache;
69141 + start_addr = addr = mm->free_area_cache;
69142 } else {
69143 - start_addr = addr = TASK_UNMAPPED_BASE;
69144 - mm->cached_hole_size = 0;
69145 + start_addr = addr = mm->mmap_base;
69146 + mm->cached_hole_size = 0;
69147 }
69148
69149 full_search:
69150 @@ -1396,34 +1595,40 @@ full_search:
69151 * Start a new search - just in case we missed
69152 * some holes.
69153 */
69154 - if (start_addr != TASK_UNMAPPED_BASE) {
69155 - addr = TASK_UNMAPPED_BASE;
69156 - start_addr = addr;
69157 + if (start_addr != mm->mmap_base) {
69158 + start_addr = addr = mm->mmap_base;
69159 mm->cached_hole_size = 0;
69160 goto full_search;
69161 }
69162 return -ENOMEM;
69163 }
69164 - if (!vma || addr + len <= vma->vm_start) {
69165 - /*
69166 - * Remember the place where we stopped the search:
69167 - */
69168 - mm->free_area_cache = addr + len;
69169 - return addr;
69170 - }
69171 + if (check_heap_stack_gap(vma, addr, len))
69172 + break;
69173 if (addr + mm->cached_hole_size < vma->vm_start)
69174 mm->cached_hole_size = vma->vm_start - addr;
69175 addr = vma->vm_end;
69176 }
69177 +
69178 + /*
69179 + * Remember the place where we stopped the search:
69180 + */
69181 + mm->free_area_cache = addr + len;
69182 + return addr;
69183 }
69184 #endif
69185
69186 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
69187 {
69188 +
69189 +#ifdef CONFIG_PAX_SEGMEXEC
69190 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
69191 + return;
69192 +#endif
69193 +
69194 /*
69195 * Is this a new hole at the lowest possible address?
69196 */
69197 - if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) {
69198 + if (addr >= mm->mmap_base && addr < mm->free_area_cache) {
69199 mm->free_area_cache = addr;
69200 mm->cached_hole_size = ~0UL;
69201 }
69202 @@ -1441,7 +1646,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
69203 {
69204 struct vm_area_struct *vma;
69205 struct mm_struct *mm = current->mm;
69206 - unsigned long addr = addr0;
69207 + unsigned long base = mm->mmap_base, addr = addr0;
69208
69209 /* requested length too big for entire address space */
69210 if (len > TASK_SIZE)
69211 @@ -1450,13 +1655,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
69212 if (flags & MAP_FIXED)
69213 return addr;
69214
69215 +#ifdef CONFIG_PAX_RANDMMAP
69216 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
69217 +#endif
69218 +
69219 /* requesting a specific address */
69220 if (addr) {
69221 addr = PAGE_ALIGN(addr);
69222 - vma = find_vma(mm, addr);
69223 - if (TASK_SIZE - len >= addr &&
69224 - (!vma || addr + len <= vma->vm_start))
69225 - return addr;
69226 + if (TASK_SIZE - len >= addr) {
69227 + vma = find_vma(mm, addr);
69228 + if (check_heap_stack_gap(vma, addr, len))
69229 + return addr;
69230 + }
69231 }
69232
69233 /* check if free_area_cache is useful for us */
69234 @@ -1471,7 +1681,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
69235 /* make sure it can fit in the remaining address space */
69236 if (addr > len) {
69237 vma = find_vma(mm, addr-len);
69238 - if (!vma || addr <= vma->vm_start)
69239 + if (check_heap_stack_gap(vma, addr - len, len))
69240 /* remember the address as a hint for next time */
69241 return (mm->free_area_cache = addr-len);
69242 }
69243 @@ -1488,7 +1698,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
69244 * return with success:
69245 */
69246 vma = find_vma(mm, addr);
69247 - if (!vma || addr+len <= vma->vm_start)
69248 + if (check_heap_stack_gap(vma, addr, len))
69249 /* remember the address as a hint for next time */
69250 return (mm->free_area_cache = addr);
69251
69252 @@ -1497,8 +1707,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
69253 mm->cached_hole_size = vma->vm_start - addr;
69254
69255 /* try just below the current vma->vm_start */
69256 - addr = vma->vm_start-len;
69257 - } while (len < vma->vm_start);
69258 + addr = skip_heap_stack_gap(vma, len);
69259 + } while (!IS_ERR_VALUE(addr));
69260
69261 bottomup:
69262 /*
69263 @@ -1507,13 +1717,21 @@ bottomup:
69264 * can happen with large stack limits and large mmap()
69265 * allocations.
69266 */
69267 + mm->mmap_base = TASK_UNMAPPED_BASE;
69268 +
69269 +#ifdef CONFIG_PAX_RANDMMAP
69270 + if (mm->pax_flags & MF_PAX_RANDMMAP)
69271 + mm->mmap_base += mm->delta_mmap;
69272 +#endif
69273 +
69274 + mm->free_area_cache = mm->mmap_base;
69275 mm->cached_hole_size = ~0UL;
69276 - mm->free_area_cache = TASK_UNMAPPED_BASE;
69277 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
69278 /*
69279 * Restore the topdown base:
69280 */
69281 - mm->free_area_cache = mm->mmap_base;
69282 + mm->mmap_base = base;
69283 + mm->free_area_cache = base;
69284 mm->cached_hole_size = ~0UL;
69285
69286 return addr;
69287 @@ -1522,6 +1740,12 @@ bottomup:
69288
69289 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
69290 {
69291 +
69292 +#ifdef CONFIG_PAX_SEGMEXEC
69293 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
69294 + return;
69295 +#endif
69296 +
69297 /*
69298 * Is this a new hole at the highest possible address?
69299 */
69300 @@ -1529,8 +1753,10 @@ void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
69301 mm->free_area_cache = addr;
69302
69303 /* dont allow allocations above current base */
69304 - if (mm->free_area_cache > mm->mmap_base)
69305 + if (mm->free_area_cache > mm->mmap_base) {
69306 mm->free_area_cache = mm->mmap_base;
69307 + mm->cached_hole_size = ~0UL;
69308 + }
69309 }
69310
69311 unsigned long
69312 @@ -1603,40 +1829,42 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
69313
69314 EXPORT_SYMBOL(find_vma);
69315
69316 -/* Same as find_vma, but also return a pointer to the previous VMA in *pprev. */
69317 +/*
69318 + * Same as find_vma, but also return a pointer to the previous VMA in *pprev.
69319 + * Note: pprev is set to NULL when return value is NULL.
69320 + */
69321 struct vm_area_struct *
69322 find_vma_prev(struct mm_struct *mm, unsigned long addr,
69323 struct vm_area_struct **pprev)
69324 {
69325 - struct vm_area_struct *vma = NULL, *prev = NULL;
69326 - struct rb_node *rb_node;
69327 - if (!mm)
69328 - goto out;
69329 + struct vm_area_struct *vma;
69330
69331 - /* Guard against addr being lower than the first VMA */
69332 - vma = mm->mmap;
69333 + vma = find_vma(mm, addr);
69334 + *pprev = vma ? vma->vm_prev : NULL;
69335 + return vma;
69336 +}
69337
69338 - /* Go through the RB tree quickly. */
69339 - rb_node = mm->mm_rb.rb_node;
69340 +#ifdef CONFIG_PAX_SEGMEXEC
69341 +struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
69342 +{
69343 + struct vm_area_struct *vma_m;
69344
69345 - while (rb_node) {
69346 - struct vm_area_struct *vma_tmp;
69347 - vma_tmp = rb_entry(rb_node, struct vm_area_struct, vm_rb);
69348 -
69349 - if (addr < vma_tmp->vm_end) {
69350 - rb_node = rb_node->rb_left;
69351 - } else {
69352 - prev = vma_tmp;
69353 - if (!prev->vm_next || (addr < prev->vm_next->vm_end))
69354 - break;
69355 - rb_node = rb_node->rb_right;
69356 - }
69357 + BUG_ON(!vma || vma->vm_start >= vma->vm_end);
69358 + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
69359 + BUG_ON(vma->vm_mirror);
69360 + return NULL;
69361 }
69362 -
69363 -out:
69364 - *pprev = prev;
69365 - return prev ? prev->vm_next : vma;
69366 + BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
69367 + vma_m = vma->vm_mirror;
69368 + BUG_ON(!vma_m || vma_m->vm_mirror != vma);
69369 + BUG_ON(vma->vm_file != vma_m->vm_file);
69370 + BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
69371 + BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff);
69372 + BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root);
69373 + BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED | VM_RESERVED));
69374 + return vma_m;
69375 }
69376 +#endif
69377
69378 /*
69379 * Verify that the stack growth is acceptable and
69380 @@ -1654,6 +1882,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
69381 return -ENOMEM;
69382
69383 /* Stack limit test */
69384 + gr_learn_resource(current, RLIMIT_STACK, size, 1);
69385 if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
69386 return -ENOMEM;
69387
69388 @@ -1664,6 +1893,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
69389 locked = mm->locked_vm + grow;
69390 limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
69391 limit >>= PAGE_SHIFT;
69392 + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
69393 if (locked > limit && !capable(CAP_IPC_LOCK))
69394 return -ENOMEM;
69395 }
69396 @@ -1694,37 +1924,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
69397 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
69398 * vma is the last one with address > vma->vm_end. Have to extend vma.
69399 */
69400 +#ifndef CONFIG_IA64
69401 +static
69402 +#endif
69403 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
69404 {
69405 int error;
69406 + bool locknext;
69407
69408 if (!(vma->vm_flags & VM_GROWSUP))
69409 return -EFAULT;
69410
69411 + /* Also guard against wrapping around to address 0. */
69412 + if (address < PAGE_ALIGN(address+1))
69413 + address = PAGE_ALIGN(address+1);
69414 + else
69415 + return -ENOMEM;
69416 +
69417 /*
69418 * We must make sure the anon_vma is allocated
69419 * so that the anon_vma locking is not a noop.
69420 */
69421 if (unlikely(anon_vma_prepare(vma)))
69422 return -ENOMEM;
69423 + locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
69424 + if (locknext && anon_vma_prepare(vma->vm_next))
69425 + return -ENOMEM;
69426 vma_lock_anon_vma(vma);
69427 + if (locknext)
69428 + vma_lock_anon_vma(vma->vm_next);
69429
69430 /*
69431 * vma->vm_start/vm_end cannot change under us because the caller
69432 * is required to hold the mmap_sem in read mode. We need the
69433 - * anon_vma lock to serialize against concurrent expand_stacks.
69434 - * Also guard against wrapping around to address 0.
69435 + * anon_vma locks to serialize against concurrent expand_stacks
69436 + * and expand_upwards.
69437 */
69438 - if (address < PAGE_ALIGN(address+4))
69439 - address = PAGE_ALIGN(address+4);
69440 - else {
69441 - vma_unlock_anon_vma(vma);
69442 - return -ENOMEM;
69443 - }
69444 error = 0;
69445
69446 /* Somebody else might have raced and expanded it already */
69447 - if (address > vma->vm_end) {
69448 + if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
69449 + error = -ENOMEM;
69450 + else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
69451 unsigned long size, grow;
69452
69453 size = address - vma->vm_start;
69454 @@ -1739,6 +1980,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
69455 }
69456 }
69457 }
69458 + if (locknext)
69459 + vma_unlock_anon_vma(vma->vm_next);
69460 vma_unlock_anon_vma(vma);
69461 khugepaged_enter_vma_merge(vma);
69462 return error;
69463 @@ -1752,6 +1995,8 @@ int expand_downwards(struct vm_area_struct *vma,
69464 unsigned long address)
69465 {
69466 int error;
69467 + bool lockprev = false;
69468 + struct vm_area_struct *prev;
69469
69470 /*
69471 * We must make sure the anon_vma is allocated
69472 @@ -1765,6 +2010,15 @@ int expand_downwards(struct vm_area_struct *vma,
69473 if (error)
69474 return error;
69475
69476 + prev = vma->vm_prev;
69477 +#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
69478 + lockprev = prev && (prev->vm_flags & VM_GROWSUP);
69479 +#endif
69480 + if (lockprev && anon_vma_prepare(prev))
69481 + return -ENOMEM;
69482 + if (lockprev)
69483 + vma_lock_anon_vma(prev);
69484 +
69485 vma_lock_anon_vma(vma);
69486
69487 /*
69488 @@ -1774,9 +2028,17 @@ int expand_downwards(struct vm_area_struct *vma,
69489 */
69490
69491 /* Somebody else might have raced and expanded it already */
69492 - if (address < vma->vm_start) {
69493 + if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
69494 + error = -ENOMEM;
69495 + else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
69496 unsigned long size, grow;
69497
69498 +#ifdef CONFIG_PAX_SEGMEXEC
69499 + struct vm_area_struct *vma_m;
69500 +
69501 + vma_m = pax_find_mirror_vma(vma);
69502 +#endif
69503 +
69504 size = vma->vm_end - address;
69505 grow = (vma->vm_start - address) >> PAGE_SHIFT;
69506
69507 @@ -1786,11 +2048,22 @@ int expand_downwards(struct vm_area_struct *vma,
69508 if (!error) {
69509 vma->vm_start = address;
69510 vma->vm_pgoff -= grow;
69511 + track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
69512 +
69513 +#ifdef CONFIG_PAX_SEGMEXEC
69514 + if (vma_m) {
69515 + vma_m->vm_start -= grow << PAGE_SHIFT;
69516 + vma_m->vm_pgoff -= grow;
69517 + }
69518 +#endif
69519 +
69520 perf_event_mmap(vma);
69521 }
69522 }
69523 }
69524 vma_unlock_anon_vma(vma);
69525 + if (lockprev)
69526 + vma_unlock_anon_vma(prev);
69527 khugepaged_enter_vma_merge(vma);
69528 return error;
69529 }
69530 @@ -1860,6 +2133,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
69531 do {
69532 long nrpages = vma_pages(vma);
69533
69534 +#ifdef CONFIG_PAX_SEGMEXEC
69535 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
69536 + vma = remove_vma(vma);
69537 + continue;
69538 + }
69539 +#endif
69540 +
69541 mm->total_vm -= nrpages;
69542 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
69543 vma = remove_vma(vma);
69544 @@ -1905,6 +2185,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
69545 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
69546 vma->vm_prev = NULL;
69547 do {
69548 +
69549 +#ifdef CONFIG_PAX_SEGMEXEC
69550 + if (vma->vm_mirror) {
69551 + BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
69552 + vma->vm_mirror->vm_mirror = NULL;
69553 + vma->vm_mirror->vm_flags &= ~VM_EXEC;
69554 + vma->vm_mirror = NULL;
69555 + }
69556 +#endif
69557 +
69558 rb_erase(&vma->vm_rb, &mm->mm_rb);
69559 mm->map_count--;
69560 tail_vma = vma;
69561 @@ -1933,14 +2223,33 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
69562 struct vm_area_struct *new;
69563 int err = -ENOMEM;
69564
69565 +#ifdef CONFIG_PAX_SEGMEXEC
69566 + struct vm_area_struct *vma_m, *new_m = NULL;
69567 + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
69568 +#endif
69569 +
69570 if (is_vm_hugetlb_page(vma) && (addr &
69571 ~(huge_page_mask(hstate_vma(vma)))))
69572 return -EINVAL;
69573
69574 +#ifdef CONFIG_PAX_SEGMEXEC
69575 + vma_m = pax_find_mirror_vma(vma);
69576 +#endif
69577 +
69578 new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
69579 if (!new)
69580 goto out_err;
69581
69582 +#ifdef CONFIG_PAX_SEGMEXEC
69583 + if (vma_m) {
69584 + new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
69585 + if (!new_m) {
69586 + kmem_cache_free(vm_area_cachep, new);
69587 + goto out_err;
69588 + }
69589 + }
69590 +#endif
69591 +
69592 /* most fields are the same, copy all, and then fixup */
69593 *new = *vma;
69594
69595 @@ -1953,6 +2262,22 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
69596 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
69597 }
69598
69599 +#ifdef CONFIG_PAX_SEGMEXEC
69600 + if (vma_m) {
69601 + *new_m = *vma_m;
69602 + INIT_LIST_HEAD(&new_m->anon_vma_chain);
69603 + new_m->vm_mirror = new;
69604 + new->vm_mirror = new_m;
69605 +
69606 + if (new_below)
69607 + new_m->vm_end = addr_m;
69608 + else {
69609 + new_m->vm_start = addr_m;
69610 + new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
69611 + }
69612 + }
69613 +#endif
69614 +
69615 pol = mpol_dup(vma_policy(vma));
69616 if (IS_ERR(pol)) {
69617 err = PTR_ERR(pol);
69618 @@ -1978,6 +2303,42 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
69619 else
69620 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
69621
69622 +#ifdef CONFIG_PAX_SEGMEXEC
69623 + if (!err && vma_m) {
69624 + if (anon_vma_clone(new_m, vma_m))
69625 + goto out_free_mpol;
69626 +
69627 + mpol_get(pol);
69628 + vma_set_policy(new_m, pol);
69629 +
69630 + if (new_m->vm_file) {
69631 + get_file(new_m->vm_file);
69632 + if (vma_m->vm_flags & VM_EXECUTABLE)
69633 + added_exe_file_vma(mm);
69634 + }
69635 +
69636 + if (new_m->vm_ops && new_m->vm_ops->open)
69637 + new_m->vm_ops->open(new_m);
69638 +
69639 + if (new_below)
69640 + err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
69641 + ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
69642 + else
69643 + err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
69644 +
69645 + if (err) {
69646 + if (new_m->vm_ops && new_m->vm_ops->close)
69647 + new_m->vm_ops->close(new_m);
69648 + if (new_m->vm_file) {
69649 + if (vma_m->vm_flags & VM_EXECUTABLE)
69650 + removed_exe_file_vma(mm);
69651 + fput(new_m->vm_file);
69652 + }
69653 + mpol_put(pol);
69654 + }
69655 + }
69656 +#endif
69657 +
69658 /* Success. */
69659 if (!err)
69660 return 0;
69661 @@ -1990,10 +2351,18 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
69662 removed_exe_file_vma(mm);
69663 fput(new->vm_file);
69664 }
69665 - unlink_anon_vmas(new);
69666 out_free_mpol:
69667 mpol_put(pol);
69668 out_free_vma:
69669 +
69670 +#ifdef CONFIG_PAX_SEGMEXEC
69671 + if (new_m) {
69672 + unlink_anon_vmas(new_m);
69673 + kmem_cache_free(vm_area_cachep, new_m);
69674 + }
69675 +#endif
69676 +
69677 + unlink_anon_vmas(new);
69678 kmem_cache_free(vm_area_cachep, new);
69679 out_err:
69680 return err;
69681 @@ -2006,6 +2375,15 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
69682 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
69683 unsigned long addr, int new_below)
69684 {
69685 +
69686 +#ifdef CONFIG_PAX_SEGMEXEC
69687 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
69688 + BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
69689 + if (mm->map_count >= sysctl_max_map_count-1)
69690 + return -ENOMEM;
69691 + } else
69692 +#endif
69693 +
69694 if (mm->map_count >= sysctl_max_map_count)
69695 return -ENOMEM;
69696
69697 @@ -2017,11 +2395,30 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
69698 * work. This now handles partial unmappings.
69699 * Jeremy Fitzhardinge <jeremy@goop.org>
69700 */
69701 +#ifdef CONFIG_PAX_SEGMEXEC
69702 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
69703 {
69704 + int ret = __do_munmap(mm, start, len);
69705 + if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
69706 + return ret;
69707 +
69708 + return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
69709 +}
69710 +
69711 +int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
69712 +#else
69713 +int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
69714 +#endif
69715 +{
69716 unsigned long end;
69717 struct vm_area_struct *vma, *prev, *last;
69718
69719 + /*
69720 + * mm->mmap_sem is required to protect against another thread
69721 + * changing the mappings in case we sleep.
69722 + */
69723 + verify_mm_writelocked(mm);
69724 +
69725 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
69726 return -EINVAL;
69727
69728 @@ -2096,6 +2493,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
69729 /* Fix up all other VM information */
69730 remove_vma_list(mm, vma);
69731
69732 + track_exec_limit(mm, start, end, 0UL);
69733 +
69734 return 0;
69735 }
69736
69737 @@ -2108,22 +2507,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
69738
69739 profile_munmap(addr);
69740
69741 +#ifdef CONFIG_PAX_SEGMEXEC
69742 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
69743 + (len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-len))
69744 + return -EINVAL;
69745 +#endif
69746 +
69747 down_write(&mm->mmap_sem);
69748 ret = do_munmap(mm, addr, len);
69749 up_write(&mm->mmap_sem);
69750 return ret;
69751 }
69752
69753 -static inline void verify_mm_writelocked(struct mm_struct *mm)
69754 -{
69755 -#ifdef CONFIG_DEBUG_VM
69756 - if (unlikely(down_read_trylock(&mm->mmap_sem))) {
69757 - WARN_ON(1);
69758 - up_read(&mm->mmap_sem);
69759 - }
69760 -#endif
69761 -}
69762 -
69763 /*
69764 * this is really a simplified "do_mmap". it only handles
69765 * anonymous maps. eventually we may be able to do some
69766 @@ -2137,6 +2532,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
69767 struct rb_node ** rb_link, * rb_parent;
69768 pgoff_t pgoff = addr >> PAGE_SHIFT;
69769 int error;
69770 + unsigned long charged;
69771
69772 len = PAGE_ALIGN(len);
69773 if (!len)
69774 @@ -2148,16 +2544,30 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
69775
69776 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
69777
69778 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
69779 + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
69780 + flags &= ~VM_EXEC;
69781 +
69782 +#ifdef CONFIG_PAX_MPROTECT
69783 + if (mm->pax_flags & MF_PAX_MPROTECT)
69784 + flags &= ~VM_MAYEXEC;
69785 +#endif
69786 +
69787 + }
69788 +#endif
69789 +
69790 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
69791 if (error & ~PAGE_MASK)
69792 return error;
69793
69794 + charged = len >> PAGE_SHIFT;
69795 +
69796 /*
69797 * mlock MCL_FUTURE?
69798 */
69799 if (mm->def_flags & VM_LOCKED) {
69800 unsigned long locked, lock_limit;
69801 - locked = len >> PAGE_SHIFT;
69802 + locked = charged;
69803 locked += mm->locked_vm;
69804 lock_limit = rlimit(RLIMIT_MEMLOCK);
69805 lock_limit >>= PAGE_SHIFT;
69806 @@ -2174,22 +2584,22 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
69807 /*
69808 * Clear old maps. this also does some error checking for us
69809 */
69810 - munmap_back:
69811 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
69812 if (vma && vma->vm_start < addr + len) {
69813 if (do_munmap(mm, addr, len))
69814 return -ENOMEM;
69815 - goto munmap_back;
69816 + vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
69817 + BUG_ON(vma && vma->vm_start < addr + len);
69818 }
69819
69820 /* Check against address space limits *after* clearing old maps... */
69821 - if (!may_expand_vm(mm, len >> PAGE_SHIFT))
69822 + if (!may_expand_vm(mm, charged))
69823 return -ENOMEM;
69824
69825 if (mm->map_count > sysctl_max_map_count)
69826 return -ENOMEM;
69827
69828 - if (security_vm_enough_memory(len >> PAGE_SHIFT))
69829 + if (security_vm_enough_memory(charged))
69830 return -ENOMEM;
69831
69832 /* Can we just expand an old private anonymous mapping? */
69833 @@ -2203,7 +2613,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
69834 */
69835 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
69836 if (!vma) {
69837 - vm_unacct_memory(len >> PAGE_SHIFT);
69838 + vm_unacct_memory(charged);
69839 return -ENOMEM;
69840 }
69841
69842 @@ -2217,11 +2627,12 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
69843 vma_link(mm, vma, prev, rb_link, rb_parent);
69844 out:
69845 perf_event_mmap(vma);
69846 - mm->total_vm += len >> PAGE_SHIFT;
69847 + mm->total_vm += charged;
69848 if (flags & VM_LOCKED) {
69849 if (!mlock_vma_pages_range(vma, addr, addr + len))
69850 - mm->locked_vm += (len >> PAGE_SHIFT);
69851 + mm->locked_vm += charged;
69852 }
69853 + track_exec_limit(mm, addr, addr + len, flags);
69854 return addr;
69855 }
69856
69857 @@ -2268,8 +2679,10 @@ void exit_mmap(struct mm_struct *mm)
69858 * Walk the list again, actually closing and freeing it,
69859 * with preemption enabled, without holding any MM locks.
69860 */
69861 - while (vma)
69862 + while (vma) {
69863 + vma->vm_mirror = NULL;
69864 vma = remove_vma(vma);
69865 + }
69866
69867 BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
69868 }
69869 @@ -2283,6 +2696,13 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
69870 struct vm_area_struct * __vma, * prev;
69871 struct rb_node ** rb_link, * rb_parent;
69872
69873 +#ifdef CONFIG_PAX_SEGMEXEC
69874 + struct vm_area_struct *vma_m = NULL;
69875 +#endif
69876 +
69877 + if (security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1))
69878 + return -EPERM;
69879 +
69880 /*
69881 * The vm_pgoff of a purely anonymous vma should be irrelevant
69882 * until its first write fault, when page's anon_vma and index
69883 @@ -2305,7 +2725,22 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
69884 if ((vma->vm_flags & VM_ACCOUNT) &&
69885 security_vm_enough_memory_mm(mm, vma_pages(vma)))
69886 return -ENOMEM;
69887 +
69888 +#ifdef CONFIG_PAX_SEGMEXEC
69889 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
69890 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
69891 + if (!vma_m)
69892 + return -ENOMEM;
69893 + }
69894 +#endif
69895 +
69896 vma_link(mm, vma, prev, rb_link, rb_parent);
69897 +
69898 +#ifdef CONFIG_PAX_SEGMEXEC
69899 + if (vma_m)
69900 + BUG_ON(pax_mirror_vma(vma_m, vma));
69901 +#endif
69902 +
69903 return 0;
69904 }
69905
69906 @@ -2323,6 +2758,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
69907 struct rb_node **rb_link, *rb_parent;
69908 struct mempolicy *pol;
69909
69910 + BUG_ON(vma->vm_mirror);
69911 +
69912 /*
69913 * If anonymous vma has not yet been faulted, update new pgoff
69914 * to match new location, to increase its chance of merging.
69915 @@ -2373,6 +2810,39 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
69916 return NULL;
69917 }
69918
69919 +#ifdef CONFIG_PAX_SEGMEXEC
69920 +long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
69921 +{
69922 + struct vm_area_struct *prev_m;
69923 + struct rb_node **rb_link_m, *rb_parent_m;
69924 + struct mempolicy *pol_m;
69925 +
69926 + BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
69927 + BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
69928 + BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
69929 + *vma_m = *vma;
69930 + INIT_LIST_HEAD(&vma_m->anon_vma_chain);
69931 + if (anon_vma_clone(vma_m, vma))
69932 + return -ENOMEM;
69933 + pol_m = vma_policy(vma_m);
69934 + mpol_get(pol_m);
69935 + vma_set_policy(vma_m, pol_m);
69936 + vma_m->vm_start += SEGMEXEC_TASK_SIZE;
69937 + vma_m->vm_end += SEGMEXEC_TASK_SIZE;
69938 + vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
69939 + vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
69940 + if (vma_m->vm_file)
69941 + get_file(vma_m->vm_file);
69942 + if (vma_m->vm_ops && vma_m->vm_ops->open)
69943 + vma_m->vm_ops->open(vma_m);
69944 + find_vma_prepare(vma->vm_mm, vma_m->vm_start, &prev_m, &rb_link_m, &rb_parent_m);
69945 + vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
69946 + vma_m->vm_mirror = vma;
69947 + vma->vm_mirror = vma_m;
69948 + return 0;
69949 +}
69950 +#endif
69951 +
69952 /*
69953 * Return true if the calling process may expand its vm space by the passed
69954 * number of pages
69955 @@ -2383,7 +2853,7 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
69956 unsigned long lim;
69957
69958 lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
69959 -
69960 + gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
69961 if (cur + npages > lim)
69962 return 0;
69963 return 1;
69964 @@ -2454,6 +2924,22 @@ int install_special_mapping(struct mm_struct *mm,
69965 vma->vm_start = addr;
69966 vma->vm_end = addr + len;
69967
69968 +#ifdef CONFIG_PAX_MPROTECT
69969 + if (mm->pax_flags & MF_PAX_MPROTECT) {
69970 +#ifndef CONFIG_PAX_MPROTECT_COMPAT
69971 + if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
69972 + return -EPERM;
69973 + if (!(vm_flags & VM_EXEC))
69974 + vm_flags &= ~VM_MAYEXEC;
69975 +#else
69976 + if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
69977 + vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
69978 +#endif
69979 + else
69980 + vm_flags &= ~VM_MAYWRITE;
69981 + }
69982 +#endif
69983 +
69984 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
69985 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
69986
69987 diff --git a/mm/mprotect.c b/mm/mprotect.c
69988 index 5a688a2..27e031c 100644
69989 --- a/mm/mprotect.c
69990 +++ b/mm/mprotect.c
69991 @@ -23,10 +23,16 @@
69992 #include <linux/mmu_notifier.h>
69993 #include <linux/migrate.h>
69994 #include <linux/perf_event.h>
69995 +
69996 +#ifdef CONFIG_PAX_MPROTECT
69997 +#include <linux/elf.h>
69998 +#endif
69999 +
70000 #include <asm/uaccess.h>
70001 #include <asm/pgtable.h>
70002 #include <asm/cacheflush.h>
70003 #include <asm/tlbflush.h>
70004 +#include <asm/mmu_context.h>
70005
70006 #ifndef pgprot_modify
70007 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
70008 @@ -141,6 +147,48 @@ static void change_protection(struct vm_area_struct *vma,
70009 flush_tlb_range(vma, start, end);
70010 }
70011
70012 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
70013 +/* called while holding the mmap semaphor for writing except stack expansion */
70014 +void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
70015 +{
70016 + unsigned long oldlimit, newlimit = 0UL;
70017 +
70018 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX))
70019 + return;
70020 +
70021 + spin_lock(&mm->page_table_lock);
70022 + oldlimit = mm->context.user_cs_limit;
70023 + if ((prot & VM_EXEC) && oldlimit < end)
70024 + /* USER_CS limit moved up */
70025 + newlimit = end;
70026 + else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
70027 + /* USER_CS limit moved down */
70028 + newlimit = start;
70029 +
70030 + if (newlimit) {
70031 + mm->context.user_cs_limit = newlimit;
70032 +
70033 +#ifdef CONFIG_SMP
70034 + wmb();
70035 + cpus_clear(mm->context.cpu_user_cs_mask);
70036 + cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
70037 +#endif
70038 +
70039 + set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
70040 + }
70041 + spin_unlock(&mm->page_table_lock);
70042 + if (newlimit == end) {
70043 + struct vm_area_struct *vma = find_vma(mm, oldlimit);
70044 +
70045 + for (; vma && vma->vm_start < end; vma = vma->vm_next)
70046 + if (is_vm_hugetlb_page(vma))
70047 + hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
70048 + else
70049 + change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma));
70050 + }
70051 +}
70052 +#endif
70053 +
70054 int
70055 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
70056 unsigned long start, unsigned long end, unsigned long newflags)
70057 @@ -153,11 +201,29 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
70058 int error;
70059 int dirty_accountable = 0;
70060
70061 +#ifdef CONFIG_PAX_SEGMEXEC
70062 + struct vm_area_struct *vma_m = NULL;
70063 + unsigned long start_m, end_m;
70064 +
70065 + start_m = start + SEGMEXEC_TASK_SIZE;
70066 + end_m = end + SEGMEXEC_TASK_SIZE;
70067 +#endif
70068 +
70069 if (newflags == oldflags) {
70070 *pprev = vma;
70071 return 0;
70072 }
70073
70074 + if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
70075 + struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
70076 +
70077 + if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
70078 + return -ENOMEM;
70079 +
70080 + if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
70081 + return -ENOMEM;
70082 + }
70083 +
70084 /*
70085 * If we make a private mapping writable we increase our commit;
70086 * but (without finer accounting) cannot reduce our commit if we
70087 @@ -174,6 +240,42 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
70088 }
70089 }
70090
70091 +#ifdef CONFIG_PAX_SEGMEXEC
70092 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
70093 + if (start != vma->vm_start) {
70094 + error = split_vma(mm, vma, start, 1);
70095 + if (error)
70096 + goto fail;
70097 + BUG_ON(!*pprev || (*pprev)->vm_next == vma);
70098 + *pprev = (*pprev)->vm_next;
70099 + }
70100 +
70101 + if (end != vma->vm_end) {
70102 + error = split_vma(mm, vma, end, 0);
70103 + if (error)
70104 + goto fail;
70105 + }
70106 +
70107 + if (pax_find_mirror_vma(vma)) {
70108 + error = __do_munmap(mm, start_m, end_m - start_m);
70109 + if (error)
70110 + goto fail;
70111 + } else {
70112 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
70113 + if (!vma_m) {
70114 + error = -ENOMEM;
70115 + goto fail;
70116 + }
70117 + vma->vm_flags = newflags;
70118 + error = pax_mirror_vma(vma_m, vma);
70119 + if (error) {
70120 + vma->vm_flags = oldflags;
70121 + goto fail;
70122 + }
70123 + }
70124 + }
70125 +#endif
70126 +
70127 /*
70128 * First try to merge with previous and/or next vma.
70129 */
70130 @@ -204,9 +306,21 @@ success:
70131 * vm_flags and vm_page_prot are protected by the mmap_sem
70132 * held in write mode.
70133 */
70134 +
70135 +#ifdef CONFIG_PAX_SEGMEXEC
70136 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
70137 + pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
70138 +#endif
70139 +
70140 vma->vm_flags = newflags;
70141 +
70142 +#ifdef CONFIG_PAX_MPROTECT
70143 + if (mm->binfmt && mm->binfmt->handle_mprotect)
70144 + mm->binfmt->handle_mprotect(vma, newflags);
70145 +#endif
70146 +
70147 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
70148 - vm_get_page_prot(newflags));
70149 + vm_get_page_prot(vma->vm_flags));
70150
70151 if (vma_wants_writenotify(vma)) {
70152 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
70153 @@ -248,6 +362,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
70154 end = start + len;
70155 if (end <= start)
70156 return -ENOMEM;
70157 +
70158 +#ifdef CONFIG_PAX_SEGMEXEC
70159 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
70160 + if (end > SEGMEXEC_TASK_SIZE)
70161 + return -EINVAL;
70162 + } else
70163 +#endif
70164 +
70165 + if (end > TASK_SIZE)
70166 + return -EINVAL;
70167 +
70168 if (!arch_validate_prot(prot))
70169 return -EINVAL;
70170
70171 @@ -255,7 +380,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
70172 /*
70173 * Does the application expect PROT_READ to imply PROT_EXEC:
70174 */
70175 - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
70176 + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
70177 prot |= PROT_EXEC;
70178
70179 vm_flags = calc_vm_prot_bits(prot);
70180 @@ -287,6 +412,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
70181 if (start > vma->vm_start)
70182 prev = vma;
70183
70184 +#ifdef CONFIG_PAX_MPROTECT
70185 + if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
70186 + current->mm->binfmt->handle_mprotect(vma, vm_flags);
70187 +#endif
70188 +
70189 for (nstart = start ; ; ) {
70190 unsigned long newflags;
70191
70192 @@ -296,6 +426,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
70193
70194 /* newflags >> 4 shift VM_MAY% in place of VM_% */
70195 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
70196 + if (prot & (PROT_WRITE | PROT_EXEC))
70197 + gr_log_rwxmprotect(vma->vm_file);
70198 +
70199 + error = -EACCES;
70200 + goto out;
70201 + }
70202 +
70203 + if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
70204 error = -EACCES;
70205 goto out;
70206 }
70207 @@ -310,6 +448,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
70208 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
70209 if (error)
70210 goto out;
70211 +
70212 + track_exec_limit(current->mm, nstart, tmp, vm_flags);
70213 +
70214 nstart = tmp;
70215
70216 if (nstart < prev->vm_end)
70217 diff --git a/mm/mremap.c b/mm/mremap.c
70218 index d6959cb..18a402a 100644
70219 --- a/mm/mremap.c
70220 +++ b/mm/mremap.c
70221 @@ -106,6 +106,12 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
70222 continue;
70223 pte = ptep_get_and_clear(mm, old_addr, old_pte);
70224 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
70225 +
70226 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
70227 + if (!(__supported_pte_mask & _PAGE_NX) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
70228 + pte = pte_exprotect(pte);
70229 +#endif
70230 +
70231 set_pte_at(mm, new_addr, new_pte, pte);
70232 }
70233
70234 @@ -290,6 +296,11 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
70235 if (is_vm_hugetlb_page(vma))
70236 goto Einval;
70237
70238 +#ifdef CONFIG_PAX_SEGMEXEC
70239 + if (pax_find_mirror_vma(vma))
70240 + goto Einval;
70241 +#endif
70242 +
70243 /* We can't remap across vm area boundaries */
70244 if (old_len > vma->vm_end - addr)
70245 goto Efault;
70246 @@ -346,20 +357,25 @@ static unsigned long mremap_to(unsigned long addr,
70247 unsigned long ret = -EINVAL;
70248 unsigned long charged = 0;
70249 unsigned long map_flags;
70250 + unsigned long pax_task_size = TASK_SIZE;
70251
70252 if (new_addr & ~PAGE_MASK)
70253 goto out;
70254
70255 - if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
70256 +#ifdef CONFIG_PAX_SEGMEXEC
70257 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
70258 + pax_task_size = SEGMEXEC_TASK_SIZE;
70259 +#endif
70260 +
70261 + pax_task_size -= PAGE_SIZE;
70262 +
70263 + if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
70264 goto out;
70265
70266 /* Check if the location we're moving into overlaps the
70267 * old location at all, and fail if it does.
70268 */
70269 - if ((new_addr <= addr) && (new_addr+new_len) > addr)
70270 - goto out;
70271 -
70272 - if ((addr <= new_addr) && (addr+old_len) > new_addr)
70273 + if (addr + old_len > new_addr && new_addr + new_len > addr)
70274 goto out;
70275
70276 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
70277 @@ -431,6 +447,7 @@ unsigned long do_mremap(unsigned long addr,
70278 struct vm_area_struct *vma;
70279 unsigned long ret = -EINVAL;
70280 unsigned long charged = 0;
70281 + unsigned long pax_task_size = TASK_SIZE;
70282
70283 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
70284 goto out;
70285 @@ -449,6 +466,17 @@ unsigned long do_mremap(unsigned long addr,
70286 if (!new_len)
70287 goto out;
70288
70289 +#ifdef CONFIG_PAX_SEGMEXEC
70290 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
70291 + pax_task_size = SEGMEXEC_TASK_SIZE;
70292 +#endif
70293 +
70294 + pax_task_size -= PAGE_SIZE;
70295 +
70296 + if (new_len > pax_task_size || addr > pax_task_size-new_len ||
70297 + old_len > pax_task_size || addr > pax_task_size-old_len)
70298 + goto out;
70299 +
70300 if (flags & MREMAP_FIXED) {
70301 if (flags & MREMAP_MAYMOVE)
70302 ret = mremap_to(addr, old_len, new_addr, new_len);
70303 @@ -498,6 +526,7 @@ unsigned long do_mremap(unsigned long addr,
70304 addr + new_len);
70305 }
70306 ret = addr;
70307 + track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
70308 goto out;
70309 }
70310 }
70311 @@ -524,7 +553,13 @@ unsigned long do_mremap(unsigned long addr,
70312 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
70313 if (ret)
70314 goto out;
70315 +
70316 + map_flags = vma->vm_flags;
70317 ret = move_vma(vma, addr, old_len, new_len, new_addr);
70318 + if (!(ret & ~PAGE_MASK)) {
70319 + track_exec_limit(current->mm, addr, addr + old_len, 0UL);
70320 + track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
70321 + }
70322 }
70323 out:
70324 if (ret & ~PAGE_MASK)
70325 diff --git a/mm/nobootmem.c b/mm/nobootmem.c
70326 index 7fa41b4..6087460 100644
70327 --- a/mm/nobootmem.c
70328 +++ b/mm/nobootmem.c
70329 @@ -110,19 +110,30 @@ static void __init __free_pages_memory(unsigned long start, unsigned long end)
70330 unsigned long __init free_all_memory_core_early(int nodeid)
70331 {
70332 int i;
70333 - u64 start, end;
70334 + u64 start, end, startrange, endrange;
70335 unsigned long count = 0;
70336 - struct range *range = NULL;
70337 + struct range *range = NULL, rangerange = { 0, 0 };
70338 int nr_range;
70339
70340 nr_range = get_free_all_memory_range(&range, nodeid);
70341 + startrange = __pa(range) >> PAGE_SHIFT;
70342 + endrange = (__pa(range + nr_range) - 1) >> PAGE_SHIFT;
70343
70344 for (i = 0; i < nr_range; i++) {
70345 start = range[i].start;
70346 end = range[i].end;
70347 + if (start <= endrange && startrange < end) {
70348 + BUG_ON(rangerange.start | rangerange.end);
70349 + rangerange = range[i];
70350 + continue;
70351 + }
70352 count += end - start;
70353 __free_pages_memory(start, end);
70354 }
70355 + start = rangerange.start;
70356 + end = rangerange.end;
70357 + count += end - start;
70358 + __free_pages_memory(start, end);
70359
70360 return count;
70361 }
70362 diff --git a/mm/nommu.c b/mm/nommu.c
70363 index b982290..7d73f53 100644
70364 --- a/mm/nommu.c
70365 +++ b/mm/nommu.c
70366 @@ -62,7 +62,6 @@ int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
70367 int sysctl_overcommit_ratio = 50; /* default is 50% */
70368 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
70369 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
70370 -int heap_stack_gap = 0;
70371
70372 atomic_long_t mmap_pages_allocated;
70373
70374 @@ -825,15 +824,6 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
70375 EXPORT_SYMBOL(find_vma);
70376
70377 /*
70378 - * find a VMA
70379 - * - we don't extend stack VMAs under NOMMU conditions
70380 - */
70381 -struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
70382 -{
70383 - return find_vma(mm, addr);
70384 -}
70385 -
70386 -/*
70387 * expand a stack to a given address
70388 * - not supported under NOMMU conditions
70389 */
70390 @@ -1553,6 +1543,7 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
70391
70392 /* most fields are the same, copy all, and then fixup */
70393 *new = *vma;
70394 + INIT_LIST_HEAD(&new->anon_vma_chain);
70395 *region = *vma->vm_region;
70396 new->vm_region = region;
70397
70398 diff --git a/mm/page_alloc.c b/mm/page_alloc.c
70399 index 485be89..c059ad3 100644
70400 --- a/mm/page_alloc.c
70401 +++ b/mm/page_alloc.c
70402 @@ -341,7 +341,7 @@ out:
70403 * This usage means that zero-order pages may not be compound.
70404 */
70405
70406 -static void free_compound_page(struct page *page)
70407 +void free_compound_page(struct page *page)
70408 {
70409 __free_pages_ok(page, compound_order(page));
70410 }
70411 @@ -654,6 +654,10 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
70412 int i;
70413 int bad = 0;
70414
70415 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
70416 + unsigned long index = 1UL << order;
70417 +#endif
70418 +
70419 trace_mm_page_free_direct(page, order);
70420 kmemcheck_free_shadow(page, order);
70421
70422 @@ -669,6 +673,12 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
70423 debug_check_no_obj_freed(page_address(page),
70424 PAGE_SIZE << order);
70425 }
70426 +
70427 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
70428 + for (; index; --index)
70429 + sanitize_highpage(page + index - 1);
70430 +#endif
70431 +
70432 arch_free_page(page, order);
70433 kernel_map_pages(page, 1 << order, 0);
70434
70435 @@ -784,8 +794,10 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
70436 arch_alloc_page(page, order);
70437 kernel_map_pages(page, 1 << order, 1);
70438
70439 +#ifndef CONFIG_PAX_MEMORY_SANITIZE
70440 if (gfp_flags & __GFP_ZERO)
70441 prep_zero_page(page, order, gfp_flags);
70442 +#endif
70443
70444 if (order && (gfp_flags & __GFP_COMP))
70445 prep_compound_page(page, order);
70446 @@ -3357,7 +3369,13 @@ static int pageblock_is_reserved(unsigned long start_pfn, unsigned long end_pfn)
70447 unsigned long pfn;
70448
70449 for (pfn = start_pfn; pfn < end_pfn; pfn++) {
70450 +#ifdef CONFIG_X86_32
70451 + /* boot failures in VMware 8 on 32bit vanilla since
70452 + this change */
70453 + if (!pfn_valid(pfn) || PageReserved(pfn_to_page(pfn)))
70454 +#else
70455 if (!pfn_valid_within(pfn) || PageReserved(pfn_to_page(pfn)))
70456 +#endif
70457 return 1;
70458 }
70459 return 0;
70460 diff --git a/mm/percpu.c b/mm/percpu.c
70461 index 716eb4a..8d10419 100644
70462 --- a/mm/percpu.c
70463 +++ b/mm/percpu.c
70464 @@ -121,7 +121,7 @@ static unsigned int pcpu_low_unit_cpu __read_mostly;
70465 static unsigned int pcpu_high_unit_cpu __read_mostly;
70466
70467 /* the address of the first chunk which starts with the kernel static area */
70468 -void *pcpu_base_addr __read_mostly;
70469 +void *pcpu_base_addr __read_only;
70470 EXPORT_SYMBOL_GPL(pcpu_base_addr);
70471
70472 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
70473 diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
70474 index e920aa3..137702a 100644
70475 --- a/mm/process_vm_access.c
70476 +++ b/mm/process_vm_access.c
70477 @@ -13,6 +13,7 @@
70478 #include <linux/uio.h>
70479 #include <linux/sched.h>
70480 #include <linux/highmem.h>
70481 +#include <linux/security.h>
70482 #include <linux/ptrace.h>
70483 #include <linux/slab.h>
70484 #include <linux/syscalls.h>
70485 @@ -258,19 +259,19 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
70486 size_t iov_l_curr_offset = 0;
70487 ssize_t iov_len;
70488
70489 + return -ENOSYS; // PaX: until properly audited
70490 +
70491 /*
70492 * Work out how many pages of struct pages we're going to need
70493 * when eventually calling get_user_pages
70494 */
70495 for (i = 0; i < riovcnt; i++) {
70496 iov_len = rvec[i].iov_len;
70497 - if (iov_len > 0) {
70498 - nr_pages_iov = ((unsigned long)rvec[i].iov_base
70499 - + iov_len)
70500 - / PAGE_SIZE - (unsigned long)rvec[i].iov_base
70501 - / PAGE_SIZE + 1;
70502 - nr_pages = max(nr_pages, nr_pages_iov);
70503 - }
70504 + if (iov_len <= 0)
70505 + continue;
70506 + nr_pages_iov = ((unsigned long)rvec[i].iov_base + iov_len) / PAGE_SIZE -
70507 + (unsigned long)rvec[i].iov_base / PAGE_SIZE + 1;
70508 + nr_pages = max(nr_pages, nr_pages_iov);
70509 }
70510
70511 if (nr_pages == 0)
70512 @@ -298,23 +299,23 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
70513 goto free_proc_pages;
70514 }
70515
70516 - task_lock(task);
70517 - if (__ptrace_may_access(task, PTRACE_MODE_ATTACH)) {
70518 - task_unlock(task);
70519 + if (gr_handle_ptrace(task, vm_write ? PTRACE_POKETEXT : PTRACE_ATTACH)) {
70520 rc = -EPERM;
70521 goto put_task_struct;
70522 }
70523 - mm = task->mm;
70524
70525 - if (!mm || (task->flags & PF_KTHREAD)) {
70526 - task_unlock(task);
70527 - rc = -EINVAL;
70528 + mm = mm_access(task, PTRACE_MODE_ATTACH);
70529 + if (!mm || IS_ERR(mm)) {
70530 + rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
70531 + /*
70532 + * Explicitly map EACCES to EPERM as EPERM is a more a
70533 + * appropriate error code for process_vw_readv/writev
70534 + */
70535 + if (rc == -EACCES)
70536 + rc = -EPERM;
70537 goto put_task_struct;
70538 }
70539
70540 - atomic_inc(&mm->mm_users);
70541 - task_unlock(task);
70542 -
70543 for (i = 0; i < riovcnt && iov_l_curr_idx < liovcnt; i++) {
70544 rc = process_vm_rw_single_vec(
70545 (unsigned long)rvec[i].iov_base, rvec[i].iov_len,
70546 diff --git a/mm/rmap.c b/mm/rmap.c
70547 index a4fd368..e0ffec7 100644
70548 --- a/mm/rmap.c
70549 +++ b/mm/rmap.c
70550 @@ -152,6 +152,10 @@ int anon_vma_prepare(struct vm_area_struct *vma)
70551 struct anon_vma *anon_vma = vma->anon_vma;
70552 struct anon_vma_chain *avc;
70553
70554 +#ifdef CONFIG_PAX_SEGMEXEC
70555 + struct anon_vma_chain *avc_m = NULL;
70556 +#endif
70557 +
70558 might_sleep();
70559 if (unlikely(!anon_vma)) {
70560 struct mm_struct *mm = vma->vm_mm;
70561 @@ -161,6 +165,12 @@ int anon_vma_prepare(struct vm_area_struct *vma)
70562 if (!avc)
70563 goto out_enomem;
70564
70565 +#ifdef CONFIG_PAX_SEGMEXEC
70566 + avc_m = anon_vma_chain_alloc(GFP_KERNEL);
70567 + if (!avc_m)
70568 + goto out_enomem_free_avc;
70569 +#endif
70570 +
70571 anon_vma = find_mergeable_anon_vma(vma);
70572 allocated = NULL;
70573 if (!anon_vma) {
70574 @@ -174,6 +184,21 @@ int anon_vma_prepare(struct vm_area_struct *vma)
70575 /* page_table_lock to protect against threads */
70576 spin_lock(&mm->page_table_lock);
70577 if (likely(!vma->anon_vma)) {
70578 +
70579 +#ifdef CONFIG_PAX_SEGMEXEC
70580 + struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
70581 +
70582 + if (vma_m) {
70583 + BUG_ON(vma_m->anon_vma);
70584 + vma_m->anon_vma = anon_vma;
70585 + avc_m->anon_vma = anon_vma;
70586 + avc_m->vma = vma;
70587 + list_add(&avc_m->same_vma, &vma_m->anon_vma_chain);
70588 + list_add(&avc_m->same_anon_vma, &anon_vma->head);
70589 + avc_m = NULL;
70590 + }
70591 +#endif
70592 +
70593 vma->anon_vma = anon_vma;
70594 avc->anon_vma = anon_vma;
70595 avc->vma = vma;
70596 @@ -187,12 +212,24 @@ int anon_vma_prepare(struct vm_area_struct *vma)
70597
70598 if (unlikely(allocated))
70599 put_anon_vma(allocated);
70600 +
70601 +#ifdef CONFIG_PAX_SEGMEXEC
70602 + if (unlikely(avc_m))
70603 + anon_vma_chain_free(avc_m);
70604 +#endif
70605 +
70606 if (unlikely(avc))
70607 anon_vma_chain_free(avc);
70608 }
70609 return 0;
70610
70611 out_enomem_free_avc:
70612 +
70613 +#ifdef CONFIG_PAX_SEGMEXEC
70614 + if (avc_m)
70615 + anon_vma_chain_free(avc_m);
70616 +#endif
70617 +
70618 anon_vma_chain_free(avc);
70619 out_enomem:
70620 return -ENOMEM;
70621 @@ -243,7 +280,7 @@ static void anon_vma_chain_link(struct vm_area_struct *vma,
70622 * Attach the anon_vmas from src to dst.
70623 * Returns 0 on success, -ENOMEM on failure.
70624 */
70625 -int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
70626 +int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src)
70627 {
70628 struct anon_vma_chain *avc, *pavc;
70629 struct anon_vma *root = NULL;
70630 @@ -276,7 +313,7 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
70631 * the corresponding VMA in the parent process is attached to.
70632 * Returns 0 on success, non-zero on failure.
70633 */
70634 -int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
70635 +int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma)
70636 {
70637 struct anon_vma_chain *avc;
70638 struct anon_vma *anon_vma;
70639 diff --git a/mm/shmem.c b/mm/shmem.c
70640 index 6c253f7..367e20a 100644
70641 --- a/mm/shmem.c
70642 +++ b/mm/shmem.c
70643 @@ -31,7 +31,7 @@
70644 #include <linux/export.h>
70645 #include <linux/swap.h>
70646
70647 -static struct vfsmount *shm_mnt;
70648 +struct vfsmount *shm_mnt;
70649
70650 #ifdef CONFIG_SHMEM
70651 /*
70652 @@ -74,7 +74,7 @@ static struct vfsmount *shm_mnt;
70653 #define BOGO_DIRENT_SIZE 20
70654
70655 /* Symlink up to this size is kmalloc'ed instead of using a swappable page */
70656 -#define SHORT_SYMLINK_LEN 128
70657 +#define SHORT_SYMLINK_LEN 64
70658
70659 struct shmem_xattr {
70660 struct list_head list; /* anchored by shmem_inode_info->xattr_list */
70661 @@ -2180,8 +2180,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
70662 int err = -ENOMEM;
70663
70664 /* Round up to L1_CACHE_BYTES to resist false sharing */
70665 - sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
70666 - L1_CACHE_BYTES), GFP_KERNEL);
70667 + sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
70668 if (!sbinfo)
70669 return -ENOMEM;
70670
70671 diff --git a/mm/slab.c b/mm/slab.c
70672 index 83311c9a..fcf8f86 100644
70673 --- a/mm/slab.c
70674 +++ b/mm/slab.c
70675 @@ -151,7 +151,7 @@
70676
70677 /* Legal flag mask for kmem_cache_create(). */
70678 #if DEBUG
70679 -# define CREATE_MASK (SLAB_RED_ZONE | \
70680 +# define CREATE_MASK (SLAB_USERCOPY | SLAB_RED_ZONE | \
70681 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
70682 SLAB_CACHE_DMA | \
70683 SLAB_STORE_USER | \
70684 @@ -159,7 +159,7 @@
70685 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
70686 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
70687 #else
70688 -# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
70689 +# define CREATE_MASK (SLAB_USERCOPY | SLAB_HWCACHE_ALIGN | \
70690 SLAB_CACHE_DMA | \
70691 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
70692 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
70693 @@ -288,7 +288,7 @@ struct kmem_list3 {
70694 * Need this for bootstrapping a per node allocator.
70695 */
70696 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
70697 -static struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
70698 +static struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
70699 #define CACHE_CACHE 0
70700 #define SIZE_AC MAX_NUMNODES
70701 #define SIZE_L3 (2 * MAX_NUMNODES)
70702 @@ -389,10 +389,10 @@ static void kmem_list3_init(struct kmem_list3 *parent)
70703 if ((x)->max_freeable < i) \
70704 (x)->max_freeable = i; \
70705 } while (0)
70706 -#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
70707 -#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
70708 -#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
70709 -#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
70710 +#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
70711 +#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
70712 +#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
70713 +#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
70714 #else
70715 #define STATS_INC_ACTIVE(x) do { } while (0)
70716 #define STATS_DEC_ACTIVE(x) do { } while (0)
70717 @@ -538,7 +538,7 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
70718 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
70719 */
70720 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
70721 - const struct slab *slab, void *obj)
70722 + const struct slab *slab, const void *obj)
70723 {
70724 u32 offset = (obj - slab->s_mem);
70725 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
70726 @@ -564,7 +564,7 @@ struct cache_names {
70727 static struct cache_names __initdata cache_names[] = {
70728 #define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
70729 #include <linux/kmalloc_sizes.h>
70730 - {NULL,}
70731 + {NULL}
70732 #undef CACHE
70733 };
70734
70735 @@ -1572,7 +1572,7 @@ void __init kmem_cache_init(void)
70736 sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
70737 sizes[INDEX_AC].cs_size,
70738 ARCH_KMALLOC_MINALIGN,
70739 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
70740 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
70741 NULL);
70742
70743 if (INDEX_AC != INDEX_L3) {
70744 @@ -1580,7 +1580,7 @@ void __init kmem_cache_init(void)
70745 kmem_cache_create(names[INDEX_L3].name,
70746 sizes[INDEX_L3].cs_size,
70747 ARCH_KMALLOC_MINALIGN,
70748 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
70749 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
70750 NULL);
70751 }
70752
70753 @@ -1598,7 +1598,7 @@ void __init kmem_cache_init(void)
70754 sizes->cs_cachep = kmem_cache_create(names->name,
70755 sizes->cs_size,
70756 ARCH_KMALLOC_MINALIGN,
70757 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
70758 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
70759 NULL);
70760 }
70761 #ifdef CONFIG_ZONE_DMA
70762 @@ -4322,10 +4322,10 @@ static int s_show(struct seq_file *m, void *p)
70763 }
70764 /* cpu stats */
70765 {
70766 - unsigned long allochit = atomic_read(&cachep->allochit);
70767 - unsigned long allocmiss = atomic_read(&cachep->allocmiss);
70768 - unsigned long freehit = atomic_read(&cachep->freehit);
70769 - unsigned long freemiss = atomic_read(&cachep->freemiss);
70770 + unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
70771 + unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
70772 + unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
70773 + unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
70774
70775 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
70776 allochit, allocmiss, freehit, freemiss);
70777 @@ -4584,13 +4584,62 @@ static int __init slab_proc_init(void)
70778 {
70779 proc_create("slabinfo",S_IWUSR|S_IRUSR,NULL,&proc_slabinfo_operations);
70780 #ifdef CONFIG_DEBUG_SLAB_LEAK
70781 - proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
70782 + proc_create("slab_allocators", S_IRUSR, NULL, &proc_slabstats_operations);
70783 #endif
70784 return 0;
70785 }
70786 module_init(slab_proc_init);
70787 #endif
70788
70789 +void check_object_size(const void *ptr, unsigned long n, bool to)
70790 +{
70791 +
70792 +#ifdef CONFIG_PAX_USERCOPY
70793 + struct page *page;
70794 + struct kmem_cache *cachep = NULL;
70795 + struct slab *slabp;
70796 + unsigned int objnr;
70797 + unsigned long offset;
70798 + const char *type;
70799 +
70800 + if (!n)
70801 + return;
70802 +
70803 + type = "<null>";
70804 + if (ZERO_OR_NULL_PTR(ptr))
70805 + goto report;
70806 +
70807 + if (!virt_addr_valid(ptr))
70808 + return;
70809 +
70810 + page = virt_to_head_page(ptr);
70811 +
70812 + type = "<process stack>";
70813 + if (!PageSlab(page)) {
70814 + if (object_is_on_stack(ptr, n) == -1)
70815 + goto report;
70816 + return;
70817 + }
70818 +
70819 + cachep = page_get_cache(page);
70820 + type = cachep->name;
70821 + if (!(cachep->flags & SLAB_USERCOPY))
70822 + goto report;
70823 +
70824 + slabp = page_get_slab(page);
70825 + objnr = obj_to_index(cachep, slabp, ptr);
70826 + BUG_ON(objnr >= cachep->num);
70827 + offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
70828 + if (offset <= obj_size(cachep) && n <= obj_size(cachep) - offset)
70829 + return;
70830 +
70831 +report:
70832 + pax_report_usercopy(ptr, n, to, type);
70833 +#endif
70834 +
70835 +}
70836 +EXPORT_SYMBOL(check_object_size);
70837 +
70838 /**
70839 * ksize - get the actual amount of memory allocated for a given object
70840 * @objp: Pointer to the object
70841 diff --git a/mm/slob.c b/mm/slob.c
70842 index 8105be4..e045f96 100644
70843 --- a/mm/slob.c
70844 +++ b/mm/slob.c
70845 @@ -29,7 +29,7 @@
70846 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
70847 * alloc_pages() directly, allocating compound pages so the page order
70848 * does not have to be separately tracked, and also stores the exact
70849 - * allocation size in page->private so that it can be used to accurately
70850 + * allocation size in slob_page->size so that it can be used to accurately
70851 * provide ksize(). These objects are detected in kfree() because slob_page()
70852 * is false for them.
70853 *
70854 @@ -58,6 +58,7 @@
70855 */
70856
70857 #include <linux/kernel.h>
70858 +#include <linux/sched.h>
70859 #include <linux/slab.h>
70860 #include <linux/mm.h>
70861 #include <linux/swap.h> /* struct reclaim_state */
70862 @@ -102,7 +103,8 @@ struct slob_page {
70863 unsigned long flags; /* mandatory */
70864 atomic_t _count; /* mandatory */
70865 slobidx_t units; /* free units left in page */
70866 - unsigned long pad[2];
70867 + unsigned long pad[1];
70868 + unsigned long size; /* size when >=PAGE_SIZE */
70869 slob_t *free; /* first free slob_t in page */
70870 struct list_head list; /* linked list of free pages */
70871 };
70872 @@ -135,7 +137,7 @@ static LIST_HEAD(free_slob_large);
70873 */
70874 static inline int is_slob_page(struct slob_page *sp)
70875 {
70876 - return PageSlab((struct page *)sp);
70877 + return PageSlab((struct page *)sp) && !sp->size;
70878 }
70879
70880 static inline void set_slob_page(struct slob_page *sp)
70881 @@ -150,7 +152,7 @@ static inline void clear_slob_page(struct slob_page *sp)
70882
70883 static inline struct slob_page *slob_page(const void *addr)
70884 {
70885 - return (struct slob_page *)virt_to_page(addr);
70886 + return (struct slob_page *)virt_to_head_page(addr);
70887 }
70888
70889 /*
70890 @@ -210,7 +212,7 @@ static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
70891 /*
70892 * Return the size of a slob block.
70893 */
70894 -static slobidx_t slob_units(slob_t *s)
70895 +static slobidx_t slob_units(const slob_t *s)
70896 {
70897 if (s->units > 0)
70898 return s->units;
70899 @@ -220,7 +222,7 @@ static slobidx_t slob_units(slob_t *s)
70900 /*
70901 * Return the next free slob block pointer after this one.
70902 */
70903 -static slob_t *slob_next(slob_t *s)
70904 +static slob_t *slob_next(const slob_t *s)
70905 {
70906 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
70907 slobidx_t next;
70908 @@ -235,7 +237,7 @@ static slob_t *slob_next(slob_t *s)
70909 /*
70910 * Returns true if s is the last free block in its page.
70911 */
70912 -static int slob_last(slob_t *s)
70913 +static int slob_last(const slob_t *s)
70914 {
70915 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
70916 }
70917 @@ -254,6 +256,7 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
70918 if (!page)
70919 return NULL;
70920
70921 + set_slob_page(page);
70922 return page_address(page);
70923 }
70924
70925 @@ -370,11 +373,11 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
70926 if (!b)
70927 return NULL;
70928 sp = slob_page(b);
70929 - set_slob_page(sp);
70930
70931 spin_lock_irqsave(&slob_lock, flags);
70932 sp->units = SLOB_UNITS(PAGE_SIZE);
70933 sp->free = b;
70934 + sp->size = 0;
70935 INIT_LIST_HEAD(&sp->list);
70936 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
70937 set_slob_page_free(sp, slob_list);
70938 @@ -476,10 +479,9 @@ out:
70939 * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
70940 */
70941
70942 -void *__kmalloc_node(size_t size, gfp_t gfp, int node)
70943 +static void *__kmalloc_node_align(size_t size, gfp_t gfp, int node, int align)
70944 {
70945 - unsigned int *m;
70946 - int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
70947 + slob_t *m;
70948 void *ret;
70949
70950 gfp &= gfp_allowed_mask;
70951 @@ -494,7 +496,10 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
70952
70953 if (!m)
70954 return NULL;
70955 - *m = size;
70956 + BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
70957 + BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
70958 + m[0].units = size;
70959 + m[1].units = align;
70960 ret = (void *)m + align;
70961
70962 trace_kmalloc_node(_RET_IP_, ret,
70963 @@ -506,16 +511,25 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
70964 gfp |= __GFP_COMP;
70965 ret = slob_new_pages(gfp, order, node);
70966 if (ret) {
70967 - struct page *page;
70968 - page = virt_to_page(ret);
70969 - page->private = size;
70970 + struct slob_page *sp;
70971 + sp = slob_page(ret);
70972 + sp->size = size;
70973 }
70974
70975 trace_kmalloc_node(_RET_IP_, ret,
70976 size, PAGE_SIZE << order, gfp, node);
70977 }
70978
70979 - kmemleak_alloc(ret, size, 1, gfp);
70980 + return ret;
70981 +}
70982 +
70983 +void *__kmalloc_node(size_t size, gfp_t gfp, int node)
70984 +{
70985 + int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
70986 + void *ret = __kmalloc_node_align(size, gfp, node, align);
70987 +
70988 + if (!ZERO_OR_NULL_PTR(ret))
70989 + kmemleak_alloc(ret, size, 1, gfp);
70990 return ret;
70991 }
70992 EXPORT_SYMBOL(__kmalloc_node);
70993 @@ -533,13 +547,92 @@ void kfree(const void *block)
70994 sp = slob_page(block);
70995 if (is_slob_page(sp)) {
70996 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
70997 - unsigned int *m = (unsigned int *)(block - align);
70998 - slob_free(m, *m + align);
70999 - } else
71000 + slob_t *m = (slob_t *)(block - align);
71001 + slob_free(m, m[0].units + align);
71002 + } else {
71003 + clear_slob_page(sp);
71004 + free_slob_page(sp);
71005 + sp->size = 0;
71006 put_page(&sp->page);
71007 + }
71008 }
71009 EXPORT_SYMBOL(kfree);
71010
71011 +void check_object_size(const void *ptr, unsigned long n, bool to)
71012 +{
71013 +
71014 +#ifdef CONFIG_PAX_USERCOPY
71015 + struct slob_page *sp;
71016 + const slob_t *free;
71017 + const void *base;
71018 + unsigned long flags;
71019 + const char *type;
71020 +
71021 + if (!n)
71022 + return;
71023 +
71024 + type = "<null>";
71025 + if (ZERO_OR_NULL_PTR(ptr))
71026 + goto report;
71027 +
71028 + if (!virt_addr_valid(ptr))
71029 + return;
71030 +
71031 + type = "<process stack>";
71032 + sp = slob_page(ptr);
71033 + if (!PageSlab((struct page *)sp)) {
71034 + if (object_is_on_stack(ptr, n) == -1)
71035 + goto report;
71036 + return;
71037 + }
71038 +
71039 + type = "<slob>";
71040 + if (sp->size) {
71041 + base = page_address(&sp->page);
71042 + if (base <= ptr && n <= sp->size - (ptr - base))
71043 + return;
71044 + goto report;
71045 + }
71046 +
71047 + /* some tricky double walking to find the chunk */
71048 + spin_lock_irqsave(&slob_lock, flags);
71049 + base = (void *)((unsigned long)ptr & PAGE_MASK);
71050 + free = sp->free;
71051 +
71052 + while (!slob_last(free) && (void *)free <= ptr) {
71053 + base = free + slob_units(free);
71054 + free = slob_next(free);
71055 + }
71056 +
71057 + while (base < (void *)free) {
71058 + slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
71059 + int size = SLOB_UNIT * SLOB_UNITS(m + align);
71060 + int offset;
71061 +
71062 + if (ptr < base + align)
71063 + break;
71064 +
71065 + offset = ptr - base - align;
71066 + if (offset >= m) {
71067 + base += size;
71068 + continue;
71069 + }
71070 +
71071 + if (n > m - offset)
71072 + break;
71073 +
71074 + spin_unlock_irqrestore(&slob_lock, flags);
71075 + return;
71076 + }
71077 +
71078 + spin_unlock_irqrestore(&slob_lock, flags);
71079 +report:
71080 + pax_report_usercopy(ptr, n, to, type);
71081 +#endif
71082 +
71083 +}
71084 +EXPORT_SYMBOL(check_object_size);
71085 +
71086 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
71087 size_t ksize(const void *block)
71088 {
71089 @@ -552,10 +645,10 @@ size_t ksize(const void *block)
71090 sp = slob_page(block);
71091 if (is_slob_page(sp)) {
71092 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
71093 - unsigned int *m = (unsigned int *)(block - align);
71094 - return SLOB_UNITS(*m) * SLOB_UNIT;
71095 + slob_t *m = (slob_t *)(block - align);
71096 + return SLOB_UNITS(m[0].units) * SLOB_UNIT;
71097 } else
71098 - return sp->page.private;
71099 + return sp->size;
71100 }
71101 EXPORT_SYMBOL(ksize);
71102
71103 @@ -571,8 +664,13 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
71104 {
71105 struct kmem_cache *c;
71106
71107 +#ifdef CONFIG_PAX_USERCOPY
71108 + c = __kmalloc_node_align(sizeof(struct kmem_cache),
71109 + GFP_KERNEL, -1, ARCH_KMALLOC_MINALIGN);
71110 +#else
71111 c = slob_alloc(sizeof(struct kmem_cache),
71112 GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
71113 +#endif
71114
71115 if (c) {
71116 c->name = name;
71117 @@ -614,17 +712,25 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
71118
71119 lockdep_trace_alloc(flags);
71120
71121 +#ifdef CONFIG_PAX_USERCOPY
71122 + b = __kmalloc_node_align(c->size, flags, node, c->align);
71123 +#else
71124 if (c->size < PAGE_SIZE) {
71125 b = slob_alloc(c->size, flags, c->align, node);
71126 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
71127 SLOB_UNITS(c->size) * SLOB_UNIT,
71128 flags, node);
71129 } else {
71130 + struct slob_page *sp;
71131 +
71132 b = slob_new_pages(flags, get_order(c->size), node);
71133 + sp = slob_page(b);
71134 + sp->size = c->size;
71135 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
71136 PAGE_SIZE << get_order(c->size),
71137 flags, node);
71138 }
71139 +#endif
71140
71141 if (c->ctor)
71142 c->ctor(b);
71143 @@ -636,10 +742,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
71144
71145 static void __kmem_cache_free(void *b, int size)
71146 {
71147 - if (size < PAGE_SIZE)
71148 + struct slob_page *sp = slob_page(b);
71149 +
71150 + if (is_slob_page(sp))
71151 slob_free(b, size);
71152 - else
71153 + else {
71154 + clear_slob_page(sp);
71155 + free_slob_page(sp);
71156 + sp->size = 0;
71157 slob_free_pages(b, get_order(size));
71158 + }
71159 }
71160
71161 static void kmem_rcu_free(struct rcu_head *head)
71162 @@ -652,17 +764,31 @@ static void kmem_rcu_free(struct rcu_head *head)
71163
71164 void kmem_cache_free(struct kmem_cache *c, void *b)
71165 {
71166 + int size = c->size;
71167 +
71168 +#ifdef CONFIG_PAX_USERCOPY
71169 + if (size + c->align < PAGE_SIZE) {
71170 + size += c->align;
71171 + b -= c->align;
71172 + }
71173 +#endif
71174 +
71175 kmemleak_free_recursive(b, c->flags);
71176 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
71177 struct slob_rcu *slob_rcu;
71178 - slob_rcu = b + (c->size - sizeof(struct slob_rcu));
71179 - slob_rcu->size = c->size;
71180 + slob_rcu = b + (size - sizeof(struct slob_rcu));
71181 + slob_rcu->size = size;
71182 call_rcu(&slob_rcu->head, kmem_rcu_free);
71183 } else {
71184 - __kmem_cache_free(b, c->size);
71185 + __kmem_cache_free(b, size);
71186 }
71187
71188 +#ifdef CONFIG_PAX_USERCOPY
71189 + trace_kfree(_RET_IP_, b);
71190 +#else
71191 trace_kmem_cache_free(_RET_IP_, b);
71192 +#endif
71193 +
71194 }
71195 EXPORT_SYMBOL(kmem_cache_free);
71196
71197 diff --git a/mm/slub.c b/mm/slub.c
71198 index 1a919f0..1739c9b 100644
71199 --- a/mm/slub.c
71200 +++ b/mm/slub.c
71201 @@ -208,7 +208,7 @@ struct track {
71202
71203 enum track_item { TRACK_ALLOC, TRACK_FREE };
71204
71205 -#ifdef CONFIG_SYSFS
71206 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
71207 static int sysfs_slab_add(struct kmem_cache *);
71208 static int sysfs_slab_alias(struct kmem_cache *, const char *);
71209 static void sysfs_slab_remove(struct kmem_cache *);
71210 @@ -530,7 +530,7 @@ static void print_track(const char *s, struct track *t)
71211 if (!t->addr)
71212 return;
71213
71214 - printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
71215 + printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
71216 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
71217 #ifdef CONFIG_STACKTRACE
71218 {
71219 @@ -2559,6 +2559,8 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
71220
71221 page = virt_to_head_page(x);
71222
71223 + BUG_ON(!PageSlab(page));
71224 +
71225 slab_free(s, page, x, _RET_IP_);
71226
71227 trace_kmem_cache_free(_RET_IP_, x);
71228 @@ -2592,7 +2594,7 @@ static int slub_min_objects;
71229 * Merge control. If this is set then no merging of slab caches will occur.
71230 * (Could be removed. This was introduced to pacify the merge skeptics.)
71231 */
71232 -static int slub_nomerge;
71233 +static int slub_nomerge = 1;
71234
71235 /*
71236 * Calculate the order of allocation given an slab object size.
71237 @@ -3042,7 +3044,7 @@ static int kmem_cache_open(struct kmem_cache *s,
71238 else
71239 s->cpu_partial = 30;
71240
71241 - s->refcount = 1;
71242 + atomic_set(&s->refcount, 1);
71243 #ifdef CONFIG_NUMA
71244 s->remote_node_defrag_ratio = 1000;
71245 #endif
71246 @@ -3146,8 +3148,7 @@ static inline int kmem_cache_close(struct kmem_cache *s)
71247 void kmem_cache_destroy(struct kmem_cache *s)
71248 {
71249 down_write(&slub_lock);
71250 - s->refcount--;
71251 - if (!s->refcount) {
71252 + if (atomic_dec_and_test(&s->refcount)) {
71253 list_del(&s->list);
71254 up_write(&slub_lock);
71255 if (kmem_cache_close(s)) {
71256 @@ -3358,6 +3359,50 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
71257 EXPORT_SYMBOL(__kmalloc_node);
71258 #endif
71259
71260 +void check_object_size(const void *ptr, unsigned long n, bool to)
71261 +{
71262 +
71263 +#ifdef CONFIG_PAX_USERCOPY
71264 + struct page *page;
71265 + struct kmem_cache *s = NULL;
71266 + unsigned long offset;
71267 + const char *type;
71268 +
71269 + if (!n)
71270 + return;
71271 +
71272 + type = "<null>";
71273 + if (ZERO_OR_NULL_PTR(ptr))
71274 + goto report;
71275 +
71276 + if (!virt_addr_valid(ptr))
71277 + return;
71278 +
71279 + page = virt_to_head_page(ptr);
71280 +
71281 + type = "<process stack>";
71282 + if (!PageSlab(page)) {
71283 + if (object_is_on_stack(ptr, n) == -1)
71284 + goto report;
71285 + return;
71286 + }
71287 +
71288 + s = page->slab;
71289 + type = s->name;
71290 + if (!(s->flags & SLAB_USERCOPY))
71291 + goto report;
71292 +
71293 + offset = (ptr - page_address(page)) % s->size;
71294 + if (offset <= s->objsize && n <= s->objsize - offset)
71295 + return;
71296 +
71297 +report:
71298 + pax_report_usercopy(ptr, n, to, type);
71299 +#endif
71300 +
71301 +}
71302 +EXPORT_SYMBOL(check_object_size);
71303 +
71304 size_t ksize(const void *object)
71305 {
71306 struct page *page;
71307 @@ -3632,7 +3677,7 @@ static void __init kmem_cache_bootstrap_fixup(struct kmem_cache *s)
71308 int node;
71309
71310 list_add(&s->list, &slab_caches);
71311 - s->refcount = -1;
71312 + atomic_set(&s->refcount, -1);
71313
71314 for_each_node_state(node, N_NORMAL_MEMORY) {
71315 struct kmem_cache_node *n = get_node(s, node);
71316 @@ -3749,17 +3794,17 @@ void __init kmem_cache_init(void)
71317
71318 /* Caches that are not of the two-to-the-power-of size */
71319 if (KMALLOC_MIN_SIZE <= 32) {
71320 - kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, 0);
71321 + kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, SLAB_USERCOPY);
71322 caches++;
71323 }
71324
71325 if (KMALLOC_MIN_SIZE <= 64) {
71326 - kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, 0);
71327 + kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, SLAB_USERCOPY);
71328 caches++;
71329 }
71330
71331 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
71332 - kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, 0);
71333 + kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, SLAB_USERCOPY);
71334 caches++;
71335 }
71336
71337 @@ -3827,7 +3872,7 @@ static int slab_unmergeable(struct kmem_cache *s)
71338 /*
71339 * We may have set a slab to be unmergeable during bootstrap.
71340 */
71341 - if (s->refcount < 0)
71342 + if (atomic_read(&s->refcount) < 0)
71343 return 1;
71344
71345 return 0;
71346 @@ -3886,7 +3931,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
71347 down_write(&slub_lock);
71348 s = find_mergeable(size, align, flags, name, ctor);
71349 if (s) {
71350 - s->refcount++;
71351 + atomic_inc(&s->refcount);
71352 /*
71353 * Adjust the object sizes so that we clear
71354 * the complete object on kzalloc.
71355 @@ -3895,7 +3940,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
71356 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
71357
71358 if (sysfs_slab_alias(s, name)) {
71359 - s->refcount--;
71360 + atomic_dec(&s->refcount);
71361 goto err;
71362 }
71363 up_write(&slub_lock);
71364 @@ -4023,7 +4068,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
71365 }
71366 #endif
71367
71368 -#ifdef CONFIG_SYSFS
71369 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
71370 static int count_inuse(struct page *page)
71371 {
71372 return page->inuse;
71373 @@ -4410,12 +4455,12 @@ static void resiliency_test(void)
71374 validate_slab_cache(kmalloc_caches[9]);
71375 }
71376 #else
71377 -#ifdef CONFIG_SYSFS
71378 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
71379 static void resiliency_test(void) {};
71380 #endif
71381 #endif
71382
71383 -#ifdef CONFIG_SYSFS
71384 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
71385 enum slab_stat_type {
71386 SL_ALL, /* All slabs */
71387 SL_PARTIAL, /* Only partially allocated slabs */
71388 @@ -4656,7 +4701,7 @@ SLAB_ATTR_RO(ctor);
71389
71390 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
71391 {
71392 - return sprintf(buf, "%d\n", s->refcount - 1);
71393 + return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
71394 }
71395 SLAB_ATTR_RO(aliases);
71396
71397 @@ -5223,6 +5268,7 @@ static char *create_unique_id(struct kmem_cache *s)
71398 return name;
71399 }
71400
71401 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
71402 static int sysfs_slab_add(struct kmem_cache *s)
71403 {
71404 int err;
71405 @@ -5285,6 +5331,7 @@ static void sysfs_slab_remove(struct kmem_cache *s)
71406 kobject_del(&s->kobj);
71407 kobject_put(&s->kobj);
71408 }
71409 +#endif
71410
71411 /*
71412 * Need to buffer aliases during bootup until sysfs becomes
71413 @@ -5298,6 +5345,7 @@ struct saved_alias {
71414
71415 static struct saved_alias *alias_list;
71416
71417 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
71418 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
71419 {
71420 struct saved_alias *al;
71421 @@ -5320,6 +5368,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
71422 alias_list = al;
71423 return 0;
71424 }
71425 +#endif
71426
71427 static int __init slab_sysfs_init(void)
71428 {
71429 diff --git a/mm/swap.c b/mm/swap.c
71430 index 55b266d..a532537 100644
71431 --- a/mm/swap.c
71432 +++ b/mm/swap.c
71433 @@ -31,6 +31,7 @@
71434 #include <linux/backing-dev.h>
71435 #include <linux/memcontrol.h>
71436 #include <linux/gfp.h>
71437 +#include <linux/hugetlb.h>
71438
71439 #include "internal.h"
71440
71441 @@ -71,6 +72,8 @@ static void __put_compound_page(struct page *page)
71442
71443 __page_cache_release(page);
71444 dtor = get_compound_page_dtor(page);
71445 + if (!PageHuge(page))
71446 + BUG_ON(dtor != free_compound_page);
71447 (*dtor)(page);
71448 }
71449
71450 diff --git a/mm/swapfile.c b/mm/swapfile.c
71451 index b1cd120..aaae885 100644
71452 --- a/mm/swapfile.c
71453 +++ b/mm/swapfile.c
71454 @@ -61,7 +61,7 @@ static DEFINE_MUTEX(swapon_mutex);
71455
71456 static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
71457 /* Activity counter to indicate that a swapon or swapoff has occurred */
71458 -static atomic_t proc_poll_event = ATOMIC_INIT(0);
71459 +static atomic_unchecked_t proc_poll_event = ATOMIC_INIT(0);
71460
71461 static inline unsigned char swap_count(unsigned char ent)
71462 {
71463 @@ -1670,7 +1670,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
71464 }
71465 filp_close(swap_file, NULL);
71466 err = 0;
71467 - atomic_inc(&proc_poll_event);
71468 + atomic_inc_unchecked(&proc_poll_event);
71469 wake_up_interruptible(&proc_poll_wait);
71470
71471 out_dput:
71472 @@ -1686,8 +1686,8 @@ static unsigned swaps_poll(struct file *file, poll_table *wait)
71473
71474 poll_wait(file, &proc_poll_wait, wait);
71475
71476 - if (seq->poll_event != atomic_read(&proc_poll_event)) {
71477 - seq->poll_event = atomic_read(&proc_poll_event);
71478 + if (seq->poll_event != atomic_read_unchecked(&proc_poll_event)) {
71479 + seq->poll_event = atomic_read_unchecked(&proc_poll_event);
71480 return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
71481 }
71482
71483 @@ -1785,7 +1785,7 @@ static int swaps_open(struct inode *inode, struct file *file)
71484 return ret;
71485
71486 seq = file->private_data;
71487 - seq->poll_event = atomic_read(&proc_poll_event);
71488 + seq->poll_event = atomic_read_unchecked(&proc_poll_event);
71489 return 0;
71490 }
71491
71492 @@ -2123,7 +2123,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
71493 (p->flags & SWP_DISCARDABLE) ? "D" : "");
71494
71495 mutex_unlock(&swapon_mutex);
71496 - atomic_inc(&proc_poll_event);
71497 + atomic_inc_unchecked(&proc_poll_event);
71498 wake_up_interruptible(&proc_poll_wait);
71499
71500 if (S_ISREG(inode->i_mode))
71501 diff --git a/mm/util.c b/mm/util.c
71502 index 136ac4f..5117eef 100644
71503 --- a/mm/util.c
71504 +++ b/mm/util.c
71505 @@ -114,6 +114,7 @@ EXPORT_SYMBOL(memdup_user);
71506 * allocated buffer. Use this if you don't want to free the buffer immediately
71507 * like, for example, with RCU.
71508 */
71509 +#undef __krealloc
71510 void *__krealloc(const void *p, size_t new_size, gfp_t flags)
71511 {
71512 void *ret;
71513 @@ -147,6 +148,7 @@ EXPORT_SYMBOL(__krealloc);
71514 * behaves exactly like kmalloc(). If @size is 0 and @p is not a
71515 * %NULL pointer, the object pointed to is freed.
71516 */
71517 +#undef krealloc
71518 void *krealloc(const void *p, size_t new_size, gfp_t flags)
71519 {
71520 void *ret;
71521 @@ -243,6 +245,12 @@ void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
71522 void arch_pick_mmap_layout(struct mm_struct *mm)
71523 {
71524 mm->mmap_base = TASK_UNMAPPED_BASE;
71525 +
71526 +#ifdef CONFIG_PAX_RANDMMAP
71527 + if (mm->pax_flags & MF_PAX_RANDMMAP)
71528 + mm->mmap_base += mm->delta_mmap;
71529 +#endif
71530 +
71531 mm->get_unmapped_area = arch_get_unmapped_area;
71532 mm->unmap_area = arch_unmap_area;
71533 }
71534 diff --git a/mm/vmalloc.c b/mm/vmalloc.c
71535 index 27be2f0..0aef2c2 100644
71536 --- a/mm/vmalloc.c
71537 +++ b/mm/vmalloc.c
71538 @@ -39,8 +39,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
71539
71540 pte = pte_offset_kernel(pmd, addr);
71541 do {
71542 - pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
71543 - WARN_ON(!pte_none(ptent) && !pte_present(ptent));
71544 +
71545 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
71546 + if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
71547 + BUG_ON(!pte_exec(*pte));
71548 + set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
71549 + continue;
71550 + }
71551 +#endif
71552 +
71553 + {
71554 + pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
71555 + WARN_ON(!pte_none(ptent) && !pte_present(ptent));
71556 + }
71557 } while (pte++, addr += PAGE_SIZE, addr != end);
71558 }
71559
71560 @@ -91,6 +102,7 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
71561 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
71562 {
71563 pte_t *pte;
71564 + int ret = -ENOMEM;
71565
71566 /*
71567 * nr is a running index into the array which helps higher level
71568 @@ -100,17 +112,30 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
71569 pte = pte_alloc_kernel(pmd, addr);
71570 if (!pte)
71571 return -ENOMEM;
71572 +
71573 + pax_open_kernel();
71574 do {
71575 struct page *page = pages[*nr];
71576
71577 - if (WARN_ON(!pte_none(*pte)))
71578 - return -EBUSY;
71579 - if (WARN_ON(!page))
71580 - return -ENOMEM;
71581 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
71582 + if (pgprot_val(prot) & _PAGE_NX)
71583 +#endif
71584 +
71585 + if (WARN_ON(!pte_none(*pte))) {
71586 + ret = -EBUSY;
71587 + goto out;
71588 + }
71589 + if (WARN_ON(!page)) {
71590 + ret = -ENOMEM;
71591 + goto out;
71592 + }
71593 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
71594 (*nr)++;
71595 } while (pte++, addr += PAGE_SIZE, addr != end);
71596 - return 0;
71597 + ret = 0;
71598 +out:
71599 + pax_close_kernel();
71600 + return ret;
71601 }
71602
71603 static int vmap_pmd_range(pud_t *pud, unsigned long addr,
71604 @@ -191,11 +216,20 @@ int is_vmalloc_or_module_addr(const void *x)
71605 * and fall back on vmalloc() if that fails. Others
71606 * just put it in the vmalloc space.
71607 */
71608 -#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
71609 +#ifdef CONFIG_MODULES
71610 +#ifdef MODULES_VADDR
71611 unsigned long addr = (unsigned long)x;
71612 if (addr >= MODULES_VADDR && addr < MODULES_END)
71613 return 1;
71614 #endif
71615 +
71616 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
71617 + if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
71618 + return 1;
71619 +#endif
71620 +
71621 +#endif
71622 +
71623 return is_vmalloc_addr(x);
71624 }
71625
71626 @@ -216,8 +250,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
71627
71628 if (!pgd_none(*pgd)) {
71629 pud_t *pud = pud_offset(pgd, addr);
71630 +#ifdef CONFIG_X86
71631 + if (!pud_large(*pud))
71632 +#endif
71633 if (!pud_none(*pud)) {
71634 pmd_t *pmd = pmd_offset(pud, addr);
71635 +#ifdef CONFIG_X86
71636 + if (!pmd_large(*pmd))
71637 +#endif
71638 if (!pmd_none(*pmd)) {
71639 pte_t *ptep, pte;
71640
71641 @@ -1294,6 +1334,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
71642 struct vm_struct *area;
71643
71644 BUG_ON(in_interrupt());
71645 +
71646 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
71647 + if (flags & VM_KERNEXEC) {
71648 + if (start != VMALLOC_START || end != VMALLOC_END)
71649 + return NULL;
71650 + start = (unsigned long)MODULES_EXEC_VADDR;
71651 + end = (unsigned long)MODULES_EXEC_END;
71652 + }
71653 +#endif
71654 +
71655 if (flags & VM_IOREMAP) {
71656 int bit = fls(size);
71657
71658 @@ -1526,6 +1576,11 @@ void *vmap(struct page **pages, unsigned int count,
71659 if (count > totalram_pages)
71660 return NULL;
71661
71662 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
71663 + if (!(pgprot_val(prot) & _PAGE_NX))
71664 + flags |= VM_KERNEXEC;
71665 +#endif
71666 +
71667 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
71668 __builtin_return_address(0));
71669 if (!area)
71670 @@ -1627,6 +1682,13 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
71671 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
71672 goto fail;
71673
71674 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
71675 + if (!(pgprot_val(prot) & _PAGE_NX))
71676 + area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST | VM_KERNEXEC,
71677 + VMALLOC_START, VMALLOC_END, node, gfp_mask, caller);
71678 + else
71679 +#endif
71680 +
71681 area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST,
71682 start, end, node, gfp_mask, caller);
71683 if (!area)
71684 @@ -1679,6 +1741,7 @@ static void *__vmalloc_node(unsigned long size, unsigned long align,
71685 gfp_mask, prot, node, caller);
71686 }
71687
71688 +#undef __vmalloc
71689 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
71690 {
71691 return __vmalloc_node(size, 1, gfp_mask, prot, -1,
71692 @@ -1702,6 +1765,7 @@ static inline void *__vmalloc_node_flags(unsigned long size,
71693 * For tight control over page level allocator and protection flags
71694 * use __vmalloc() instead.
71695 */
71696 +#undef vmalloc
71697 void *vmalloc(unsigned long size)
71698 {
71699 return __vmalloc_node_flags(size, -1, GFP_KERNEL | __GFP_HIGHMEM);
71700 @@ -1718,6 +1782,7 @@ EXPORT_SYMBOL(vmalloc);
71701 * For tight control over page level allocator and protection flags
71702 * use __vmalloc() instead.
71703 */
71704 +#undef vzalloc
71705 void *vzalloc(unsigned long size)
71706 {
71707 return __vmalloc_node_flags(size, -1,
71708 @@ -1732,6 +1797,7 @@ EXPORT_SYMBOL(vzalloc);
71709 * The resulting memory area is zeroed so it can be mapped to userspace
71710 * without leaking data.
71711 */
71712 +#undef vmalloc_user
71713 void *vmalloc_user(unsigned long size)
71714 {
71715 struct vm_struct *area;
71716 @@ -1759,6 +1825,7 @@ EXPORT_SYMBOL(vmalloc_user);
71717 * For tight control over page level allocator and protection flags
71718 * use __vmalloc() instead.
71719 */
71720 +#undef vmalloc_node
71721 void *vmalloc_node(unsigned long size, int node)
71722 {
71723 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
71724 @@ -1778,6 +1845,7 @@ EXPORT_SYMBOL(vmalloc_node);
71725 * For tight control over page level allocator and protection flags
71726 * use __vmalloc_node() instead.
71727 */
71728 +#undef vzalloc_node
71729 void *vzalloc_node(unsigned long size, int node)
71730 {
71731 return __vmalloc_node_flags(size, node,
71732 @@ -1800,10 +1868,10 @@ EXPORT_SYMBOL(vzalloc_node);
71733 * For tight control over page level allocator and protection flags
71734 * use __vmalloc() instead.
71735 */
71736 -
71737 +#undef vmalloc_exec
71738 void *vmalloc_exec(unsigned long size)
71739 {
71740 - return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
71741 + return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
71742 -1, __builtin_return_address(0));
71743 }
71744
71745 @@ -1822,6 +1890,7 @@ void *vmalloc_exec(unsigned long size)
71746 * Allocate enough 32bit PA addressable pages to cover @size from the
71747 * page level allocator and map them into contiguous kernel virtual space.
71748 */
71749 +#undef vmalloc_32
71750 void *vmalloc_32(unsigned long size)
71751 {
71752 return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
71753 @@ -1836,6 +1905,7 @@ EXPORT_SYMBOL(vmalloc_32);
71754 * The resulting memory area is 32bit addressable and zeroed so it can be
71755 * mapped to userspace without leaking data.
71756 */
71757 +#undef vmalloc_32_user
71758 void *vmalloc_32_user(unsigned long size)
71759 {
71760 struct vm_struct *area;
71761 @@ -2098,6 +2168,8 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
71762 unsigned long uaddr = vma->vm_start;
71763 unsigned long usize = vma->vm_end - vma->vm_start;
71764
71765 + BUG_ON(vma->vm_mirror);
71766 +
71767 if ((PAGE_SIZE-1) & (unsigned long)addr)
71768 return -EINVAL;
71769
71770 diff --git a/mm/vmstat.c b/mm/vmstat.c
71771 index 8fd603b..cf0d930 100644
71772 --- a/mm/vmstat.c
71773 +++ b/mm/vmstat.c
71774 @@ -78,7 +78,7 @@ void vm_events_fold_cpu(int cpu)
71775 *
71776 * vm_stat contains the global counters
71777 */
71778 -atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
71779 +atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
71780 EXPORT_SYMBOL(vm_stat);
71781
71782 #ifdef CONFIG_SMP
71783 @@ -454,7 +454,7 @@ void refresh_cpu_vm_stats(int cpu)
71784 v = p->vm_stat_diff[i];
71785 p->vm_stat_diff[i] = 0;
71786 local_irq_restore(flags);
71787 - atomic_long_add(v, &zone->vm_stat[i]);
71788 + atomic_long_add_unchecked(v, &zone->vm_stat[i]);
71789 global_diff[i] += v;
71790 #ifdef CONFIG_NUMA
71791 /* 3 seconds idle till flush */
71792 @@ -492,7 +492,7 @@ void refresh_cpu_vm_stats(int cpu)
71793
71794 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
71795 if (global_diff[i])
71796 - atomic_long_add(global_diff[i], &vm_stat[i]);
71797 + atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
71798 }
71799
71800 #endif
71801 @@ -1208,10 +1208,20 @@ static int __init setup_vmstat(void)
71802 start_cpu_timer(cpu);
71803 #endif
71804 #ifdef CONFIG_PROC_FS
71805 - proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
71806 - proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
71807 - proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
71808 - proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
71809 + {
71810 + mode_t gr_mode = S_IRUGO;
71811 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
71812 + gr_mode = S_IRUSR;
71813 +#endif
71814 + proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
71815 + proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
71816 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
71817 + proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
71818 +#else
71819 + proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
71820 +#endif
71821 + proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
71822 + }
71823 #endif
71824 return 0;
71825 }
71826 diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
71827 index 5471628..cef8398 100644
71828 --- a/net/8021q/vlan.c
71829 +++ b/net/8021q/vlan.c
71830 @@ -588,8 +588,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
71831 err = -EPERM;
71832 if (!capable(CAP_NET_ADMIN))
71833 break;
71834 - if ((args.u.name_type >= 0) &&
71835 - (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
71836 + if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
71837 struct vlan_net *vn;
71838
71839 vn = net_generic(net, vlan_net_id);
71840 diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
71841 index fdfdb57..38d368c 100644
71842 --- a/net/9p/trans_fd.c
71843 +++ b/net/9p/trans_fd.c
71844 @@ -423,7 +423,7 @@ static int p9_fd_write(struct p9_client *client, void *v, int len)
71845 oldfs = get_fs();
71846 set_fs(get_ds());
71847 /* The cast to a user pointer is valid due to the set_fs() */
71848 - ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
71849 + ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos);
71850 set_fs(oldfs);
71851
71852 if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
71853 diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c
71854 index f41f026..fe76ea8 100644
71855 --- a/net/atm/atm_misc.c
71856 +++ b/net/atm/atm_misc.c
71857 @@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int truesize)
71858 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
71859 return 1;
71860 atm_return(vcc, truesize);
71861 - atomic_inc(&vcc->stats->rx_drop);
71862 + atomic_inc_unchecked(&vcc->stats->rx_drop);
71863 return 0;
71864 }
71865 EXPORT_SYMBOL(atm_charge);
71866 @@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc, int pdu_size,
71867 }
71868 }
71869 atm_return(vcc, guess);
71870 - atomic_inc(&vcc->stats->rx_drop);
71871 + atomic_inc_unchecked(&vcc->stats->rx_drop);
71872 return NULL;
71873 }
71874 EXPORT_SYMBOL(atm_alloc_charge);
71875 @@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal);
71876
71877 void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
71878 {
71879 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
71880 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
71881 __SONET_ITEMS
71882 #undef __HANDLE_ITEM
71883 }
71884 @@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats);
71885
71886 void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
71887 {
71888 -#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
71889 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
71890 __SONET_ITEMS
71891 #undef __HANDLE_ITEM
71892 }
71893 diff --git a/net/atm/lec.h b/net/atm/lec.h
71894 index dfc0719..47c5322 100644
71895 --- a/net/atm/lec.h
71896 +++ b/net/atm/lec.h
71897 @@ -48,7 +48,7 @@ struct lane2_ops {
71898 const u8 *tlvs, u32 sizeoftlvs);
71899 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
71900 const u8 *tlvs, u32 sizeoftlvs);
71901 -};
71902 +} __no_const;
71903
71904 /*
71905 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
71906 diff --git a/net/atm/mpc.h b/net/atm/mpc.h
71907 index 0919a88..a23d54e 100644
71908 --- a/net/atm/mpc.h
71909 +++ b/net/atm/mpc.h
71910 @@ -33,7 +33,7 @@ struct mpoa_client {
71911 struct mpc_parameters parameters; /* parameters for this client */
71912
71913 const struct net_device_ops *old_ops;
71914 - struct net_device_ops new_ops;
71915 + net_device_ops_no_const new_ops;
71916 };
71917
71918
71919 diff --git a/net/atm/proc.c b/net/atm/proc.c
71920 index 0d020de..011c7bb 100644
71921 --- a/net/atm/proc.c
71922 +++ b/net/atm/proc.c
71923 @@ -45,9 +45,9 @@ static void add_stats(struct seq_file *seq, const char *aal,
71924 const struct k_atm_aal_stats *stats)
71925 {
71926 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
71927 - atomic_read(&stats->tx), atomic_read(&stats->tx_err),
71928 - atomic_read(&stats->rx), atomic_read(&stats->rx_err),
71929 - atomic_read(&stats->rx_drop));
71930 + atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
71931 + atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
71932 + atomic_read_unchecked(&stats->rx_drop));
71933 }
71934
71935 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
71936 diff --git a/net/atm/resources.c b/net/atm/resources.c
71937 index 23f45ce..c748f1a 100644
71938 --- a/net/atm/resources.c
71939 +++ b/net/atm/resources.c
71940 @@ -160,7 +160,7 @@ EXPORT_SYMBOL(atm_dev_deregister);
71941 static void copy_aal_stats(struct k_atm_aal_stats *from,
71942 struct atm_aal_stats *to)
71943 {
71944 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
71945 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
71946 __AAL_STAT_ITEMS
71947 #undef __HANDLE_ITEM
71948 }
71949 @@ -168,7 +168,7 @@ static void copy_aal_stats(struct k_atm_aal_stats *from,
71950 static void subtract_aal_stats(struct k_atm_aal_stats *from,
71951 struct atm_aal_stats *to)
71952 {
71953 -#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
71954 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
71955 __AAL_STAT_ITEMS
71956 #undef __HANDLE_ITEM
71957 }
71958 diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
71959 index 3512e25..2b33401 100644
71960 --- a/net/batman-adv/bat_iv_ogm.c
71961 +++ b/net/batman-adv/bat_iv_ogm.c
71962 @@ -541,7 +541,7 @@ void bat_ogm_schedule(struct hard_iface *hard_iface, int tt_num_changes)
71963
71964 /* change sequence number to network order */
71965 batman_ogm_packet->seqno =
71966 - htonl((uint32_t)atomic_read(&hard_iface->seqno));
71967 + htonl((uint32_t)atomic_read_unchecked(&hard_iface->seqno));
71968
71969 batman_ogm_packet->ttvn = atomic_read(&bat_priv->ttvn);
71970 batman_ogm_packet->tt_crc = htons((uint16_t)
71971 @@ -561,7 +561,7 @@ void bat_ogm_schedule(struct hard_iface *hard_iface, int tt_num_changes)
71972 else
71973 batman_ogm_packet->gw_flags = NO_FLAGS;
71974
71975 - atomic_inc(&hard_iface->seqno);
71976 + atomic_inc_unchecked(&hard_iface->seqno);
71977
71978 slide_own_bcast_window(hard_iface);
71979 bat_ogm_queue_add(bat_priv, hard_iface->packet_buff,
71980 @@ -922,7 +922,7 @@ static void bat_ogm_process(const struct ethhdr *ethhdr,
71981 return;
71982
71983 /* could be changed by schedule_own_packet() */
71984 - if_incoming_seqno = atomic_read(&if_incoming->seqno);
71985 + if_incoming_seqno = atomic_read_unchecked(&if_incoming->seqno);
71986
71987 has_directlink_flag = (batman_ogm_packet->flags & DIRECTLINK ? 1 : 0);
71988
71989 diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
71990 index 7704df4..beb4e16 100644
71991 --- a/net/batman-adv/hard-interface.c
71992 +++ b/net/batman-adv/hard-interface.c
71993 @@ -326,8 +326,8 @@ int hardif_enable_interface(struct hard_iface *hard_iface,
71994 hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
71995 dev_add_pack(&hard_iface->batman_adv_ptype);
71996
71997 - atomic_set(&hard_iface->seqno, 1);
71998 - atomic_set(&hard_iface->frag_seqno, 1);
71999 + atomic_set_unchecked(&hard_iface->seqno, 1);
72000 + atomic_set_unchecked(&hard_iface->frag_seqno, 1);
72001 bat_info(hard_iface->soft_iface, "Adding interface: %s\n",
72002 hard_iface->net_dev->name);
72003
72004 diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
72005 index f9cc957..efd9dae 100644
72006 --- a/net/batman-adv/soft-interface.c
72007 +++ b/net/batman-adv/soft-interface.c
72008 @@ -634,7 +634,7 @@ static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
72009
72010 /* set broadcast sequence number */
72011 bcast_packet->seqno =
72012 - htonl(atomic_inc_return(&bat_priv->bcast_seqno));
72013 + htonl(atomic_inc_return_unchecked(&bat_priv->bcast_seqno));
72014
72015 add_bcast_packet_to_list(bat_priv, skb, 1);
72016
72017 @@ -828,7 +828,7 @@ struct net_device *softif_create(const char *name)
72018 atomic_set(&bat_priv->batman_queue_left, BATMAN_QUEUE_LEN);
72019
72020 atomic_set(&bat_priv->mesh_state, MESH_INACTIVE);
72021 - atomic_set(&bat_priv->bcast_seqno, 1);
72022 + atomic_set_unchecked(&bat_priv->bcast_seqno, 1);
72023 atomic_set(&bat_priv->ttvn, 0);
72024 atomic_set(&bat_priv->tt_local_changes, 0);
72025 atomic_set(&bat_priv->tt_ogm_append_cnt, 0);
72026 diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
72027 index ab8d0fe..ceba3fd 100644
72028 --- a/net/batman-adv/types.h
72029 +++ b/net/batman-adv/types.h
72030 @@ -38,8 +38,8 @@ struct hard_iface {
72031 int16_t if_num;
72032 char if_status;
72033 struct net_device *net_dev;
72034 - atomic_t seqno;
72035 - atomic_t frag_seqno;
72036 + atomic_unchecked_t seqno;
72037 + atomic_unchecked_t frag_seqno;
72038 unsigned char *packet_buff;
72039 int packet_len;
72040 struct kobject *hardif_obj;
72041 @@ -154,7 +154,7 @@ struct bat_priv {
72042 atomic_t orig_interval; /* uint */
72043 atomic_t hop_penalty; /* uint */
72044 atomic_t log_level; /* uint */
72045 - atomic_t bcast_seqno;
72046 + atomic_unchecked_t bcast_seqno;
72047 atomic_t bcast_queue_left;
72048 atomic_t batman_queue_left;
72049 atomic_t ttvn; /* translation table version number */
72050 diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c
72051 index 07d1c1d..7e9bea9 100644
72052 --- a/net/batman-adv/unicast.c
72053 +++ b/net/batman-adv/unicast.c
72054 @@ -264,7 +264,7 @@ int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
72055 frag1->flags = UNI_FRAG_HEAD | large_tail;
72056 frag2->flags = large_tail;
72057
72058 - seqno = atomic_add_return(2, &hard_iface->frag_seqno);
72059 + seqno = atomic_add_return_unchecked(2, &hard_iface->frag_seqno);
72060 frag1->seqno = htons(seqno - 1);
72061 frag2->seqno = htons(seqno);
72062
72063 diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
72064 index c1c597e..05ebb40 100644
72065 --- a/net/bluetooth/hci_conn.c
72066 +++ b/net/bluetooth/hci_conn.c
72067 @@ -234,7 +234,7 @@ void hci_le_ltk_reply(struct hci_conn *conn, u8 ltk[16])
72068 memset(&cp, 0, sizeof(cp));
72069
72070 cp.handle = cpu_to_le16(conn->handle);
72071 - memcpy(cp.ltk, ltk, sizeof(ltk));
72072 + memcpy(cp.ltk, ltk, sizeof(cp.ltk));
72073
72074 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
72075 }
72076 diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
72077 index 17b5b1c..826d872 100644
72078 --- a/net/bluetooth/l2cap_core.c
72079 +++ b/net/bluetooth/l2cap_core.c
72080 @@ -2176,8 +2176,10 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, voi
72081 break;
72082
72083 case L2CAP_CONF_RFC:
72084 - if (olen == sizeof(rfc))
72085 - memcpy(&rfc, (void *)val, olen);
72086 + if (olen != sizeof(rfc))
72087 + break;
72088 +
72089 + memcpy(&rfc, (void *)val, olen);
72090
72091 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
72092 rfc.mode != chan->mode)
72093 @@ -2265,8 +2267,10 @@ static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
72094
72095 switch (type) {
72096 case L2CAP_CONF_RFC:
72097 - if (olen == sizeof(rfc))
72098 - memcpy(&rfc, (void *)val, olen);
72099 + if (olen != sizeof(rfc))
72100 + break;
72101 +
72102 + memcpy(&rfc, (void *)val, olen);
72103 goto done;
72104 }
72105 }
72106 diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
72107 index a5f4e57..910ee6d 100644
72108 --- a/net/bridge/br_multicast.c
72109 +++ b/net/bridge/br_multicast.c
72110 @@ -1485,7 +1485,7 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
72111 nexthdr = ip6h->nexthdr;
72112 offset = ipv6_skip_exthdr(skb, sizeof(*ip6h), &nexthdr);
72113
72114 - if (offset < 0 || nexthdr != IPPROTO_ICMPV6)
72115 + if (nexthdr != IPPROTO_ICMPV6)
72116 return 0;
72117
72118 /* Okay, we found ICMPv6 header */
72119 diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
72120 index 5864cc4..121f3a3 100644
72121 --- a/net/bridge/netfilter/ebtables.c
72122 +++ b/net/bridge/netfilter/ebtables.c
72123 @@ -1513,7 +1513,7 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
72124 tmp.valid_hooks = t->table->valid_hooks;
72125 }
72126 mutex_unlock(&ebt_mutex);
72127 - if (copy_to_user(user, &tmp, *len) != 0){
72128 + if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0){
72129 BUGPRINT("c2u Didn't work\n");
72130 ret = -EFAULT;
72131 break;
72132 diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
72133 index a986280..13444a1 100644
72134 --- a/net/caif/caif_socket.c
72135 +++ b/net/caif/caif_socket.c
72136 @@ -48,19 +48,20 @@ static struct dentry *debugfsdir;
72137 #ifdef CONFIG_DEBUG_FS
72138 struct debug_fs_counter {
72139 atomic_t caif_nr_socks;
72140 - atomic_t caif_sock_create;
72141 - atomic_t num_connect_req;
72142 - atomic_t num_connect_resp;
72143 - atomic_t num_connect_fail_resp;
72144 - atomic_t num_disconnect;
72145 - atomic_t num_remote_shutdown_ind;
72146 - atomic_t num_tx_flow_off_ind;
72147 - atomic_t num_tx_flow_on_ind;
72148 - atomic_t num_rx_flow_off;
72149 - atomic_t num_rx_flow_on;
72150 + atomic_unchecked_t caif_sock_create;
72151 + atomic_unchecked_t num_connect_req;
72152 + atomic_unchecked_t num_connect_resp;
72153 + atomic_unchecked_t num_connect_fail_resp;
72154 + atomic_unchecked_t num_disconnect;
72155 + atomic_unchecked_t num_remote_shutdown_ind;
72156 + atomic_unchecked_t num_tx_flow_off_ind;
72157 + atomic_unchecked_t num_tx_flow_on_ind;
72158 + atomic_unchecked_t num_rx_flow_off;
72159 + atomic_unchecked_t num_rx_flow_on;
72160 };
72161 static struct debug_fs_counter cnt;
72162 #define dbfs_atomic_inc(v) atomic_inc_return(v)
72163 +#define dbfs_atomic_inc_unchecked(v) atomic_inc_return_unchecked(v)
72164 #define dbfs_atomic_dec(v) atomic_dec_return(v)
72165 #else
72166 #define dbfs_atomic_inc(v) 0
72167 @@ -161,7 +162,7 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
72168 atomic_read(&cf_sk->sk.sk_rmem_alloc),
72169 sk_rcvbuf_lowwater(cf_sk));
72170 set_rx_flow_off(cf_sk);
72171 - dbfs_atomic_inc(&cnt.num_rx_flow_off);
72172 + dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_off);
72173 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
72174 }
72175
72176 @@ -172,7 +173,7 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
72177 set_rx_flow_off(cf_sk);
72178 if (net_ratelimit())
72179 pr_debug("sending flow OFF due to rmem_schedule\n");
72180 - dbfs_atomic_inc(&cnt.num_rx_flow_off);
72181 + dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_off);
72182 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
72183 }
72184 skb->dev = NULL;
72185 @@ -233,14 +234,14 @@ static void caif_ctrl_cb(struct cflayer *layr,
72186 switch (flow) {
72187 case CAIF_CTRLCMD_FLOW_ON_IND:
72188 /* OK from modem to start sending again */
72189 - dbfs_atomic_inc(&cnt.num_tx_flow_on_ind);
72190 + dbfs_atomic_inc_unchecked(&cnt.num_tx_flow_on_ind);
72191 set_tx_flow_on(cf_sk);
72192 cf_sk->sk.sk_state_change(&cf_sk->sk);
72193 break;
72194
72195 case CAIF_CTRLCMD_FLOW_OFF_IND:
72196 /* Modem asks us to shut up */
72197 - dbfs_atomic_inc(&cnt.num_tx_flow_off_ind);
72198 + dbfs_atomic_inc_unchecked(&cnt.num_tx_flow_off_ind);
72199 set_tx_flow_off(cf_sk);
72200 cf_sk->sk.sk_state_change(&cf_sk->sk);
72201 break;
72202 @@ -249,7 +250,7 @@ static void caif_ctrl_cb(struct cflayer *layr,
72203 /* We're now connected */
72204 caif_client_register_refcnt(&cf_sk->layer,
72205 cfsk_hold, cfsk_put);
72206 - dbfs_atomic_inc(&cnt.num_connect_resp);
72207 + dbfs_atomic_inc_unchecked(&cnt.num_connect_resp);
72208 cf_sk->sk.sk_state = CAIF_CONNECTED;
72209 set_tx_flow_on(cf_sk);
72210 cf_sk->sk.sk_state_change(&cf_sk->sk);
72211 @@ -263,7 +264,7 @@ static void caif_ctrl_cb(struct cflayer *layr,
72212
72213 case CAIF_CTRLCMD_INIT_FAIL_RSP:
72214 /* Connect request failed */
72215 - dbfs_atomic_inc(&cnt.num_connect_fail_resp);
72216 + dbfs_atomic_inc_unchecked(&cnt.num_connect_fail_resp);
72217 cf_sk->sk.sk_err = ECONNREFUSED;
72218 cf_sk->sk.sk_state = CAIF_DISCONNECTED;
72219 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
72220 @@ -277,7 +278,7 @@ static void caif_ctrl_cb(struct cflayer *layr,
72221
72222 case CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND:
72223 /* Modem has closed this connection, or device is down. */
72224 - dbfs_atomic_inc(&cnt.num_remote_shutdown_ind);
72225 + dbfs_atomic_inc_unchecked(&cnt.num_remote_shutdown_ind);
72226 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
72227 cf_sk->sk.sk_err = ECONNRESET;
72228 set_rx_flow_on(cf_sk);
72229 @@ -297,7 +298,7 @@ static void caif_check_flow_release(struct sock *sk)
72230 return;
72231
72232 if (atomic_read(&sk->sk_rmem_alloc) <= sk_rcvbuf_lowwater(cf_sk)) {
72233 - dbfs_atomic_inc(&cnt.num_rx_flow_on);
72234 + dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_on);
72235 set_rx_flow_on(cf_sk);
72236 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_ON_REQ);
72237 }
72238 @@ -854,7 +855,7 @@ static int caif_connect(struct socket *sock, struct sockaddr *uaddr,
72239 /*ifindex = id of the interface.*/
72240 cf_sk->conn_req.ifindex = cf_sk->sk.sk_bound_dev_if;
72241
72242 - dbfs_atomic_inc(&cnt.num_connect_req);
72243 + dbfs_atomic_inc_unchecked(&cnt.num_connect_req);
72244 cf_sk->layer.receive = caif_sktrecv_cb;
72245
72246 err = caif_connect_client(sock_net(sk), &cf_sk->conn_req,
72247 @@ -943,7 +944,7 @@ static int caif_release(struct socket *sock)
72248 spin_unlock_bh(&sk->sk_receive_queue.lock);
72249 sock->sk = NULL;
72250
72251 - dbfs_atomic_inc(&cnt.num_disconnect);
72252 + dbfs_atomic_inc_unchecked(&cnt.num_disconnect);
72253
72254 WARN_ON(IS_ERR(cf_sk->debugfs_socket_dir));
72255 if (cf_sk->debugfs_socket_dir != NULL)
72256 @@ -1122,7 +1123,7 @@ static int caif_create(struct net *net, struct socket *sock, int protocol,
72257 cf_sk->conn_req.protocol = protocol;
72258 /* Increase the number of sockets created. */
72259 dbfs_atomic_inc(&cnt.caif_nr_socks);
72260 - num = dbfs_atomic_inc(&cnt.caif_sock_create);
72261 + num = dbfs_atomic_inc_unchecked(&cnt.caif_sock_create);
72262 #ifdef CONFIG_DEBUG_FS
72263 if (!IS_ERR(debugfsdir)) {
72264
72265 diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
72266 index 5cf5222..6f704ad 100644
72267 --- a/net/caif/cfctrl.c
72268 +++ b/net/caif/cfctrl.c
72269 @@ -9,6 +9,7 @@
72270 #include <linux/stddef.h>
72271 #include <linux/spinlock.h>
72272 #include <linux/slab.h>
72273 +#include <linux/sched.h>
72274 #include <net/caif/caif_layer.h>
72275 #include <net/caif/cfpkt.h>
72276 #include <net/caif/cfctrl.h>
72277 @@ -42,8 +43,8 @@ struct cflayer *cfctrl_create(void)
72278 memset(&dev_info, 0, sizeof(dev_info));
72279 dev_info.id = 0xff;
72280 cfsrvl_init(&this->serv, 0, &dev_info, false);
72281 - atomic_set(&this->req_seq_no, 1);
72282 - atomic_set(&this->rsp_seq_no, 1);
72283 + atomic_set_unchecked(&this->req_seq_no, 1);
72284 + atomic_set_unchecked(&this->rsp_seq_no, 1);
72285 this->serv.layer.receive = cfctrl_recv;
72286 sprintf(this->serv.layer.name, "ctrl");
72287 this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
72288 @@ -129,8 +130,8 @@ static void cfctrl_insert_req(struct cfctrl *ctrl,
72289 struct cfctrl_request_info *req)
72290 {
72291 spin_lock_bh(&ctrl->info_list_lock);
72292 - atomic_inc(&ctrl->req_seq_no);
72293 - req->sequence_no = atomic_read(&ctrl->req_seq_no);
72294 + atomic_inc_unchecked(&ctrl->req_seq_no);
72295 + req->sequence_no = atomic_read_unchecked(&ctrl->req_seq_no);
72296 list_add_tail(&req->list, &ctrl->list);
72297 spin_unlock_bh(&ctrl->info_list_lock);
72298 }
72299 @@ -148,7 +149,7 @@ static struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl,
72300 if (p != first)
72301 pr_warn("Requests are not received in order\n");
72302
72303 - atomic_set(&ctrl->rsp_seq_no,
72304 + atomic_set_unchecked(&ctrl->rsp_seq_no,
72305 p->sequence_no);
72306 list_del(&p->list);
72307 goto out;
72308 diff --git a/net/can/gw.c b/net/can/gw.c
72309 index 3d79b12..8de85fa 100644
72310 --- a/net/can/gw.c
72311 +++ b/net/can/gw.c
72312 @@ -96,7 +96,7 @@ struct cf_mod {
72313 struct {
72314 void (*xor)(struct can_frame *cf, struct cgw_csum_xor *xor);
72315 void (*crc8)(struct can_frame *cf, struct cgw_csum_crc8 *crc8);
72316 - } csumfunc;
72317 + } __no_const csumfunc;
72318 };
72319
72320
72321 diff --git a/net/compat.c b/net/compat.c
72322 index 6def90e..c6992fa 100644
72323 --- a/net/compat.c
72324 +++ b/net/compat.c
72325 @@ -71,9 +71,9 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg)
72326 __get_user(kmsg->msg_controllen, &umsg->msg_controllen) ||
72327 __get_user(kmsg->msg_flags, &umsg->msg_flags))
72328 return -EFAULT;
72329 - kmsg->msg_name = compat_ptr(tmp1);
72330 - kmsg->msg_iov = compat_ptr(tmp2);
72331 - kmsg->msg_control = compat_ptr(tmp3);
72332 + kmsg->msg_name = (void __force_kernel *)compat_ptr(tmp1);
72333 + kmsg->msg_iov = (void __force_kernel *)compat_ptr(tmp2);
72334 + kmsg->msg_control = (void __force_kernel *)compat_ptr(tmp3);
72335 return 0;
72336 }
72337
72338 @@ -85,7 +85,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
72339
72340 if (kern_msg->msg_namelen) {
72341 if (mode == VERIFY_READ) {
72342 - int err = move_addr_to_kernel(kern_msg->msg_name,
72343 + int err = move_addr_to_kernel((void __force_user *)kern_msg->msg_name,
72344 kern_msg->msg_namelen,
72345 kern_address);
72346 if (err < 0)
72347 @@ -96,7 +96,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
72348 kern_msg->msg_name = NULL;
72349
72350 tot_len = iov_from_user_compat_to_kern(kern_iov,
72351 - (struct compat_iovec __user *)kern_msg->msg_iov,
72352 + (struct compat_iovec __force_user *)kern_msg->msg_iov,
72353 kern_msg->msg_iovlen);
72354 if (tot_len >= 0)
72355 kern_msg->msg_iov = kern_iov;
72356 @@ -116,20 +116,20 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
72357
72358 #define CMSG_COMPAT_FIRSTHDR(msg) \
72359 (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? \
72360 - (struct compat_cmsghdr __user *)((msg)->msg_control) : \
72361 + (struct compat_cmsghdr __force_user *)((msg)->msg_control) : \
72362 (struct compat_cmsghdr __user *)NULL)
72363
72364 #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
72365 ((ucmlen) >= sizeof(struct compat_cmsghdr) && \
72366 (ucmlen) <= (unsigned long) \
72367 ((mhdr)->msg_controllen - \
72368 - ((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
72369 + ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control)))
72370
72371 static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
72372 struct compat_cmsghdr __user *cmsg, int cmsg_len)
72373 {
72374 char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
72375 - if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) >
72376 + if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) >
72377 msg->msg_controllen)
72378 return NULL;
72379 return (struct compat_cmsghdr __user *)ptr;
72380 @@ -221,7 +221,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
72381 {
72382 struct compat_timeval ctv;
72383 struct compat_timespec cts[3];
72384 - struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
72385 + struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
72386 struct compat_cmsghdr cmhdr;
72387 int cmlen;
72388
72389 @@ -273,7 +273,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
72390
72391 void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
72392 {
72393 - struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
72394 + struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
72395 int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
72396 int fdnum = scm->fp->count;
72397 struct file **fp = scm->fp->fp;
72398 @@ -370,7 +370,7 @@ static int do_set_sock_timeout(struct socket *sock, int level,
72399 return -EFAULT;
72400 old_fs = get_fs();
72401 set_fs(KERNEL_DS);
72402 - err = sock_setsockopt(sock, level, optname, (char *)&ktime, sizeof(ktime));
72403 + err = sock_setsockopt(sock, level, optname, (char __force_user *)&ktime, sizeof(ktime));
72404 set_fs(old_fs);
72405
72406 return err;
72407 @@ -431,7 +431,7 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname,
72408 len = sizeof(ktime);
72409 old_fs = get_fs();
72410 set_fs(KERNEL_DS);
72411 - err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
72412 + err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len);
72413 set_fs(old_fs);
72414
72415 if (!err) {
72416 @@ -566,7 +566,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
72417 case MCAST_JOIN_GROUP:
72418 case MCAST_LEAVE_GROUP:
72419 {
72420 - struct compat_group_req __user *gr32 = (void *)optval;
72421 + struct compat_group_req __user *gr32 = (void __user *)optval;
72422 struct group_req __user *kgr =
72423 compat_alloc_user_space(sizeof(struct group_req));
72424 u32 interface;
72425 @@ -587,7 +587,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
72426 case MCAST_BLOCK_SOURCE:
72427 case MCAST_UNBLOCK_SOURCE:
72428 {
72429 - struct compat_group_source_req __user *gsr32 = (void *)optval;
72430 + struct compat_group_source_req __user *gsr32 = (void __user *)optval;
72431 struct group_source_req __user *kgsr = compat_alloc_user_space(
72432 sizeof(struct group_source_req));
72433 u32 interface;
72434 @@ -608,7 +608,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
72435 }
72436 case MCAST_MSFILTER:
72437 {
72438 - struct compat_group_filter __user *gf32 = (void *)optval;
72439 + struct compat_group_filter __user *gf32 = (void __user *)optval;
72440 struct group_filter __user *kgf;
72441 u32 interface, fmode, numsrc;
72442
72443 @@ -646,7 +646,7 @@ int compat_mc_getsockopt(struct sock *sock, int level, int optname,
72444 char __user *optval, int __user *optlen,
72445 int (*getsockopt)(struct sock *, int, int, char __user *, int __user *))
72446 {
72447 - struct compat_group_filter __user *gf32 = (void *)optval;
72448 + struct compat_group_filter __user *gf32 = (void __user *)optval;
72449 struct group_filter __user *kgf;
72450 int __user *koptlen;
72451 u32 interface, fmode, numsrc;
72452 diff --git a/net/core/datagram.c b/net/core/datagram.c
72453 index 68bbf9f..5ef0d12 100644
72454 --- a/net/core/datagram.c
72455 +++ b/net/core/datagram.c
72456 @@ -285,7 +285,7 @@ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
72457 }
72458
72459 kfree_skb(skb);
72460 - atomic_inc(&sk->sk_drops);
72461 + atomic_inc_unchecked(&sk->sk_drops);
72462 sk_mem_reclaim_partial(sk);
72463
72464 return err;
72465 diff --git a/net/core/dev.c b/net/core/dev.c
72466 index 5a13edf..a6f2bd2 100644
72467 --- a/net/core/dev.c
72468 +++ b/net/core/dev.c
72469 @@ -1139,10 +1139,14 @@ void dev_load(struct net *net, const char *name)
72470 if (no_module && capable(CAP_NET_ADMIN))
72471 no_module = request_module("netdev-%s", name);
72472 if (no_module && capable(CAP_SYS_MODULE)) {
72473 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
72474 + ___request_module(true, "grsec_modharden_netdev", "%s", name);
72475 +#else
72476 if (!request_module("%s", name))
72477 pr_err("Loading kernel module for a network device "
72478 "with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s "
72479 "instead\n", name);
72480 +#endif
72481 }
72482 }
72483 EXPORT_SYMBOL(dev_load);
72484 @@ -1573,7 +1577,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
72485 {
72486 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
72487 if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
72488 - atomic_long_inc(&dev->rx_dropped);
72489 + atomic_long_inc_unchecked(&dev->rx_dropped);
72490 kfree_skb(skb);
72491 return NET_RX_DROP;
72492 }
72493 @@ -1583,7 +1587,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
72494 nf_reset(skb);
72495
72496 if (unlikely(!is_skb_forwardable(dev, skb))) {
72497 - atomic_long_inc(&dev->rx_dropped);
72498 + atomic_long_inc_unchecked(&dev->rx_dropped);
72499 kfree_skb(skb);
72500 return NET_RX_DROP;
72501 }
72502 @@ -2036,7 +2040,7 @@ static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
72503
72504 struct dev_gso_cb {
72505 void (*destructor)(struct sk_buff *skb);
72506 -};
72507 +} __no_const;
72508
72509 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
72510
72511 @@ -2970,7 +2974,7 @@ enqueue:
72512
72513 local_irq_restore(flags);
72514
72515 - atomic_long_inc(&skb->dev->rx_dropped);
72516 + atomic_long_inc_unchecked(&skb->dev->rx_dropped);
72517 kfree_skb(skb);
72518 return NET_RX_DROP;
72519 }
72520 @@ -3044,7 +3048,7 @@ int netif_rx_ni(struct sk_buff *skb)
72521 }
72522 EXPORT_SYMBOL(netif_rx_ni);
72523
72524 -static void net_tx_action(struct softirq_action *h)
72525 +static void net_tx_action(void)
72526 {
72527 struct softnet_data *sd = &__get_cpu_var(softnet_data);
72528
72529 @@ -3333,7 +3337,7 @@ ncls:
72530 if (pt_prev) {
72531 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
72532 } else {
72533 - atomic_long_inc(&skb->dev->rx_dropped);
72534 + atomic_long_inc_unchecked(&skb->dev->rx_dropped);
72535 kfree_skb(skb);
72536 /* Jamal, now you will not able to escape explaining
72537 * me how you were going to use this. :-)
72538 @@ -3891,7 +3895,7 @@ void netif_napi_del(struct napi_struct *napi)
72539 }
72540 EXPORT_SYMBOL(netif_napi_del);
72541
72542 -static void net_rx_action(struct softirq_action *h)
72543 +static void net_rx_action(void)
72544 {
72545 struct softnet_data *sd = &__get_cpu_var(softnet_data);
72546 unsigned long time_limit = jiffies + 2;
72547 @@ -5949,7 +5953,7 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
72548 } else {
72549 netdev_stats_to_stats64(storage, &dev->stats);
72550 }
72551 - storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
72552 + storage->rx_dropped += atomic_long_read_unchecked(&dev->rx_dropped);
72553 return storage;
72554 }
72555 EXPORT_SYMBOL(dev_get_stats);
72556 diff --git a/net/core/flow.c b/net/core/flow.c
72557 index e318c7e..168b1d0 100644
72558 --- a/net/core/flow.c
72559 +++ b/net/core/flow.c
72560 @@ -61,7 +61,7 @@ struct flow_cache {
72561 struct timer_list rnd_timer;
72562 };
72563
72564 -atomic_t flow_cache_genid = ATOMIC_INIT(0);
72565 +atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
72566 EXPORT_SYMBOL(flow_cache_genid);
72567 static struct flow_cache flow_cache_global;
72568 static struct kmem_cache *flow_cachep __read_mostly;
72569 @@ -86,7 +86,7 @@ static void flow_cache_new_hashrnd(unsigned long arg)
72570
72571 static int flow_entry_valid(struct flow_cache_entry *fle)
72572 {
72573 - if (atomic_read(&flow_cache_genid) != fle->genid)
72574 + if (atomic_read_unchecked(&flow_cache_genid) != fle->genid)
72575 return 0;
72576 if (fle->object && !fle->object->ops->check(fle->object))
72577 return 0;
72578 @@ -259,7 +259,7 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
72579 hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
72580 fcp->hash_count++;
72581 }
72582 - } else if (likely(fle->genid == atomic_read(&flow_cache_genid))) {
72583 + } else if (likely(fle->genid == atomic_read_unchecked(&flow_cache_genid))) {
72584 flo = fle->object;
72585 if (!flo)
72586 goto ret_object;
72587 @@ -280,7 +280,7 @@ nocache:
72588 }
72589 flo = resolver(net, key, family, dir, flo, ctx);
72590 if (fle) {
72591 - fle->genid = atomic_read(&flow_cache_genid);
72592 + fle->genid = atomic_read_unchecked(&flow_cache_genid);
72593 if (!IS_ERR(flo))
72594 fle->object = flo;
72595 else
72596 diff --git a/net/core/iovec.c b/net/core/iovec.c
72597 index c40f27e..7f49254 100644
72598 --- a/net/core/iovec.c
72599 +++ b/net/core/iovec.c
72600 @@ -42,7 +42,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address,
72601 if (m->msg_namelen) {
72602 if (mode == VERIFY_READ) {
72603 void __user *namep;
72604 - namep = (void __user __force *) m->msg_name;
72605 + namep = (void __force_user *) m->msg_name;
72606 err = move_addr_to_kernel(namep, m->msg_namelen,
72607 address);
72608 if (err < 0)
72609 @@ -54,7 +54,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address,
72610 }
72611
72612 size = m->msg_iovlen * sizeof(struct iovec);
72613 - if (copy_from_user(iov, (void __user __force *) m->msg_iov, size))
72614 + if (copy_from_user(iov, (void __force_user *) m->msg_iov, size))
72615 return -EFAULT;
72616
72617 m->msg_iov = iov;
72618 diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
72619 index 9083e82..1673203 100644
72620 --- a/net/core/rtnetlink.c
72621 +++ b/net/core/rtnetlink.c
72622 @@ -57,7 +57,7 @@ struct rtnl_link {
72623 rtnl_doit_func doit;
72624 rtnl_dumpit_func dumpit;
72625 rtnl_calcit_func calcit;
72626 -};
72627 +} __no_const;
72628
72629 static DEFINE_MUTEX(rtnl_mutex);
72630 static u16 min_ifinfo_dump_size;
72631 diff --git a/net/core/scm.c b/net/core/scm.c
72632 index ff52ad0..aff1c0f 100644
72633 --- a/net/core/scm.c
72634 +++ b/net/core/scm.c
72635 @@ -220,7 +220,7 @@ EXPORT_SYMBOL(__scm_send);
72636 int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
72637 {
72638 struct cmsghdr __user *cm
72639 - = (__force struct cmsghdr __user *)msg->msg_control;
72640 + = (struct cmsghdr __force_user *)msg->msg_control;
72641 struct cmsghdr cmhdr;
72642 int cmlen = CMSG_LEN(len);
72643 int err;
72644 @@ -243,7 +243,7 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
72645 err = -EFAULT;
72646 if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
72647 goto out;
72648 - if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
72649 + if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr)))
72650 goto out;
72651 cmlen = CMSG_SPACE(len);
72652 if (msg->msg_controllen < cmlen)
72653 @@ -259,7 +259,7 @@ EXPORT_SYMBOL(put_cmsg);
72654 void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
72655 {
72656 struct cmsghdr __user *cm
72657 - = (__force struct cmsghdr __user*)msg->msg_control;
72658 + = (struct cmsghdr __force_user *)msg->msg_control;
72659
72660 int fdmax = 0;
72661 int fdnum = scm->fp->count;
72662 @@ -279,7 +279,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
72663 if (fdnum < fdmax)
72664 fdmax = fdnum;
72665
72666 - for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
72667 + for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax;
72668 i++, cmfptr++)
72669 {
72670 int new_fd;
72671 diff --git a/net/core/sock.c b/net/core/sock.c
72672 index b23f174..b9a0d26 100644
72673 --- a/net/core/sock.c
72674 +++ b/net/core/sock.c
72675 @@ -289,7 +289,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
72676 struct sk_buff_head *list = &sk->sk_receive_queue;
72677
72678 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
72679 - atomic_inc(&sk->sk_drops);
72680 + atomic_inc_unchecked(&sk->sk_drops);
72681 trace_sock_rcvqueue_full(sk, skb);
72682 return -ENOMEM;
72683 }
72684 @@ -299,7 +299,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
72685 return err;
72686
72687 if (!sk_rmem_schedule(sk, skb->truesize)) {
72688 - atomic_inc(&sk->sk_drops);
72689 + atomic_inc_unchecked(&sk->sk_drops);
72690 return -ENOBUFS;
72691 }
72692
72693 @@ -319,7 +319,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
72694 skb_dst_force(skb);
72695
72696 spin_lock_irqsave(&list->lock, flags);
72697 - skb->dropcount = atomic_read(&sk->sk_drops);
72698 + skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
72699 __skb_queue_tail(list, skb);
72700 spin_unlock_irqrestore(&list->lock, flags);
72701
72702 @@ -339,7 +339,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
72703 skb->dev = NULL;
72704
72705 if (sk_rcvqueues_full(sk, skb)) {
72706 - atomic_inc(&sk->sk_drops);
72707 + atomic_inc_unchecked(&sk->sk_drops);
72708 goto discard_and_relse;
72709 }
72710 if (nested)
72711 @@ -357,7 +357,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
72712 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
72713 } else if (sk_add_backlog(sk, skb)) {
72714 bh_unlock_sock(sk);
72715 - atomic_inc(&sk->sk_drops);
72716 + atomic_inc_unchecked(&sk->sk_drops);
72717 goto discard_and_relse;
72718 }
72719
72720 @@ -917,7 +917,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
72721 if (len > sizeof(peercred))
72722 len = sizeof(peercred);
72723 cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
72724 - if (copy_to_user(optval, &peercred, len))
72725 + if (len > sizeof(peercred) || copy_to_user(optval, &peercred, len))
72726 return -EFAULT;
72727 goto lenout;
72728 }
72729 @@ -930,7 +930,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
72730 return -ENOTCONN;
72731 if (lv < len)
72732 return -EINVAL;
72733 - if (copy_to_user(optval, address, len))
72734 + if (len > sizeof(address) || copy_to_user(optval, address, len))
72735 return -EFAULT;
72736 goto lenout;
72737 }
72738 @@ -963,7 +963,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
72739
72740 if (len > lv)
72741 len = lv;
72742 - if (copy_to_user(optval, &v, len))
72743 + if (len > sizeof(v) || copy_to_user(optval, &v, len))
72744 return -EFAULT;
72745 lenout:
72746 if (put_user(len, optlen))
72747 @@ -2020,7 +2020,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
72748 */
72749 smp_wmb();
72750 atomic_set(&sk->sk_refcnt, 1);
72751 - atomic_set(&sk->sk_drops, 0);
72752 + atomic_set_unchecked(&sk->sk_drops, 0);
72753 }
72754 EXPORT_SYMBOL(sock_init_data);
72755
72756 diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
72757 index 02e75d1..9a57a7c 100644
72758 --- a/net/decnet/sysctl_net_decnet.c
72759 +++ b/net/decnet/sysctl_net_decnet.c
72760 @@ -174,7 +174,7 @@ static int dn_node_address_handler(ctl_table *table, int write,
72761
72762 if (len > *lenp) len = *lenp;
72763
72764 - if (copy_to_user(buffer, addr, len))
72765 + if (len > sizeof addr || copy_to_user(buffer, addr, len))
72766 return -EFAULT;
72767
72768 *lenp = len;
72769 @@ -237,7 +237,7 @@ static int dn_def_dev_handler(ctl_table *table, int write,
72770
72771 if (len > *lenp) len = *lenp;
72772
72773 - if (copy_to_user(buffer, devname, len))
72774 + if (len > sizeof devname || copy_to_user(buffer, devname, len))
72775 return -EFAULT;
72776
72777 *lenp = len;
72778 diff --git a/net/econet/Kconfig b/net/econet/Kconfig
72779 index 39a2d29..f39c0fe 100644
72780 --- a/net/econet/Kconfig
72781 +++ b/net/econet/Kconfig
72782 @@ -4,7 +4,7 @@
72783
72784 config ECONET
72785 tristate "Acorn Econet/AUN protocols (EXPERIMENTAL)"
72786 - depends on EXPERIMENTAL && INET
72787 + depends on EXPERIMENTAL && INET && BROKEN
72788 ---help---
72789 Econet is a fairly old and slow networking protocol mainly used by
72790 Acorn computers to access file and print servers. It uses native
72791 diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
72792 index 92fc5f6..b790d91 100644
72793 --- a/net/ipv4/fib_frontend.c
72794 +++ b/net/ipv4/fib_frontend.c
72795 @@ -970,12 +970,12 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
72796 #ifdef CONFIG_IP_ROUTE_MULTIPATH
72797 fib_sync_up(dev);
72798 #endif
72799 - atomic_inc(&net->ipv4.dev_addr_genid);
72800 + atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
72801 rt_cache_flush(dev_net(dev), -1);
72802 break;
72803 case NETDEV_DOWN:
72804 fib_del_ifaddr(ifa, NULL);
72805 - atomic_inc(&net->ipv4.dev_addr_genid);
72806 + atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
72807 if (ifa->ifa_dev->ifa_list == NULL) {
72808 /* Last address was deleted from this interface.
72809 * Disable IP.
72810 @@ -1011,7 +1011,7 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
72811 #ifdef CONFIG_IP_ROUTE_MULTIPATH
72812 fib_sync_up(dev);
72813 #endif
72814 - atomic_inc(&net->ipv4.dev_addr_genid);
72815 + atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
72816 rt_cache_flush(dev_net(dev), -1);
72817 break;
72818 case NETDEV_DOWN:
72819 diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
72820 index 80106d8..232e898 100644
72821 --- a/net/ipv4/fib_semantics.c
72822 +++ b/net/ipv4/fib_semantics.c
72823 @@ -699,7 +699,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh)
72824 nh->nh_saddr = inet_select_addr(nh->nh_dev,
72825 nh->nh_gw,
72826 nh->nh_parent->fib_scope);
72827 - nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
72828 + nh->nh_saddr_genid = atomic_read_unchecked(&net->ipv4.dev_addr_genid);
72829
72830 return nh->nh_saddr;
72831 }
72832 diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
72833 index ccee270..db23c3c 100644
72834 --- a/net/ipv4/inet_diag.c
72835 +++ b/net/ipv4/inet_diag.c
72836 @@ -114,8 +114,14 @@ static int inet_csk_diag_fill(struct sock *sk,
72837 r->idiag_retrans = 0;
72838
72839 r->id.idiag_if = sk->sk_bound_dev_if;
72840 +
72841 +#ifdef CONFIG_GRKERNSEC_HIDESYM
72842 + r->id.idiag_cookie[0] = 0;
72843 + r->id.idiag_cookie[1] = 0;
72844 +#else
72845 r->id.idiag_cookie[0] = (u32)(unsigned long)sk;
72846 r->id.idiag_cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
72847 +#endif
72848
72849 r->id.idiag_sport = inet->inet_sport;
72850 r->id.idiag_dport = inet->inet_dport;
72851 @@ -210,8 +216,15 @@ static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
72852 r->idiag_family = tw->tw_family;
72853 r->idiag_retrans = 0;
72854 r->id.idiag_if = tw->tw_bound_dev_if;
72855 +
72856 +#ifdef CONFIG_GRKERNSEC_HIDESYM
72857 + r->id.idiag_cookie[0] = 0;
72858 + r->id.idiag_cookie[1] = 0;
72859 +#else
72860 r->id.idiag_cookie[0] = (u32)(unsigned long)tw;
72861 r->id.idiag_cookie[1] = (u32)(((unsigned long)tw >> 31) >> 1);
72862 +#endif
72863 +
72864 r->id.idiag_sport = tw->tw_sport;
72865 r->id.idiag_dport = tw->tw_dport;
72866 r->id.idiag_src[0] = tw->tw_rcv_saddr;
72867 @@ -294,12 +307,14 @@ static int inet_diag_get_exact(struct sk_buff *in_skb,
72868 if (sk == NULL)
72869 goto unlock;
72870
72871 +#ifndef CONFIG_GRKERNSEC_HIDESYM
72872 err = -ESTALE;
72873 if ((req->id.idiag_cookie[0] != INET_DIAG_NOCOOKIE ||
72874 req->id.idiag_cookie[1] != INET_DIAG_NOCOOKIE) &&
72875 ((u32)(unsigned long)sk != req->id.idiag_cookie[0] ||
72876 (u32)((((unsigned long)sk) >> 31) >> 1) != req->id.idiag_cookie[1]))
72877 goto out;
72878 +#endif
72879
72880 err = -ENOMEM;
72881 rep = alloc_skb(NLMSG_SPACE((sizeof(struct inet_diag_msg) +
72882 @@ -589,8 +604,14 @@ static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk,
72883 r->idiag_retrans = req->retrans;
72884
72885 r->id.idiag_if = sk->sk_bound_dev_if;
72886 +
72887 +#ifdef CONFIG_GRKERNSEC_HIDESYM
72888 + r->id.idiag_cookie[0] = 0;
72889 + r->id.idiag_cookie[1] = 0;
72890 +#else
72891 r->id.idiag_cookie[0] = (u32)(unsigned long)req;
72892 r->id.idiag_cookie[1] = (u32)(((unsigned long)req >> 31) >> 1);
72893 +#endif
72894
72895 tmo = req->expires - jiffies;
72896 if (tmo < 0)
72897 diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
72898 index 984ec65..97ac518 100644
72899 --- a/net/ipv4/inet_hashtables.c
72900 +++ b/net/ipv4/inet_hashtables.c
72901 @@ -18,12 +18,15 @@
72902 #include <linux/sched.h>
72903 #include <linux/slab.h>
72904 #include <linux/wait.h>
72905 +#include <linux/security.h>
72906
72907 #include <net/inet_connection_sock.h>
72908 #include <net/inet_hashtables.h>
72909 #include <net/secure_seq.h>
72910 #include <net/ip.h>
72911
72912 +extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
72913 +
72914 /*
72915 * Allocate and initialize a new local port bind bucket.
72916 * The bindhash mutex for snum's hash chain must be held here.
72917 @@ -530,6 +533,8 @@ ok:
72918 twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
72919 spin_unlock(&head->lock);
72920
72921 + gr_update_task_in_ip_table(current, inet_sk(sk));
72922 +
72923 if (tw) {
72924 inet_twsk_deschedule(tw, death_row);
72925 while (twrefcnt) {
72926 diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
72927 index 86f13c67..59a35b5 100644
72928 --- a/net/ipv4/inetpeer.c
72929 +++ b/net/ipv4/inetpeer.c
72930 @@ -436,8 +436,8 @@ relookup:
72931 if (p) {
72932 p->daddr = *daddr;
72933 atomic_set(&p->refcnt, 1);
72934 - atomic_set(&p->rid, 0);
72935 - atomic_set(&p->ip_id_count,
72936 + atomic_set_unchecked(&p->rid, 0);
72937 + atomic_set_unchecked(&p->ip_id_count,
72938 (daddr->family == AF_INET) ?
72939 secure_ip_id(daddr->addr.a4) :
72940 secure_ipv6_id(daddr->addr.a6));
72941 diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
72942 index fdaabf2..0ec3205 100644
72943 --- a/net/ipv4/ip_fragment.c
72944 +++ b/net/ipv4/ip_fragment.c
72945 @@ -316,7 +316,7 @@ static inline int ip_frag_too_far(struct ipq *qp)
72946 return 0;
72947
72948 start = qp->rid;
72949 - end = atomic_inc_return(&peer->rid);
72950 + end = atomic_inc_return_unchecked(&peer->rid);
72951 qp->rid = end;
72952
72953 rc = qp->q.fragments && (end - start) > max;
72954 diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
72955 index 09ff51b..d3968eb 100644
72956 --- a/net/ipv4/ip_sockglue.c
72957 +++ b/net/ipv4/ip_sockglue.c
72958 @@ -1111,7 +1111,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
72959 len = min_t(unsigned int, len, opt->optlen);
72960 if (put_user(len, optlen))
72961 return -EFAULT;
72962 - if (copy_to_user(optval, opt->__data, len))
72963 + if ((len > (sizeof(optbuf) - sizeof(struct ip_options))) ||
72964 + copy_to_user(optval, opt->__data, len))
72965 return -EFAULT;
72966 return 0;
72967 }
72968 @@ -1239,7 +1240,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
72969 if (sk->sk_type != SOCK_STREAM)
72970 return -ENOPROTOOPT;
72971
72972 - msg.msg_control = optval;
72973 + msg.msg_control = (void __force_kernel *)optval;
72974 msg.msg_controllen = len;
72975 msg.msg_flags = flags;
72976
72977 diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
72978 index 99ec116..c5628fe 100644
72979 --- a/net/ipv4/ipconfig.c
72980 +++ b/net/ipv4/ipconfig.c
72981 @@ -318,7 +318,7 @@ static int __init ic_devinet_ioctl(unsigned int cmd, struct ifreq *arg)
72982
72983 mm_segment_t oldfs = get_fs();
72984 set_fs(get_ds());
72985 - res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
72986 + res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
72987 set_fs(oldfs);
72988 return res;
72989 }
72990 @@ -329,7 +329,7 @@ static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg)
72991
72992 mm_segment_t oldfs = get_fs();
72993 set_fs(get_ds());
72994 - res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
72995 + res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
72996 set_fs(oldfs);
72997 return res;
72998 }
72999 @@ -340,7 +340,7 @@ static int __init ic_route_ioctl(unsigned int cmd, struct rtentry *arg)
73000
73001 mm_segment_t oldfs = get_fs();
73002 set_fs(get_ds());
73003 - res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg);
73004 + res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg);
73005 set_fs(oldfs);
73006 return res;
73007 }
73008 diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c
73009 index 2133c30..5c4b40b 100644
73010 --- a/net/ipv4/netfilter/nf_nat_snmp_basic.c
73011 +++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c
73012 @@ -399,7 +399,7 @@ static unsigned char asn1_octets_decode(struct asn1_ctx *ctx,
73013
73014 *len = 0;
73015
73016 - *octets = kmalloc(eoc - ctx->pointer, GFP_ATOMIC);
73017 + *octets = kmalloc((eoc - ctx->pointer), GFP_ATOMIC);
73018 if (*octets == NULL)
73019 return 0;
73020
73021 diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
73022 index 43d4c3b..1914409 100644
73023 --- a/net/ipv4/ping.c
73024 +++ b/net/ipv4/ping.c
73025 @@ -836,7 +836,7 @@ static void ping_format_sock(struct sock *sp, struct seq_file *f,
73026 sk_rmem_alloc_get(sp),
73027 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
73028 atomic_read(&sp->sk_refcnt), sp,
73029 - atomic_read(&sp->sk_drops), len);
73030 + atomic_read_unchecked(&sp->sk_drops), len);
73031 }
73032
73033 static int ping_seq_show(struct seq_file *seq, void *v)
73034 diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
73035 index 007e2eb..85a18a0 100644
73036 --- a/net/ipv4/raw.c
73037 +++ b/net/ipv4/raw.c
73038 @@ -303,7 +303,7 @@ static int raw_rcv_skb(struct sock * sk, struct sk_buff * skb)
73039 int raw_rcv(struct sock *sk, struct sk_buff *skb)
73040 {
73041 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
73042 - atomic_inc(&sk->sk_drops);
73043 + atomic_inc_unchecked(&sk->sk_drops);
73044 kfree_skb(skb);
73045 return NET_RX_DROP;
73046 }
73047 @@ -738,16 +738,20 @@ static int raw_init(struct sock *sk)
73048
73049 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
73050 {
73051 + struct icmp_filter filter;
73052 +
73053 if (optlen > sizeof(struct icmp_filter))
73054 optlen = sizeof(struct icmp_filter);
73055 - if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
73056 + if (copy_from_user(&filter, optval, optlen))
73057 return -EFAULT;
73058 + raw_sk(sk)->filter = filter;
73059 return 0;
73060 }
73061
73062 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
73063 {
73064 int len, ret = -EFAULT;
73065 + struct icmp_filter filter;
73066
73067 if (get_user(len, optlen))
73068 goto out;
73069 @@ -757,8 +761,8 @@ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *o
73070 if (len > sizeof(struct icmp_filter))
73071 len = sizeof(struct icmp_filter);
73072 ret = -EFAULT;
73073 - if (put_user(len, optlen) ||
73074 - copy_to_user(optval, &raw_sk(sk)->filter, len))
73075 + filter = raw_sk(sk)->filter;
73076 + if (put_user(len, optlen) || len > sizeof filter || copy_to_user(optval, &filter, len))
73077 goto out;
73078 ret = 0;
73079 out: return ret;
73080 @@ -986,7 +990,13 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
73081 sk_wmem_alloc_get(sp),
73082 sk_rmem_alloc_get(sp),
73083 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
73084 - atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
73085 + atomic_read(&sp->sk_refcnt),
73086 +#ifdef CONFIG_GRKERNSEC_HIDESYM
73087 + NULL,
73088 +#else
73089 + sp,
73090 +#endif
73091 + atomic_read_unchecked(&sp->sk_drops));
73092 }
73093
73094 static int raw_seq_show(struct seq_file *seq, void *v)
73095 diff --git a/net/ipv4/route.c b/net/ipv4/route.c
73096 index 94cdbc5..0cb0063 100644
73097 --- a/net/ipv4/route.c
73098 +++ b/net/ipv4/route.c
73099 @@ -313,7 +313,7 @@ static inline unsigned int rt_hash(__be32 daddr, __be32 saddr, int idx,
73100
73101 static inline int rt_genid(struct net *net)
73102 {
73103 - return atomic_read(&net->ipv4.rt_genid);
73104 + return atomic_read_unchecked(&net->ipv4.rt_genid);
73105 }
73106
73107 #ifdef CONFIG_PROC_FS
73108 @@ -937,7 +937,7 @@ static void rt_cache_invalidate(struct net *net)
73109 unsigned char shuffle;
73110
73111 get_random_bytes(&shuffle, sizeof(shuffle));
73112 - atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
73113 + atomic_add_unchecked(shuffle + 1U, &net->ipv4.rt_genid);
73114 redirect_genid++;
73115 }
73116
73117 @@ -3022,7 +3022,7 @@ static int rt_fill_info(struct net *net,
73118 error = rt->dst.error;
73119 if (peer) {
73120 inet_peer_refcheck(rt->peer);
73121 - id = atomic_read(&peer->ip_id_count) & 0xffff;
73122 + id = atomic_read_unchecked(&peer->ip_id_count) & 0xffff;
73123 if (peer->tcp_ts_stamp) {
73124 ts = peer->tcp_ts;
73125 tsage = get_seconds() - peer->tcp_ts_stamp;
73126 diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
73127 index c89e354..8bd55c8 100644
73128 --- a/net/ipv4/tcp_ipv4.c
73129 +++ b/net/ipv4/tcp_ipv4.c
73130 @@ -87,6 +87,9 @@ int sysctl_tcp_tw_reuse __read_mostly;
73131 int sysctl_tcp_low_latency __read_mostly;
73132 EXPORT_SYMBOL(sysctl_tcp_low_latency);
73133
73134 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73135 +extern int grsec_enable_blackhole;
73136 +#endif
73137
73138 #ifdef CONFIG_TCP_MD5SIG
73139 static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
73140 @@ -1627,6 +1630,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
73141 return 0;
73142
73143 reset:
73144 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73145 + if (!grsec_enable_blackhole)
73146 +#endif
73147 tcp_v4_send_reset(rsk, skb);
73148 discard:
73149 kfree_skb(skb);
73150 @@ -1689,12 +1695,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
73151 TCP_SKB_CB(skb)->sacked = 0;
73152
73153 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
73154 - if (!sk)
73155 + if (!sk) {
73156 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73157 + ret = 1;
73158 +#endif
73159 goto no_tcp_socket;
73160 -
73161 + }
73162 process:
73163 - if (sk->sk_state == TCP_TIME_WAIT)
73164 + if (sk->sk_state == TCP_TIME_WAIT) {
73165 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73166 + ret = 2;
73167 +#endif
73168 goto do_time_wait;
73169 + }
73170
73171 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
73172 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
73173 @@ -1744,6 +1757,10 @@ no_tcp_socket:
73174 bad_packet:
73175 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
73176 } else {
73177 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73178 + if (!grsec_enable_blackhole || (ret == 1 &&
73179 + (skb->dev->flags & IFF_LOOPBACK)))
73180 +#endif
73181 tcp_v4_send_reset(NULL, skb);
73182 }
73183
73184 @@ -2404,7 +2421,11 @@ static void get_openreq4(const struct sock *sk, const struct request_sock *req,
73185 0, /* non standard timer */
73186 0, /* open_requests have no inode */
73187 atomic_read(&sk->sk_refcnt),
73188 +#ifdef CONFIG_GRKERNSEC_HIDESYM
73189 + NULL,
73190 +#else
73191 req,
73192 +#endif
73193 len);
73194 }
73195
73196 @@ -2454,7 +2475,12 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
73197 sock_i_uid(sk),
73198 icsk->icsk_probes_out,
73199 sock_i_ino(sk),
73200 - atomic_read(&sk->sk_refcnt), sk,
73201 + atomic_read(&sk->sk_refcnt),
73202 +#ifdef CONFIG_GRKERNSEC_HIDESYM
73203 + NULL,
73204 +#else
73205 + sk,
73206 +#endif
73207 jiffies_to_clock_t(icsk->icsk_rto),
73208 jiffies_to_clock_t(icsk->icsk_ack.ato),
73209 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
73210 @@ -2482,7 +2508,13 @@ static void get_timewait4_sock(const struct inet_timewait_sock *tw,
73211 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK%n",
73212 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
73213 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
73214 - atomic_read(&tw->tw_refcnt), tw, len);
73215 + atomic_read(&tw->tw_refcnt),
73216 +#ifdef CONFIG_GRKERNSEC_HIDESYM
73217 + NULL,
73218 +#else
73219 + tw,
73220 +#endif
73221 + len);
73222 }
73223
73224 #define TMPSZ 150
73225 diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
73226 index 66363b6..b0654a3 100644
73227 --- a/net/ipv4/tcp_minisocks.c
73228 +++ b/net/ipv4/tcp_minisocks.c
73229 @@ -27,6 +27,10 @@
73230 #include <net/inet_common.h>
73231 #include <net/xfrm.h>
73232
73233 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73234 +extern int grsec_enable_blackhole;
73235 +#endif
73236 +
73237 int sysctl_tcp_syncookies __read_mostly = 1;
73238 EXPORT_SYMBOL(sysctl_tcp_syncookies);
73239
73240 @@ -751,6 +755,10 @@ listen_overflow:
73241
73242 embryonic_reset:
73243 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
73244 +
73245 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73246 + if (!grsec_enable_blackhole)
73247 +#endif
73248 if (!(flg & TCP_FLAG_RST))
73249 req->rsk_ops->send_reset(sk, skb);
73250
73251 diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
73252 index 85ee7eb..53277ab 100644
73253 --- a/net/ipv4/tcp_probe.c
73254 +++ b/net/ipv4/tcp_probe.c
73255 @@ -202,7 +202,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf,
73256 if (cnt + width >= len)
73257 break;
73258
73259 - if (copy_to_user(buf + cnt, tbuf, width))
73260 + if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
73261 return -EFAULT;
73262 cnt += width;
73263 }
73264 diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
73265 index 2e0f0af..e2948bf 100644
73266 --- a/net/ipv4/tcp_timer.c
73267 +++ b/net/ipv4/tcp_timer.c
73268 @@ -22,6 +22,10 @@
73269 #include <linux/gfp.h>
73270 #include <net/tcp.h>
73271
73272 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73273 +extern int grsec_lastack_retries;
73274 +#endif
73275 +
73276 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
73277 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
73278 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
73279 @@ -199,6 +203,13 @@ static int tcp_write_timeout(struct sock *sk)
73280 }
73281 }
73282
73283 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73284 + if ((sk->sk_state == TCP_LAST_ACK) &&
73285 + (grsec_lastack_retries > 0) &&
73286 + (grsec_lastack_retries < retry_until))
73287 + retry_until = grsec_lastack_retries;
73288 +#endif
73289 +
73290 if (retransmits_timed_out(sk, retry_until,
73291 syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
73292 /* Has it gone just too far? */
73293 diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
73294 index 5a65eea..bd913a1 100644
73295 --- a/net/ipv4/udp.c
73296 +++ b/net/ipv4/udp.c
73297 @@ -86,6 +86,7 @@
73298 #include <linux/types.h>
73299 #include <linux/fcntl.h>
73300 #include <linux/module.h>
73301 +#include <linux/security.h>
73302 #include <linux/socket.h>
73303 #include <linux/sockios.h>
73304 #include <linux/igmp.h>
73305 @@ -108,6 +109,10 @@
73306 #include <trace/events/udp.h>
73307 #include "udp_impl.h"
73308
73309 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73310 +extern int grsec_enable_blackhole;
73311 +#endif
73312 +
73313 struct udp_table udp_table __read_mostly;
73314 EXPORT_SYMBOL(udp_table);
73315
73316 @@ -565,6 +570,9 @@ found:
73317 return s;
73318 }
73319
73320 +extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
73321 +extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
73322 +
73323 /*
73324 * This routine is called by the ICMP module when it gets some
73325 * sort of error condition. If err < 0 then the socket should
73326 @@ -856,9 +864,18 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
73327 dport = usin->sin_port;
73328 if (dport == 0)
73329 return -EINVAL;
73330 +
73331 + err = gr_search_udp_sendmsg(sk, usin);
73332 + if (err)
73333 + return err;
73334 } else {
73335 if (sk->sk_state != TCP_ESTABLISHED)
73336 return -EDESTADDRREQ;
73337 +
73338 + err = gr_search_udp_sendmsg(sk, NULL);
73339 + if (err)
73340 + return err;
73341 +
73342 daddr = inet->inet_daddr;
73343 dport = inet->inet_dport;
73344 /* Open fast path for connected socket.
73345 @@ -1099,7 +1116,7 @@ static unsigned int first_packet_length(struct sock *sk)
73346 udp_lib_checksum_complete(skb)) {
73347 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
73348 IS_UDPLITE(sk));
73349 - atomic_inc(&sk->sk_drops);
73350 + atomic_inc_unchecked(&sk->sk_drops);
73351 __skb_unlink(skb, rcvq);
73352 __skb_queue_tail(&list_kill, skb);
73353 }
73354 @@ -1185,6 +1202,10 @@ try_again:
73355 if (!skb)
73356 goto out;
73357
73358 + err = gr_search_udp_recvmsg(sk, skb);
73359 + if (err)
73360 + goto out_free;
73361 +
73362 ulen = skb->len - sizeof(struct udphdr);
73363 copied = len;
73364 if (copied > ulen)
73365 @@ -1487,7 +1508,7 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
73366
73367 drop:
73368 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
73369 - atomic_inc(&sk->sk_drops);
73370 + atomic_inc_unchecked(&sk->sk_drops);
73371 kfree_skb(skb);
73372 return -1;
73373 }
73374 @@ -1506,7 +1527,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
73375 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
73376
73377 if (!skb1) {
73378 - atomic_inc(&sk->sk_drops);
73379 + atomic_inc_unchecked(&sk->sk_drops);
73380 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
73381 IS_UDPLITE(sk));
73382 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
73383 @@ -1675,6 +1696,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
73384 goto csum_error;
73385
73386 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
73387 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73388 + if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
73389 +#endif
73390 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
73391
73392 /*
73393 @@ -2098,8 +2122,13 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
73394 sk_wmem_alloc_get(sp),
73395 sk_rmem_alloc_get(sp),
73396 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
73397 - atomic_read(&sp->sk_refcnt), sp,
73398 - atomic_read(&sp->sk_drops), len);
73399 + atomic_read(&sp->sk_refcnt),
73400 +#ifdef CONFIG_GRKERNSEC_HIDESYM
73401 + NULL,
73402 +#else
73403 + sp,
73404 +#endif
73405 + atomic_read_unchecked(&sp->sk_drops), len);
73406 }
73407
73408 int udp4_seq_show(struct seq_file *seq, void *v)
73409 diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
73410 index 836c4ea..cbb74dc 100644
73411 --- a/net/ipv6/addrconf.c
73412 +++ b/net/ipv6/addrconf.c
73413 @@ -2149,7 +2149,7 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg)
73414 p.iph.ihl = 5;
73415 p.iph.protocol = IPPROTO_IPV6;
73416 p.iph.ttl = 64;
73417 - ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
73418 + ifr.ifr_ifru.ifru_data = (void __force_user *)&p;
73419
73420 if (ops->ndo_do_ioctl) {
73421 mm_segment_t oldfs = get_fs();
73422 diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
73423 index 1567fb1..29af910 100644
73424 --- a/net/ipv6/inet6_connection_sock.c
73425 +++ b/net/ipv6/inet6_connection_sock.c
73426 @@ -178,7 +178,7 @@ void __inet6_csk_dst_store(struct sock *sk, struct dst_entry *dst,
73427 #ifdef CONFIG_XFRM
73428 {
73429 struct rt6_info *rt = (struct rt6_info *)dst;
73430 - rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid);
73431 + rt->rt6i_flow_cache_genid = atomic_read_unchecked(&flow_cache_genid);
73432 }
73433 #endif
73434 }
73435 @@ -193,7 +193,7 @@ struct dst_entry *__inet6_csk_dst_check(struct sock *sk, u32 cookie)
73436 #ifdef CONFIG_XFRM
73437 if (dst) {
73438 struct rt6_info *rt = (struct rt6_info *)dst;
73439 - if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) {
73440 + if (rt->rt6i_flow_cache_genid != atomic_read_unchecked(&flow_cache_genid)) {
73441 __sk_dst_reset(sk);
73442 dst = NULL;
73443 }
73444 diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
73445 index 26cb08c..8af9877 100644
73446 --- a/net/ipv6/ipv6_sockglue.c
73447 +++ b/net/ipv6/ipv6_sockglue.c
73448 @@ -960,7 +960,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
73449 if (sk->sk_type != SOCK_STREAM)
73450 return -ENOPROTOOPT;
73451
73452 - msg.msg_control = optval;
73453 + msg.msg_control = (void __force_kernel *)optval;
73454 msg.msg_controllen = len;
73455 msg.msg_flags = flags;
73456
73457 diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
73458 index 361ebf3..d5628fb 100644
73459 --- a/net/ipv6/raw.c
73460 +++ b/net/ipv6/raw.c
73461 @@ -377,7 +377,7 @@ static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb)
73462 {
73463 if ((raw6_sk(sk)->checksum || rcu_access_pointer(sk->sk_filter)) &&
73464 skb_checksum_complete(skb)) {
73465 - atomic_inc(&sk->sk_drops);
73466 + atomic_inc_unchecked(&sk->sk_drops);
73467 kfree_skb(skb);
73468 return NET_RX_DROP;
73469 }
73470 @@ -404,7 +404,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
73471 struct raw6_sock *rp = raw6_sk(sk);
73472
73473 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
73474 - atomic_inc(&sk->sk_drops);
73475 + atomic_inc_unchecked(&sk->sk_drops);
73476 kfree_skb(skb);
73477 return NET_RX_DROP;
73478 }
73479 @@ -428,7 +428,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
73480
73481 if (inet->hdrincl) {
73482 if (skb_checksum_complete(skb)) {
73483 - atomic_inc(&sk->sk_drops);
73484 + atomic_inc_unchecked(&sk->sk_drops);
73485 kfree_skb(skb);
73486 return NET_RX_DROP;
73487 }
73488 @@ -601,7 +601,7 @@ out:
73489 return err;
73490 }
73491
73492 -static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
73493 +static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
73494 struct flowi6 *fl6, struct dst_entry **dstp,
73495 unsigned int flags)
73496 {
73497 @@ -909,12 +909,15 @@ do_confirm:
73498 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
73499 char __user *optval, int optlen)
73500 {
73501 + struct icmp6_filter filter;
73502 +
73503 switch (optname) {
73504 case ICMPV6_FILTER:
73505 if (optlen > sizeof(struct icmp6_filter))
73506 optlen = sizeof(struct icmp6_filter);
73507 - if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
73508 + if (copy_from_user(&filter, optval, optlen))
73509 return -EFAULT;
73510 + raw6_sk(sk)->filter = filter;
73511 return 0;
73512 default:
73513 return -ENOPROTOOPT;
73514 @@ -927,6 +930,7 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
73515 char __user *optval, int __user *optlen)
73516 {
73517 int len;
73518 + struct icmp6_filter filter;
73519
73520 switch (optname) {
73521 case ICMPV6_FILTER:
73522 @@ -938,7 +942,8 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
73523 len = sizeof(struct icmp6_filter);
73524 if (put_user(len, optlen))
73525 return -EFAULT;
73526 - if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
73527 + filter = raw6_sk(sk)->filter;
73528 + if (len > sizeof filter || copy_to_user(optval, &filter, len))
73529 return -EFAULT;
73530 return 0;
73531 default:
73532 @@ -1245,7 +1250,13 @@ static void raw6_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
73533 0, 0L, 0,
73534 sock_i_uid(sp), 0,
73535 sock_i_ino(sp),
73536 - atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
73537 + atomic_read(&sp->sk_refcnt),
73538 +#ifdef CONFIG_GRKERNSEC_HIDESYM
73539 + NULL,
73540 +#else
73541 + sp,
73542 +#endif
73543 + atomic_read_unchecked(&sp->sk_drops));
73544 }
73545
73546 static int raw6_seq_show(struct seq_file *seq, void *v)
73547 diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
73548 index b859e4a..f9d1589 100644
73549 --- a/net/ipv6/tcp_ipv6.c
73550 +++ b/net/ipv6/tcp_ipv6.c
73551 @@ -93,6 +93,10 @@ static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
73552 }
73553 #endif
73554
73555 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73556 +extern int grsec_enable_blackhole;
73557 +#endif
73558 +
73559 static void tcp_v6_hash(struct sock *sk)
73560 {
73561 if (sk->sk_state != TCP_CLOSE) {
73562 @@ -1651,6 +1655,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
73563 return 0;
73564
73565 reset:
73566 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73567 + if (!grsec_enable_blackhole)
73568 +#endif
73569 tcp_v6_send_reset(sk, skb);
73570 discard:
73571 if (opt_skb)
73572 @@ -1730,12 +1737,20 @@ static int tcp_v6_rcv(struct sk_buff *skb)
73573 TCP_SKB_CB(skb)->sacked = 0;
73574
73575 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
73576 - if (!sk)
73577 + if (!sk) {
73578 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73579 + ret = 1;
73580 +#endif
73581 goto no_tcp_socket;
73582 + }
73583
73584 process:
73585 - if (sk->sk_state == TCP_TIME_WAIT)
73586 + if (sk->sk_state == TCP_TIME_WAIT) {
73587 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73588 + ret = 2;
73589 +#endif
73590 goto do_time_wait;
73591 + }
73592
73593 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
73594 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
73595 @@ -1783,6 +1798,10 @@ no_tcp_socket:
73596 bad_packet:
73597 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
73598 } else {
73599 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73600 + if (!grsec_enable_blackhole || (ret == 1 &&
73601 + (skb->dev->flags & IFF_LOOPBACK)))
73602 +#endif
73603 tcp_v6_send_reset(NULL, skb);
73604 }
73605
73606 @@ -2043,7 +2062,13 @@ static void get_openreq6(struct seq_file *seq,
73607 uid,
73608 0, /* non standard timer */
73609 0, /* open_requests have no inode */
73610 - 0, req);
73611 + 0,
73612 +#ifdef CONFIG_GRKERNSEC_HIDESYM
73613 + NULL
73614 +#else
73615 + req
73616 +#endif
73617 + );
73618 }
73619
73620 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
73621 @@ -2093,7 +2118,12 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
73622 sock_i_uid(sp),
73623 icsk->icsk_probes_out,
73624 sock_i_ino(sp),
73625 - atomic_read(&sp->sk_refcnt), sp,
73626 + atomic_read(&sp->sk_refcnt),
73627 +#ifdef CONFIG_GRKERNSEC_HIDESYM
73628 + NULL,
73629 +#else
73630 + sp,
73631 +#endif
73632 jiffies_to_clock_t(icsk->icsk_rto),
73633 jiffies_to_clock_t(icsk->icsk_ack.ato),
73634 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
73635 @@ -2128,7 +2158,13 @@ static void get_timewait6_sock(struct seq_file *seq,
73636 dest->s6_addr32[2], dest->s6_addr32[3], destp,
73637 tw->tw_substate, 0, 0,
73638 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
73639 - atomic_read(&tw->tw_refcnt), tw);
73640 + atomic_read(&tw->tw_refcnt),
73641 +#ifdef CONFIG_GRKERNSEC_HIDESYM
73642 + NULL
73643 +#else
73644 + tw
73645 +#endif
73646 + );
73647 }
73648
73649 static int tcp6_seq_show(struct seq_file *seq, void *v)
73650 diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
73651 index 8c25419..47a51ae 100644
73652 --- a/net/ipv6/udp.c
73653 +++ b/net/ipv6/udp.c
73654 @@ -50,6 +50,10 @@
73655 #include <linux/seq_file.h>
73656 #include "udp_impl.h"
73657
73658 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73659 +extern int grsec_enable_blackhole;
73660 +#endif
73661 +
73662 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
73663 {
73664 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
73665 @@ -549,7 +553,7 @@ int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
73666
73667 return 0;
73668 drop:
73669 - atomic_inc(&sk->sk_drops);
73670 + atomic_inc_unchecked(&sk->sk_drops);
73671 drop_no_sk_drops_inc:
73672 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
73673 kfree_skb(skb);
73674 @@ -625,7 +629,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
73675 continue;
73676 }
73677 drop:
73678 - atomic_inc(&sk->sk_drops);
73679 + atomic_inc_unchecked(&sk->sk_drops);
73680 UDP6_INC_STATS_BH(sock_net(sk),
73681 UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk));
73682 UDP6_INC_STATS_BH(sock_net(sk),
73683 @@ -780,6 +784,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
73684 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
73685 proto == IPPROTO_UDPLITE);
73686
73687 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73688 + if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
73689 +#endif
73690 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
73691
73692 kfree_skb(skb);
73693 @@ -796,7 +803,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
73694 if (!sock_owned_by_user(sk))
73695 udpv6_queue_rcv_skb(sk, skb);
73696 else if (sk_add_backlog(sk, skb)) {
73697 - atomic_inc(&sk->sk_drops);
73698 + atomic_inc_unchecked(&sk->sk_drops);
73699 bh_unlock_sock(sk);
73700 sock_put(sk);
73701 goto discard;
73702 @@ -1407,8 +1414,13 @@ static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket
73703 0, 0L, 0,
73704 sock_i_uid(sp), 0,
73705 sock_i_ino(sp),
73706 - atomic_read(&sp->sk_refcnt), sp,
73707 - atomic_read(&sp->sk_drops));
73708 + atomic_read(&sp->sk_refcnt),
73709 +#ifdef CONFIG_GRKERNSEC_HIDESYM
73710 + NULL,
73711 +#else
73712 + sp,
73713 +#endif
73714 + atomic_read_unchecked(&sp->sk_drops));
73715 }
73716
73717 int udp6_seq_show(struct seq_file *seq, void *v)
73718 diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
73719 index 253695d..9481ce8 100644
73720 --- a/net/irda/ircomm/ircomm_tty.c
73721 +++ b/net/irda/ircomm/ircomm_tty.c
73722 @@ -282,16 +282,16 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
73723 add_wait_queue(&self->open_wait, &wait);
73724
73725 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
73726 - __FILE__,__LINE__, tty->driver->name, self->open_count );
73727 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
73728
73729 /* As far as I can see, we protect open_count - Jean II */
73730 spin_lock_irqsave(&self->spinlock, flags);
73731 if (!tty_hung_up_p(filp)) {
73732 extra_count = 1;
73733 - self->open_count--;
73734 + local_dec(&self->open_count);
73735 }
73736 spin_unlock_irqrestore(&self->spinlock, flags);
73737 - self->blocked_open++;
73738 + local_inc(&self->blocked_open);
73739
73740 while (1) {
73741 if (tty->termios->c_cflag & CBAUD) {
73742 @@ -331,7 +331,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
73743 }
73744
73745 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
73746 - __FILE__,__LINE__, tty->driver->name, self->open_count );
73747 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
73748
73749 schedule();
73750 }
73751 @@ -342,13 +342,13 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
73752 if (extra_count) {
73753 /* ++ is not atomic, so this should be protected - Jean II */
73754 spin_lock_irqsave(&self->spinlock, flags);
73755 - self->open_count++;
73756 + local_inc(&self->open_count);
73757 spin_unlock_irqrestore(&self->spinlock, flags);
73758 }
73759 - self->blocked_open--;
73760 + local_dec(&self->blocked_open);
73761
73762 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
73763 - __FILE__,__LINE__, tty->driver->name, self->open_count);
73764 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count));
73765
73766 if (!retval)
73767 self->flags |= ASYNC_NORMAL_ACTIVE;
73768 @@ -417,14 +417,14 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
73769 }
73770 /* ++ is not atomic, so this should be protected - Jean II */
73771 spin_lock_irqsave(&self->spinlock, flags);
73772 - self->open_count++;
73773 + local_inc(&self->open_count);
73774
73775 tty->driver_data = self;
73776 self->tty = tty;
73777 spin_unlock_irqrestore(&self->spinlock, flags);
73778
73779 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
73780 - self->line, self->open_count);
73781 + self->line, local_read(&self->open_count));
73782
73783 /* Not really used by us, but lets do it anyway */
73784 self->tty->low_latency = (self->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
73785 @@ -510,7 +510,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
73786 return;
73787 }
73788
73789 - if ((tty->count == 1) && (self->open_count != 1)) {
73790 + if ((tty->count == 1) && (local_read(&self->open_count) != 1)) {
73791 /*
73792 * Uh, oh. tty->count is 1, which means that the tty
73793 * structure will be freed. state->count should always
73794 @@ -520,16 +520,16 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
73795 */
73796 IRDA_DEBUG(0, "%s(), bad serial port count; "
73797 "tty->count is 1, state->count is %d\n", __func__ ,
73798 - self->open_count);
73799 - self->open_count = 1;
73800 + local_read(&self->open_count));
73801 + local_set(&self->open_count, 1);
73802 }
73803
73804 - if (--self->open_count < 0) {
73805 + if (local_dec_return(&self->open_count) < 0) {
73806 IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n",
73807 - __func__, self->line, self->open_count);
73808 - self->open_count = 0;
73809 + __func__, self->line, local_read(&self->open_count));
73810 + local_set(&self->open_count, 0);
73811 }
73812 - if (self->open_count) {
73813 + if (local_read(&self->open_count)) {
73814 spin_unlock_irqrestore(&self->spinlock, flags);
73815
73816 IRDA_DEBUG(0, "%s(), open count > 0\n", __func__ );
73817 @@ -561,7 +561,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
73818 tty->closing = 0;
73819 self->tty = NULL;
73820
73821 - if (self->blocked_open) {
73822 + if (local_read(&self->blocked_open)) {
73823 if (self->close_delay)
73824 schedule_timeout_interruptible(self->close_delay);
73825 wake_up_interruptible(&self->open_wait);
73826 @@ -1013,7 +1013,7 @@ static void ircomm_tty_hangup(struct tty_struct *tty)
73827 spin_lock_irqsave(&self->spinlock, flags);
73828 self->flags &= ~ASYNC_NORMAL_ACTIVE;
73829 self->tty = NULL;
73830 - self->open_count = 0;
73831 + local_set(&self->open_count, 0);
73832 spin_unlock_irqrestore(&self->spinlock, flags);
73833
73834 wake_up_interruptible(&self->open_wait);
73835 @@ -1360,7 +1360,7 @@ static void ircomm_tty_line_info(struct ircomm_tty_cb *self, struct seq_file *m)
73836 seq_putc(m, '\n');
73837
73838 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
73839 - seq_printf(m, "Open count: %d\n", self->open_count);
73840 + seq_printf(m, "Open count: %d\n", local_read(&self->open_count));
73841 seq_printf(m, "Max data size: %d\n", self->max_data_size);
73842 seq_printf(m, "Max header size: %d\n", self->max_header_size);
73843
73844 diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
73845 index 274d150..656a144 100644
73846 --- a/net/iucv/af_iucv.c
73847 +++ b/net/iucv/af_iucv.c
73848 @@ -787,10 +787,10 @@ static int iucv_sock_autobind(struct sock *sk)
73849
73850 write_lock_bh(&iucv_sk_list.lock);
73851
73852 - sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
73853 + sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
73854 while (__iucv_get_sock_by_name(name)) {
73855 sprintf(name, "%08x",
73856 - atomic_inc_return(&iucv_sk_list.autobind_name));
73857 + atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
73858 }
73859
73860 write_unlock_bh(&iucv_sk_list.lock);
73861 diff --git a/net/key/af_key.c b/net/key/af_key.c
73862 index 1e733e9..3d73c9f 100644
73863 --- a/net/key/af_key.c
73864 +++ b/net/key/af_key.c
73865 @@ -3016,10 +3016,10 @@ static int pfkey_send_policy_notify(struct xfrm_policy *xp, int dir, const struc
73866 static u32 get_acqseq(void)
73867 {
73868 u32 res;
73869 - static atomic_t acqseq;
73870 + static atomic_unchecked_t acqseq;
73871
73872 do {
73873 - res = atomic_inc_return(&acqseq);
73874 + res = atomic_inc_return_unchecked(&acqseq);
73875 } while (!res);
73876 return res;
73877 }
73878 diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
73879 index 73495f1..ad51356 100644
73880 --- a/net/mac80211/ieee80211_i.h
73881 +++ b/net/mac80211/ieee80211_i.h
73882 @@ -27,6 +27,7 @@
73883 #include <net/ieee80211_radiotap.h>
73884 #include <net/cfg80211.h>
73885 #include <net/mac80211.h>
73886 +#include <asm/local.h>
73887 #include "key.h"
73888 #include "sta_info.h"
73889
73890 @@ -764,7 +765,7 @@ struct ieee80211_local {
73891 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
73892 spinlock_t queue_stop_reason_lock;
73893
73894 - int open_count;
73895 + local_t open_count;
73896 int monitors, cooked_mntrs;
73897 /* number of interfaces with corresponding FIF_ flags */
73898 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
73899 diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
73900 index 30d7355..e260095 100644
73901 --- a/net/mac80211/iface.c
73902 +++ b/net/mac80211/iface.c
73903 @@ -211,7 +211,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
73904 break;
73905 }
73906
73907 - if (local->open_count == 0) {
73908 + if (local_read(&local->open_count) == 0) {
73909 res = drv_start(local);
73910 if (res)
73911 goto err_del_bss;
73912 @@ -235,7 +235,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
73913 memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
73914
73915 if (!is_valid_ether_addr(dev->dev_addr)) {
73916 - if (!local->open_count)
73917 + if (!local_read(&local->open_count))
73918 drv_stop(local);
73919 return -EADDRNOTAVAIL;
73920 }
73921 @@ -327,7 +327,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
73922 mutex_unlock(&local->mtx);
73923
73924 if (coming_up)
73925 - local->open_count++;
73926 + local_inc(&local->open_count);
73927
73928 if (hw_reconf_flags) {
73929 ieee80211_hw_config(local, hw_reconf_flags);
73930 @@ -347,7 +347,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
73931 err_del_interface:
73932 drv_remove_interface(local, &sdata->vif);
73933 err_stop:
73934 - if (!local->open_count)
73935 + if (!local_read(&local->open_count))
73936 drv_stop(local);
73937 err_del_bss:
73938 sdata->bss = NULL;
73939 @@ -472,7 +472,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
73940 }
73941
73942 if (going_down)
73943 - local->open_count--;
73944 + local_dec(&local->open_count);
73945
73946 switch (sdata->vif.type) {
73947 case NL80211_IFTYPE_AP_VLAN:
73948 @@ -531,7 +531,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
73949
73950 ieee80211_recalc_ps(local, -1);
73951
73952 - if (local->open_count == 0) {
73953 + if (local_read(&local->open_count) == 0) {
73954 if (local->ops->napi_poll)
73955 napi_disable(&local->napi);
73956 ieee80211_clear_tx_pending(local);
73957 diff --git a/net/mac80211/main.c b/net/mac80211/main.c
73958 index a7536fd..4039cc0 100644
73959 --- a/net/mac80211/main.c
73960 +++ b/net/mac80211/main.c
73961 @@ -163,7 +163,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
73962 local->hw.conf.power_level = power;
73963 }
73964
73965 - if (changed && local->open_count) {
73966 + if (changed && local_read(&local->open_count)) {
73967 ret = drv_config(local, changed);
73968 /*
73969 * Goal:
73970 diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
73971 index 9ee7164..56c5061 100644
73972 --- a/net/mac80211/pm.c
73973 +++ b/net/mac80211/pm.c
73974 @@ -34,7 +34,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
73975 struct ieee80211_sub_if_data *sdata;
73976 struct sta_info *sta;
73977
73978 - if (!local->open_count)
73979 + if (!local_read(&local->open_count))
73980 goto suspend;
73981
73982 ieee80211_scan_cancel(local);
73983 @@ -72,7 +72,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
73984 cancel_work_sync(&local->dynamic_ps_enable_work);
73985 del_timer_sync(&local->dynamic_ps_timer);
73986
73987 - local->wowlan = wowlan && local->open_count;
73988 + local->wowlan = wowlan && local_read(&local->open_count);
73989 if (local->wowlan) {
73990 int err = drv_suspend(local, wowlan);
73991 if (err < 0) {
73992 @@ -129,7 +129,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
73993 }
73994
73995 /* stop hardware - this must stop RX */
73996 - if (local->open_count)
73997 + if (local_read(&local->open_count))
73998 ieee80211_stop_device(local);
73999
74000 suspend:
74001 diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
74002 index 5a5a776..9600b11 100644
74003 --- a/net/mac80211/rate.c
74004 +++ b/net/mac80211/rate.c
74005 @@ -401,7 +401,7 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
74006
74007 ASSERT_RTNL();
74008
74009 - if (local->open_count)
74010 + if (local_read(&local->open_count))
74011 return -EBUSY;
74012
74013 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
74014 diff --git a/net/mac80211/rc80211_pid_debugfs.c b/net/mac80211/rc80211_pid_debugfs.c
74015 index c97a065..ff61928 100644
74016 --- a/net/mac80211/rc80211_pid_debugfs.c
74017 +++ b/net/mac80211/rc80211_pid_debugfs.c
74018 @@ -193,7 +193,7 @@ static ssize_t rate_control_pid_events_read(struct file *file, char __user *buf,
74019
74020 spin_unlock_irqrestore(&events->lock, status);
74021
74022 - if (copy_to_user(buf, pb, p))
74023 + if (p > sizeof(pb) || copy_to_user(buf, pb, p))
74024 return -EFAULT;
74025
74026 return p;
74027 diff --git a/net/mac80211/util.c b/net/mac80211/util.c
74028 index d5230ec..c604b21 100644
74029 --- a/net/mac80211/util.c
74030 +++ b/net/mac80211/util.c
74031 @@ -1000,7 +1000,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
74032 drv_set_coverage_class(local, hw->wiphy->coverage_class);
74033
74034 /* everything else happens only if HW was up & running */
74035 - if (!local->open_count)
74036 + if (!local_read(&local->open_count))
74037 goto wake_up;
74038
74039 /*
74040 diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
74041 index d5597b7..ab6d39c 100644
74042 --- a/net/netfilter/Kconfig
74043 +++ b/net/netfilter/Kconfig
74044 @@ -779,6 +779,16 @@ config NETFILTER_XT_MATCH_ESP
74045
74046 To compile it as a module, choose M here. If unsure, say N.
74047
74048 +config NETFILTER_XT_MATCH_GRADM
74049 + tristate '"gradm" match support'
74050 + depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
74051 + depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
74052 + ---help---
74053 + The gradm match allows to match on grsecurity RBAC being enabled.
74054 + It is useful when iptables rules are applied early on bootup to
74055 + prevent connections to the machine (except from a trusted host)
74056 + while the RBAC system is disabled.
74057 +
74058 config NETFILTER_XT_MATCH_HASHLIMIT
74059 tristate '"hashlimit" match support'
74060 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
74061 diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
74062 index 1a02853..5d8c22e 100644
74063 --- a/net/netfilter/Makefile
74064 +++ b/net/netfilter/Makefile
74065 @@ -81,6 +81,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_DCCP) += xt_dccp.o
74066 obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
74067 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
74068 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
74069 +obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
74070 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
74071 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
74072 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
74073 diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
74074 index 29fa5ba..8debc79 100644
74075 --- a/net/netfilter/ipvs/ip_vs_conn.c
74076 +++ b/net/netfilter/ipvs/ip_vs_conn.c
74077 @@ -556,7 +556,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
74078 /* Increase the refcnt counter of the dest */
74079 atomic_inc(&dest->refcnt);
74080
74081 - conn_flags = atomic_read(&dest->conn_flags);
74082 + conn_flags = atomic_read_unchecked(&dest->conn_flags);
74083 if (cp->protocol != IPPROTO_UDP)
74084 conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
74085 /* Bind with the destination and its corresponding transmitter */
74086 @@ -869,7 +869,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
74087 atomic_set(&cp->refcnt, 1);
74088
74089 atomic_set(&cp->n_control, 0);
74090 - atomic_set(&cp->in_pkts, 0);
74091 + atomic_set_unchecked(&cp->in_pkts, 0);
74092
74093 atomic_inc(&ipvs->conn_count);
74094 if (flags & IP_VS_CONN_F_NO_CPORT)
74095 @@ -1149,7 +1149,7 @@ static inline int todrop_entry(struct ip_vs_conn *cp)
74096
74097 /* Don't drop the entry if its number of incoming packets is not
74098 located in [0, 8] */
74099 - i = atomic_read(&cp->in_pkts);
74100 + i = atomic_read_unchecked(&cp->in_pkts);
74101 if (i > 8 || i < 0) return 0;
74102
74103 if (!todrop_rate[i]) return 0;
74104 diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
74105 index 093cc32..9209ae1 100644
74106 --- a/net/netfilter/ipvs/ip_vs_core.c
74107 +++ b/net/netfilter/ipvs/ip_vs_core.c
74108 @@ -562,7 +562,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
74109 ret = cp->packet_xmit(skb, cp, pd->pp);
74110 /* do not touch skb anymore */
74111
74112 - atomic_inc(&cp->in_pkts);
74113 + atomic_inc_unchecked(&cp->in_pkts);
74114 ip_vs_conn_put(cp);
74115 return ret;
74116 }
74117 @@ -1611,7 +1611,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
74118 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
74119 pkts = sysctl_sync_threshold(ipvs);
74120 else
74121 - pkts = atomic_add_return(1, &cp->in_pkts);
74122 + pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
74123
74124 if ((ipvs->sync_state & IP_VS_STATE_MASTER) &&
74125 cp->protocol == IPPROTO_SCTP) {
74126 diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
74127 index e1a66cf..0910076 100644
74128 --- a/net/netfilter/ipvs/ip_vs_ctl.c
74129 +++ b/net/netfilter/ipvs/ip_vs_ctl.c
74130 @@ -788,7 +788,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
74131 ip_vs_rs_hash(ipvs, dest);
74132 write_unlock_bh(&ipvs->rs_lock);
74133 }
74134 - atomic_set(&dest->conn_flags, conn_flags);
74135 + atomic_set_unchecked(&dest->conn_flags, conn_flags);
74136
74137 /* bind the service */
74138 if (!dest->svc) {
74139 @@ -2028,7 +2028,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
74140 " %-7s %-6d %-10d %-10d\n",
74141 &dest->addr.in6,
74142 ntohs(dest->port),
74143 - ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
74144 + ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
74145 atomic_read(&dest->weight),
74146 atomic_read(&dest->activeconns),
74147 atomic_read(&dest->inactconns));
74148 @@ -2039,7 +2039,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
74149 "%-7s %-6d %-10d %-10d\n",
74150 ntohl(dest->addr.ip),
74151 ntohs(dest->port),
74152 - ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
74153 + ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
74154 atomic_read(&dest->weight),
74155 atomic_read(&dest->activeconns),
74156 atomic_read(&dest->inactconns));
74157 @@ -2509,7 +2509,7 @@ __ip_vs_get_dest_entries(struct net *net, const struct ip_vs_get_dests *get,
74158
74159 entry.addr = dest->addr.ip;
74160 entry.port = dest->port;
74161 - entry.conn_flags = atomic_read(&dest->conn_flags);
74162 + entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
74163 entry.weight = atomic_read(&dest->weight);
74164 entry.u_threshold = dest->u_threshold;
74165 entry.l_threshold = dest->l_threshold;
74166 @@ -3042,7 +3042,7 @@ static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
74167 NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port);
74168
74169 NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD,
74170 - atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
74171 + atomic_read_unchecked(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
74172 NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight));
74173 NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold);
74174 NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold);
74175 diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
74176 index 2b6678c0..aaa41fc 100644
74177 --- a/net/netfilter/ipvs/ip_vs_sync.c
74178 +++ b/net/netfilter/ipvs/ip_vs_sync.c
74179 @@ -649,7 +649,7 @@ control:
74180 * i.e only increment in_pkts for Templates.
74181 */
74182 if (cp->flags & IP_VS_CONN_F_TEMPLATE) {
74183 - int pkts = atomic_add_return(1, &cp->in_pkts);
74184 + int pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
74185
74186 if (pkts % sysctl_sync_period(ipvs) != 1)
74187 return;
74188 @@ -795,7 +795,7 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param,
74189
74190 if (opt)
74191 memcpy(&cp->in_seq, opt, sizeof(*opt));
74192 - atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
74193 + atomic_set_unchecked(&cp->in_pkts, sysctl_sync_threshold(ipvs));
74194 cp->state = state;
74195 cp->old_state = cp->state;
74196 /*
74197 diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
74198 index aa2d720..d8aa111 100644
74199 --- a/net/netfilter/ipvs/ip_vs_xmit.c
74200 +++ b/net/netfilter/ipvs/ip_vs_xmit.c
74201 @@ -1151,7 +1151,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
74202 else
74203 rc = NF_ACCEPT;
74204 /* do not touch skb anymore */
74205 - atomic_inc(&cp->in_pkts);
74206 + atomic_inc_unchecked(&cp->in_pkts);
74207 goto out;
74208 }
74209
74210 @@ -1272,7 +1272,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
74211 else
74212 rc = NF_ACCEPT;
74213 /* do not touch skb anymore */
74214 - atomic_inc(&cp->in_pkts);
74215 + atomic_inc_unchecked(&cp->in_pkts);
74216 goto out;
74217 }
74218
74219 diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
74220 index 66b2c54..c7884e3 100644
74221 --- a/net/netfilter/nfnetlink_log.c
74222 +++ b/net/netfilter/nfnetlink_log.c
74223 @@ -70,7 +70,7 @@ struct nfulnl_instance {
74224 };
74225
74226 static DEFINE_SPINLOCK(instances_lock);
74227 -static atomic_t global_seq;
74228 +static atomic_unchecked_t global_seq;
74229
74230 #define INSTANCE_BUCKETS 16
74231 static struct hlist_head instance_table[INSTANCE_BUCKETS];
74232 @@ -502,7 +502,7 @@ __build_packet_message(struct nfulnl_instance *inst,
74233 /* global sequence number */
74234 if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
74235 NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL,
74236 - htonl(atomic_inc_return(&global_seq)));
74237 + htonl(atomic_inc_return_unchecked(&global_seq)));
74238
74239 if (data_len) {
74240 struct nlattr *nla;
74241 diff --git a/net/netfilter/xt_gradm.c b/net/netfilter/xt_gradm.c
74242 new file mode 100644
74243 index 0000000..6905327
74244 --- /dev/null
74245 +++ b/net/netfilter/xt_gradm.c
74246 @@ -0,0 +1,51 @@
74247 +/*
74248 + * gradm match for netfilter
74249 + * Copyright © Zbigniew Krzystolik, 2010
74250 + *
74251 + * This program is free software; you can redistribute it and/or modify
74252 + * it under the terms of the GNU General Public License; either version
74253 + * 2 or 3 as published by the Free Software Foundation.
74254 + */
74255 +#include <linux/module.h>
74256 +#include <linux/moduleparam.h>
74257 +#include <linux/skbuff.h>
74258 +#include <linux/netfilter/x_tables.h>
74259 +#include <linux/grsecurity.h>
74260 +#include <linux/netfilter/xt_gradm.h>
74261 +
74262 +static bool
74263 +gradm_mt(const struct sk_buff *skb, struct xt_action_param *par)
74264 +{
74265 + const struct xt_gradm_mtinfo *info = par->matchinfo;
74266 + bool retval = false;
74267 + if (gr_acl_is_enabled())
74268 + retval = true;
74269 + return retval ^ info->invflags;
74270 +}
74271 +
74272 +static struct xt_match gradm_mt_reg __read_mostly = {
74273 + .name = "gradm",
74274 + .revision = 0,
74275 + .family = NFPROTO_UNSPEC,
74276 + .match = gradm_mt,
74277 + .matchsize = XT_ALIGN(sizeof(struct xt_gradm_mtinfo)),
74278 + .me = THIS_MODULE,
74279 +};
74280 +
74281 +static int __init gradm_mt_init(void)
74282 +{
74283 + return xt_register_match(&gradm_mt_reg);
74284 +}
74285 +
74286 +static void __exit gradm_mt_exit(void)
74287 +{
74288 + xt_unregister_match(&gradm_mt_reg);
74289 +}
74290 +
74291 +module_init(gradm_mt_init);
74292 +module_exit(gradm_mt_exit);
74293 +MODULE_AUTHOR("Zbigniew Krzystolik <zbyniu@destrukcja.pl>");
74294 +MODULE_DESCRIPTION("Xtables: Grsecurity RBAC match");
74295 +MODULE_LICENSE("GPL");
74296 +MODULE_ALIAS("ipt_gradm");
74297 +MODULE_ALIAS("ip6t_gradm");
74298 diff --git a/net/netfilter/xt_statistic.c b/net/netfilter/xt_statistic.c
74299 index 4fe4fb4..87a89e5 100644
74300 --- a/net/netfilter/xt_statistic.c
74301 +++ b/net/netfilter/xt_statistic.c
74302 @@ -19,7 +19,7 @@
74303 #include <linux/module.h>
74304
74305 struct xt_statistic_priv {
74306 - atomic_t count;
74307 + atomic_unchecked_t count;
74308 } ____cacheline_aligned_in_smp;
74309
74310 MODULE_LICENSE("GPL");
74311 @@ -42,9 +42,9 @@ statistic_mt(const struct sk_buff *skb, struct xt_action_param *par)
74312 break;
74313 case XT_STATISTIC_MODE_NTH:
74314 do {
74315 - oval = atomic_read(&info->master->count);
74316 + oval = atomic_read_unchecked(&info->master->count);
74317 nval = (oval == info->u.nth.every) ? 0 : oval + 1;
74318 - } while (atomic_cmpxchg(&info->master->count, oval, nval) != oval);
74319 + } while (atomic_cmpxchg_unchecked(&info->master->count, oval, nval) != oval);
74320 if (nval == 0)
74321 ret = !ret;
74322 break;
74323 @@ -64,7 +64,7 @@ static int statistic_mt_check(const struct xt_mtchk_param *par)
74324 info->master = kzalloc(sizeof(*info->master), GFP_KERNEL);
74325 if (info->master == NULL)
74326 return -ENOMEM;
74327 - atomic_set(&info->master->count, info->u.nth.count);
74328 + atomic_set_unchecked(&info->master->count, info->u.nth.count);
74329
74330 return 0;
74331 }
74332 diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
74333 index 1201b6d..bcff8c6 100644
74334 --- a/net/netlink/af_netlink.c
74335 +++ b/net/netlink/af_netlink.c
74336 @@ -742,7 +742,7 @@ static void netlink_overrun(struct sock *sk)
74337 sk->sk_error_report(sk);
74338 }
74339 }
74340 - atomic_inc(&sk->sk_drops);
74341 + atomic_inc_unchecked(&sk->sk_drops);
74342 }
74343
74344 static struct sock *netlink_getsockbypid(struct sock *ssk, u32 pid)
74345 @@ -1999,7 +1999,7 @@ static int netlink_seq_show(struct seq_file *seq, void *v)
74346 sk_wmem_alloc_get(s),
74347 nlk->cb,
74348 atomic_read(&s->sk_refcnt),
74349 - atomic_read(&s->sk_drops),
74350 + atomic_read_unchecked(&s->sk_drops),
74351 sock_i_ino(s)
74352 );
74353
74354 diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
74355 index 732152f..60bb09e 100644
74356 --- a/net/netrom/af_netrom.c
74357 +++ b/net/netrom/af_netrom.c
74358 @@ -839,6 +839,7 @@ static int nr_getname(struct socket *sock, struct sockaddr *uaddr,
74359 struct sock *sk = sock->sk;
74360 struct nr_sock *nr = nr_sk(sk);
74361
74362 + memset(sax, 0, sizeof(*sax));
74363 lock_sock(sk);
74364 if (peer != 0) {
74365 if (sk->sk_state != TCP_ESTABLISHED) {
74366 @@ -853,7 +854,6 @@ static int nr_getname(struct socket *sock, struct sockaddr *uaddr,
74367 *uaddr_len = sizeof(struct full_sockaddr_ax25);
74368 } else {
74369 sax->fsa_ax25.sax25_family = AF_NETROM;
74370 - sax->fsa_ax25.sax25_ndigis = 0;
74371 sax->fsa_ax25.sax25_call = nr->source_addr;
74372 *uaddr_len = sizeof(struct sockaddr_ax25);
74373 }
74374 diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
74375 index d9d4970..d5a6a68 100644
74376 --- a/net/packet/af_packet.c
74377 +++ b/net/packet/af_packet.c
74378 @@ -1675,7 +1675,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
74379
74380 spin_lock(&sk->sk_receive_queue.lock);
74381 po->stats.tp_packets++;
74382 - skb->dropcount = atomic_read(&sk->sk_drops);
74383 + skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
74384 __skb_queue_tail(&sk->sk_receive_queue, skb);
74385 spin_unlock(&sk->sk_receive_queue.lock);
74386 sk->sk_data_ready(sk, skb->len);
74387 @@ -1684,7 +1684,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
74388 drop_n_acct:
74389 spin_lock(&sk->sk_receive_queue.lock);
74390 po->stats.tp_drops++;
74391 - atomic_inc(&sk->sk_drops);
74392 + atomic_inc_unchecked(&sk->sk_drops);
74393 spin_unlock(&sk->sk_receive_queue.lock);
74394
74395 drop_n_restore:
74396 @@ -3266,7 +3266,7 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
74397 case PACKET_HDRLEN:
74398 if (len > sizeof(int))
74399 len = sizeof(int);
74400 - if (copy_from_user(&val, optval, len))
74401 + if (len > sizeof(val) || copy_from_user(&val, optval, len))
74402 return -EFAULT;
74403 switch (val) {
74404 case TPACKET_V1:
74405 @@ -3316,7 +3316,7 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
74406
74407 if (put_user(len, optlen))
74408 return -EFAULT;
74409 - if (copy_to_user(optval, data, len))
74410 + if (len > sizeof(st) || copy_to_user(optval, data, len))
74411 return -EFAULT;
74412 return 0;
74413 }
74414 diff --git a/net/phonet/af_phonet.c b/net/phonet/af_phonet.c
74415 index d65f699..05aa6ce 100644
74416 --- a/net/phonet/af_phonet.c
74417 +++ b/net/phonet/af_phonet.c
74418 @@ -41,7 +41,7 @@ static struct phonet_protocol *phonet_proto_get(unsigned int protocol)
74419 {
74420 struct phonet_protocol *pp;
74421
74422 - if (protocol >= PHONET_NPROTO)
74423 + if (protocol < 0 || protocol >= PHONET_NPROTO)
74424 return NULL;
74425
74426 rcu_read_lock();
74427 @@ -469,7 +469,7 @@ int __init_or_module phonet_proto_register(unsigned int protocol,
74428 {
74429 int err = 0;
74430
74431 - if (protocol >= PHONET_NPROTO)
74432 + if (protocol < 0 || protocol >= PHONET_NPROTO)
74433 return -EINVAL;
74434
74435 err = proto_register(pp->prot, 1);
74436 diff --git a/net/phonet/pep.c b/net/phonet/pep.c
74437 index 2ba6e9f..409573f 100644
74438 --- a/net/phonet/pep.c
74439 +++ b/net/phonet/pep.c
74440 @@ -388,7 +388,7 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
74441
74442 case PNS_PEP_CTRL_REQ:
74443 if (skb_queue_len(&pn->ctrlreq_queue) >= PNPIPE_CTRLREQ_MAX) {
74444 - atomic_inc(&sk->sk_drops);
74445 + atomic_inc_unchecked(&sk->sk_drops);
74446 break;
74447 }
74448 __skb_pull(skb, 4);
74449 @@ -409,7 +409,7 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
74450 }
74451
74452 if (pn->rx_credits == 0) {
74453 - atomic_inc(&sk->sk_drops);
74454 + atomic_inc_unchecked(&sk->sk_drops);
74455 err = -ENOBUFS;
74456 break;
74457 }
74458 @@ -557,7 +557,7 @@ static int pipe_handler_do_rcv(struct sock *sk, struct sk_buff *skb)
74459 }
74460
74461 if (pn->rx_credits == 0) {
74462 - atomic_inc(&sk->sk_drops);
74463 + atomic_inc_unchecked(&sk->sk_drops);
74464 err = NET_RX_DROP;
74465 break;
74466 }
74467 diff --git a/net/phonet/socket.c b/net/phonet/socket.c
74468 index 4c7eff3..59c727f 100644
74469 --- a/net/phonet/socket.c
74470 +++ b/net/phonet/socket.c
74471 @@ -613,8 +613,13 @@ static int pn_sock_seq_show(struct seq_file *seq, void *v)
74472 pn->resource, sk->sk_state,
74473 sk_wmem_alloc_get(sk), sk_rmem_alloc_get(sk),
74474 sock_i_uid(sk), sock_i_ino(sk),
74475 - atomic_read(&sk->sk_refcnt), sk,
74476 - atomic_read(&sk->sk_drops), &len);
74477 + atomic_read(&sk->sk_refcnt),
74478 +#ifdef CONFIG_GRKERNSEC_HIDESYM
74479 + NULL,
74480 +#else
74481 + sk,
74482 +#endif
74483 + atomic_read_unchecked(&sk->sk_drops), &len);
74484 }
74485 seq_printf(seq, "%*s\n", 127 - len, "");
74486 return 0;
74487 diff --git a/net/rds/cong.c b/net/rds/cong.c
74488 index e5b65ac..f3b6fb7 100644
74489 --- a/net/rds/cong.c
74490 +++ b/net/rds/cong.c
74491 @@ -78,7 +78,7 @@
74492 * finds that the saved generation number is smaller than the global generation
74493 * number, it wakes up the process.
74494 */
74495 -static atomic_t rds_cong_generation = ATOMIC_INIT(0);
74496 +static atomic_unchecked_t rds_cong_generation = ATOMIC_INIT(0);
74497
74498 /*
74499 * Congestion monitoring
74500 @@ -233,7 +233,7 @@ void rds_cong_map_updated(struct rds_cong_map *map, uint64_t portmask)
74501 rdsdebug("waking map %p for %pI4\n",
74502 map, &map->m_addr);
74503 rds_stats_inc(s_cong_update_received);
74504 - atomic_inc(&rds_cong_generation);
74505 + atomic_inc_unchecked(&rds_cong_generation);
74506 if (waitqueue_active(&map->m_waitq))
74507 wake_up(&map->m_waitq);
74508 if (waitqueue_active(&rds_poll_waitq))
74509 @@ -259,7 +259,7 @@ EXPORT_SYMBOL_GPL(rds_cong_map_updated);
74510
74511 int rds_cong_updated_since(unsigned long *recent)
74512 {
74513 - unsigned long gen = atomic_read(&rds_cong_generation);
74514 + unsigned long gen = atomic_read_unchecked(&rds_cong_generation);
74515
74516 if (likely(*recent == gen))
74517 return 0;
74518 diff --git a/net/rds/ib.h b/net/rds/ib.h
74519 index edfaaaf..8c89879 100644
74520 --- a/net/rds/ib.h
74521 +++ b/net/rds/ib.h
74522 @@ -128,7 +128,7 @@ struct rds_ib_connection {
74523 /* sending acks */
74524 unsigned long i_ack_flags;
74525 #ifdef KERNEL_HAS_ATOMIC64
74526 - atomic64_t i_ack_next; /* next ACK to send */
74527 + atomic64_unchecked_t i_ack_next; /* next ACK to send */
74528 #else
74529 spinlock_t i_ack_lock; /* protect i_ack_next */
74530 u64 i_ack_next; /* next ACK to send */
74531 diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
74532 index 51c8689..36c555f 100644
74533 --- a/net/rds/ib_cm.c
74534 +++ b/net/rds/ib_cm.c
74535 @@ -718,7 +718,7 @@ void rds_ib_conn_shutdown(struct rds_connection *conn)
74536 /* Clear the ACK state */
74537 clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
74538 #ifdef KERNEL_HAS_ATOMIC64
74539 - atomic64_set(&ic->i_ack_next, 0);
74540 + atomic64_set_unchecked(&ic->i_ack_next, 0);
74541 #else
74542 ic->i_ack_next = 0;
74543 #endif
74544 diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c
74545 index e29e0ca..fa3a6a3 100644
74546 --- a/net/rds/ib_recv.c
74547 +++ b/net/rds/ib_recv.c
74548 @@ -592,7 +592,7 @@ static u64 rds_ib_get_ack(struct rds_ib_connection *ic)
74549 static void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq,
74550 int ack_required)
74551 {
74552 - atomic64_set(&ic->i_ack_next, seq);
74553 + atomic64_set_unchecked(&ic->i_ack_next, seq);
74554 if (ack_required) {
74555 smp_mb__before_clear_bit();
74556 set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
74557 @@ -604,7 +604,7 @@ static u64 rds_ib_get_ack(struct rds_ib_connection *ic)
74558 clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
74559 smp_mb__after_clear_bit();
74560
74561 - return atomic64_read(&ic->i_ack_next);
74562 + return atomic64_read_unchecked(&ic->i_ack_next);
74563 }
74564 #endif
74565
74566 diff --git a/net/rds/iw.h b/net/rds/iw.h
74567 index 04ce3b1..48119a6 100644
74568 --- a/net/rds/iw.h
74569 +++ b/net/rds/iw.h
74570 @@ -134,7 +134,7 @@ struct rds_iw_connection {
74571 /* sending acks */
74572 unsigned long i_ack_flags;
74573 #ifdef KERNEL_HAS_ATOMIC64
74574 - atomic64_t i_ack_next; /* next ACK to send */
74575 + atomic64_unchecked_t i_ack_next; /* next ACK to send */
74576 #else
74577 spinlock_t i_ack_lock; /* protect i_ack_next */
74578 u64 i_ack_next; /* next ACK to send */
74579 diff --git a/net/rds/iw_cm.c b/net/rds/iw_cm.c
74580 index 9556d28..f046d0e 100644
74581 --- a/net/rds/iw_cm.c
74582 +++ b/net/rds/iw_cm.c
74583 @@ -663,7 +663,7 @@ void rds_iw_conn_shutdown(struct rds_connection *conn)
74584 /* Clear the ACK state */
74585 clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
74586 #ifdef KERNEL_HAS_ATOMIC64
74587 - atomic64_set(&ic->i_ack_next, 0);
74588 + atomic64_set_unchecked(&ic->i_ack_next, 0);
74589 #else
74590 ic->i_ack_next = 0;
74591 #endif
74592 diff --git a/net/rds/iw_recv.c b/net/rds/iw_recv.c
74593 index 5e57347..3916042 100644
74594 --- a/net/rds/iw_recv.c
74595 +++ b/net/rds/iw_recv.c
74596 @@ -427,7 +427,7 @@ static u64 rds_iw_get_ack(struct rds_iw_connection *ic)
74597 static void rds_iw_set_ack(struct rds_iw_connection *ic, u64 seq,
74598 int ack_required)
74599 {
74600 - atomic64_set(&ic->i_ack_next, seq);
74601 + atomic64_set_unchecked(&ic->i_ack_next, seq);
74602 if (ack_required) {
74603 smp_mb__before_clear_bit();
74604 set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
74605 @@ -439,7 +439,7 @@ static u64 rds_iw_get_ack(struct rds_iw_connection *ic)
74606 clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
74607 smp_mb__after_clear_bit();
74608
74609 - return atomic64_read(&ic->i_ack_next);
74610 + return atomic64_read_unchecked(&ic->i_ack_next);
74611 }
74612 #endif
74613
74614 diff --git a/net/rds/tcp.c b/net/rds/tcp.c
74615 index edac9ef..16bcb98 100644
74616 --- a/net/rds/tcp.c
74617 +++ b/net/rds/tcp.c
74618 @@ -59,7 +59,7 @@ void rds_tcp_nonagle(struct socket *sock)
74619 int val = 1;
74620
74621 set_fs(KERNEL_DS);
74622 - sock->ops->setsockopt(sock, SOL_TCP, TCP_NODELAY, (char __user *)&val,
74623 + sock->ops->setsockopt(sock, SOL_TCP, TCP_NODELAY, (char __force_user *)&val,
74624 sizeof(val));
74625 set_fs(oldfs);
74626 }
74627 diff --git a/net/rds/tcp_send.c b/net/rds/tcp_send.c
74628 index 1b4fd68..2234175 100644
74629 --- a/net/rds/tcp_send.c
74630 +++ b/net/rds/tcp_send.c
74631 @@ -43,7 +43,7 @@ static void rds_tcp_cork(struct socket *sock, int val)
74632
74633 oldfs = get_fs();
74634 set_fs(KERNEL_DS);
74635 - sock->ops->setsockopt(sock, SOL_TCP, TCP_CORK, (char __user *)&val,
74636 + sock->ops->setsockopt(sock, SOL_TCP, TCP_CORK, (char __force_user *)&val,
74637 sizeof(val));
74638 set_fs(oldfs);
74639 }
74640 diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
74641 index 74c064c..fdec26f 100644
74642 --- a/net/rxrpc/af_rxrpc.c
74643 +++ b/net/rxrpc/af_rxrpc.c
74644 @@ -39,7 +39,7 @@ static const struct proto_ops rxrpc_rpc_ops;
74645 __be32 rxrpc_epoch;
74646
74647 /* current debugging ID */
74648 -atomic_t rxrpc_debug_id;
74649 +atomic_unchecked_t rxrpc_debug_id;
74650
74651 /* count of skbs currently in use */
74652 atomic_t rxrpc_n_skbs;
74653 diff --git a/net/rxrpc/ar-ack.c b/net/rxrpc/ar-ack.c
74654 index f99cfce..cc529dd 100644
74655 --- a/net/rxrpc/ar-ack.c
74656 +++ b/net/rxrpc/ar-ack.c
74657 @@ -175,7 +175,7 @@ static void rxrpc_resend(struct rxrpc_call *call)
74658
74659 _enter("{%d,%d,%d,%d},",
74660 call->acks_hard, call->acks_unacked,
74661 - atomic_read(&call->sequence),
74662 + atomic_read_unchecked(&call->sequence),
74663 CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz));
74664
74665 stop = 0;
74666 @@ -199,7 +199,7 @@ static void rxrpc_resend(struct rxrpc_call *call)
74667
74668 /* each Tx packet has a new serial number */
74669 sp->hdr.serial =
74670 - htonl(atomic_inc_return(&call->conn->serial));
74671 + htonl(atomic_inc_return_unchecked(&call->conn->serial));
74672
74673 hdr = (struct rxrpc_header *) txb->head;
74674 hdr->serial = sp->hdr.serial;
74675 @@ -403,7 +403,7 @@ static void rxrpc_rotate_tx_window(struct rxrpc_call *call, u32 hard)
74676 */
74677 static void rxrpc_clear_tx_window(struct rxrpc_call *call)
74678 {
74679 - rxrpc_rotate_tx_window(call, atomic_read(&call->sequence));
74680 + rxrpc_rotate_tx_window(call, atomic_read_unchecked(&call->sequence));
74681 }
74682
74683 /*
74684 @@ -629,7 +629,7 @@ process_further:
74685
74686 latest = ntohl(sp->hdr.serial);
74687 hard = ntohl(ack.firstPacket);
74688 - tx = atomic_read(&call->sequence);
74689 + tx = atomic_read_unchecked(&call->sequence);
74690
74691 _proto("Rx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
74692 latest,
74693 @@ -1161,7 +1161,7 @@ void rxrpc_process_call(struct work_struct *work)
74694 goto maybe_reschedule;
74695
74696 send_ACK_with_skew:
74697 - ack.maxSkew = htons(atomic_read(&call->conn->hi_serial) -
74698 + ack.maxSkew = htons(atomic_read_unchecked(&call->conn->hi_serial) -
74699 ntohl(ack.serial));
74700 send_ACK:
74701 mtu = call->conn->trans->peer->if_mtu;
74702 @@ -1173,7 +1173,7 @@ send_ACK:
74703 ackinfo.rxMTU = htonl(5692);
74704 ackinfo.jumbo_max = htonl(4);
74705
74706 - hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
74707 + hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
74708 _proto("Tx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
74709 ntohl(hdr.serial),
74710 ntohs(ack.maxSkew),
74711 @@ -1191,7 +1191,7 @@ send_ACK:
74712 send_message:
74713 _debug("send message");
74714
74715 - hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
74716 + hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
74717 _proto("Tx %s %%%u", rxrpc_pkts[hdr.type], ntohl(hdr.serial));
74718 send_message_2:
74719
74720 diff --git a/net/rxrpc/ar-call.c b/net/rxrpc/ar-call.c
74721 index bf656c2..48f9d27 100644
74722 --- a/net/rxrpc/ar-call.c
74723 +++ b/net/rxrpc/ar-call.c
74724 @@ -83,7 +83,7 @@ static struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
74725 spin_lock_init(&call->lock);
74726 rwlock_init(&call->state_lock);
74727 atomic_set(&call->usage, 1);
74728 - call->debug_id = atomic_inc_return(&rxrpc_debug_id);
74729 + call->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
74730 call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;
74731
74732 memset(&call->sock_node, 0xed, sizeof(call->sock_node));
74733 diff --git a/net/rxrpc/ar-connection.c b/net/rxrpc/ar-connection.c
74734 index 4106ca9..a338d7a 100644
74735 --- a/net/rxrpc/ar-connection.c
74736 +++ b/net/rxrpc/ar-connection.c
74737 @@ -206,7 +206,7 @@ static struct rxrpc_connection *rxrpc_alloc_connection(gfp_t gfp)
74738 rwlock_init(&conn->lock);
74739 spin_lock_init(&conn->state_lock);
74740 atomic_set(&conn->usage, 1);
74741 - conn->debug_id = atomic_inc_return(&rxrpc_debug_id);
74742 + conn->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
74743 conn->avail_calls = RXRPC_MAXCALLS;
74744 conn->size_align = 4;
74745 conn->header_size = sizeof(struct rxrpc_header);
74746 diff --git a/net/rxrpc/ar-connevent.c b/net/rxrpc/ar-connevent.c
74747 index e7ed43a..6afa140 100644
74748 --- a/net/rxrpc/ar-connevent.c
74749 +++ b/net/rxrpc/ar-connevent.c
74750 @@ -109,7 +109,7 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn,
74751
74752 len = iov[0].iov_len + iov[1].iov_len;
74753
74754 - hdr.serial = htonl(atomic_inc_return(&conn->serial));
74755 + hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
74756 _proto("Tx CONN ABORT %%%u { %d }", ntohl(hdr.serial), abort_code);
74757
74758 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
74759 diff --git a/net/rxrpc/ar-input.c b/net/rxrpc/ar-input.c
74760 index 1a2b0633..e8d1382 100644
74761 --- a/net/rxrpc/ar-input.c
74762 +++ b/net/rxrpc/ar-input.c
74763 @@ -340,9 +340,9 @@ void rxrpc_fast_process_packet(struct rxrpc_call *call, struct sk_buff *skb)
74764 /* track the latest serial number on this connection for ACK packet
74765 * information */
74766 serial = ntohl(sp->hdr.serial);
74767 - hi_serial = atomic_read(&call->conn->hi_serial);
74768 + hi_serial = atomic_read_unchecked(&call->conn->hi_serial);
74769 while (serial > hi_serial)
74770 - hi_serial = atomic_cmpxchg(&call->conn->hi_serial, hi_serial,
74771 + hi_serial = atomic_cmpxchg_unchecked(&call->conn->hi_serial, hi_serial,
74772 serial);
74773
74774 /* request ACK generation for any ACK or DATA packet that requests
74775 diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
74776 index 8e22bd3..f66d1c0 100644
74777 --- a/net/rxrpc/ar-internal.h
74778 +++ b/net/rxrpc/ar-internal.h
74779 @@ -272,8 +272,8 @@ struct rxrpc_connection {
74780 int error; /* error code for local abort */
74781 int debug_id; /* debug ID for printks */
74782 unsigned call_counter; /* call ID counter */
74783 - atomic_t serial; /* packet serial number counter */
74784 - atomic_t hi_serial; /* highest serial number received */
74785 + atomic_unchecked_t serial; /* packet serial number counter */
74786 + atomic_unchecked_t hi_serial; /* highest serial number received */
74787 u8 avail_calls; /* number of calls available */
74788 u8 size_align; /* data size alignment (for security) */
74789 u8 header_size; /* rxrpc + security header size */
74790 @@ -346,7 +346,7 @@ struct rxrpc_call {
74791 spinlock_t lock;
74792 rwlock_t state_lock; /* lock for state transition */
74793 atomic_t usage;
74794 - atomic_t sequence; /* Tx data packet sequence counter */
74795 + atomic_unchecked_t sequence; /* Tx data packet sequence counter */
74796 u32 abort_code; /* local/remote abort code */
74797 enum { /* current state of call */
74798 RXRPC_CALL_CLIENT_SEND_REQUEST, /* - client sending request phase */
74799 @@ -420,7 +420,7 @@ static inline void rxrpc_abort_call(struct rxrpc_call *call, u32 abort_code)
74800 */
74801 extern atomic_t rxrpc_n_skbs;
74802 extern __be32 rxrpc_epoch;
74803 -extern atomic_t rxrpc_debug_id;
74804 +extern atomic_unchecked_t rxrpc_debug_id;
74805 extern struct workqueue_struct *rxrpc_workqueue;
74806
74807 /*
74808 diff --git a/net/rxrpc/ar-local.c b/net/rxrpc/ar-local.c
74809 index 87f7135..74d3703 100644
74810 --- a/net/rxrpc/ar-local.c
74811 +++ b/net/rxrpc/ar-local.c
74812 @@ -45,7 +45,7 @@ struct rxrpc_local *rxrpc_alloc_local(struct sockaddr_rxrpc *srx)
74813 spin_lock_init(&local->lock);
74814 rwlock_init(&local->services_lock);
74815 atomic_set(&local->usage, 1);
74816 - local->debug_id = atomic_inc_return(&rxrpc_debug_id);
74817 + local->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
74818 memcpy(&local->srx, srx, sizeof(*srx));
74819 }
74820
74821 diff --git a/net/rxrpc/ar-output.c b/net/rxrpc/ar-output.c
74822 index 338d793..47391d0 100644
74823 --- a/net/rxrpc/ar-output.c
74824 +++ b/net/rxrpc/ar-output.c
74825 @@ -682,9 +682,9 @@ static int rxrpc_send_data(struct kiocb *iocb,
74826 sp->hdr.cid = call->cid;
74827 sp->hdr.callNumber = call->call_id;
74828 sp->hdr.seq =
74829 - htonl(atomic_inc_return(&call->sequence));
74830 + htonl(atomic_inc_return_unchecked(&call->sequence));
74831 sp->hdr.serial =
74832 - htonl(atomic_inc_return(&conn->serial));
74833 + htonl(atomic_inc_return_unchecked(&conn->serial));
74834 sp->hdr.type = RXRPC_PACKET_TYPE_DATA;
74835 sp->hdr.userStatus = 0;
74836 sp->hdr.securityIndex = conn->security_ix;
74837 diff --git a/net/rxrpc/ar-peer.c b/net/rxrpc/ar-peer.c
74838 index 2754f09..b20e38f 100644
74839 --- a/net/rxrpc/ar-peer.c
74840 +++ b/net/rxrpc/ar-peer.c
74841 @@ -72,7 +72,7 @@ static struct rxrpc_peer *rxrpc_alloc_peer(struct sockaddr_rxrpc *srx,
74842 INIT_LIST_HEAD(&peer->error_targets);
74843 spin_lock_init(&peer->lock);
74844 atomic_set(&peer->usage, 1);
74845 - peer->debug_id = atomic_inc_return(&rxrpc_debug_id);
74846 + peer->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
74847 memcpy(&peer->srx, srx, sizeof(*srx));
74848
74849 rxrpc_assess_MTU_size(peer);
74850 diff --git a/net/rxrpc/ar-proc.c b/net/rxrpc/ar-proc.c
74851 index 38047f7..9f48511 100644
74852 --- a/net/rxrpc/ar-proc.c
74853 +++ b/net/rxrpc/ar-proc.c
74854 @@ -164,8 +164,8 @@ static int rxrpc_connection_seq_show(struct seq_file *seq, void *v)
74855 atomic_read(&conn->usage),
74856 rxrpc_conn_states[conn->state],
74857 key_serial(conn->key),
74858 - atomic_read(&conn->serial),
74859 - atomic_read(&conn->hi_serial));
74860 + atomic_read_unchecked(&conn->serial),
74861 + atomic_read_unchecked(&conn->hi_serial));
74862
74863 return 0;
74864 }
74865 diff --git a/net/rxrpc/ar-transport.c b/net/rxrpc/ar-transport.c
74866 index 92df566..87ec1bf 100644
74867 --- a/net/rxrpc/ar-transport.c
74868 +++ b/net/rxrpc/ar-transport.c
74869 @@ -47,7 +47,7 @@ static struct rxrpc_transport *rxrpc_alloc_transport(struct rxrpc_local *local,
74870 spin_lock_init(&trans->client_lock);
74871 rwlock_init(&trans->conn_lock);
74872 atomic_set(&trans->usage, 1);
74873 - trans->debug_id = atomic_inc_return(&rxrpc_debug_id);
74874 + trans->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
74875
74876 if (peer->srx.transport.family == AF_INET) {
74877 switch (peer->srx.transport_type) {
74878 diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c
74879 index 7635107..4670276 100644
74880 --- a/net/rxrpc/rxkad.c
74881 +++ b/net/rxrpc/rxkad.c
74882 @@ -610,7 +610,7 @@ static int rxkad_issue_challenge(struct rxrpc_connection *conn)
74883
74884 len = iov[0].iov_len + iov[1].iov_len;
74885
74886 - hdr.serial = htonl(atomic_inc_return(&conn->serial));
74887 + hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
74888 _proto("Tx CHALLENGE %%%u", ntohl(hdr.serial));
74889
74890 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
74891 @@ -660,7 +660,7 @@ static int rxkad_send_response(struct rxrpc_connection *conn,
74892
74893 len = iov[0].iov_len + iov[1].iov_len + iov[2].iov_len;
74894
74895 - hdr->serial = htonl(atomic_inc_return(&conn->serial));
74896 + hdr->serial = htonl(atomic_inc_return_unchecked(&conn->serial));
74897 _proto("Tx RESPONSE %%%u", ntohl(hdr->serial));
74898
74899 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 3, len);
74900 diff --git a/net/sctp/proc.c b/net/sctp/proc.c
74901 index 1e2eee8..ce3967e 100644
74902 --- a/net/sctp/proc.c
74903 +++ b/net/sctp/proc.c
74904 @@ -319,7 +319,8 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
74905 seq_printf(seq,
74906 "%8pK %8pK %-3d %-3d %-2d %-4d "
74907 "%4d %8d %8d %7d %5lu %-5d %5d ",
74908 - assoc, sk, sctp_sk(sk)->type, sk->sk_state,
74909 + assoc, sk,
74910 + sctp_sk(sk)->type, sk->sk_state,
74911 assoc->state, hash,
74912 assoc->assoc_id,
74913 assoc->sndbuf_used,
74914 diff --git a/net/sctp/socket.c b/net/sctp/socket.c
74915 index 54a7cd2..944edae 100644
74916 --- a/net/sctp/socket.c
74917 +++ b/net/sctp/socket.c
74918 @@ -4574,7 +4574,7 @@ static int sctp_getsockopt_peer_addrs(struct sock *sk, int len,
74919 addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len;
74920 if (space_left < addrlen)
74921 return -ENOMEM;
74922 - if (copy_to_user(to, &temp, addrlen))
74923 + if (addrlen > sizeof(temp) || copy_to_user(to, &temp, addrlen))
74924 return -EFAULT;
74925 to += addrlen;
74926 cnt++;
74927 diff --git a/net/socket.c b/net/socket.c
74928 index 2dce67a..1e91168 100644
74929 --- a/net/socket.c
74930 +++ b/net/socket.c
74931 @@ -88,6 +88,7 @@
74932 #include <linux/nsproxy.h>
74933 #include <linux/magic.h>
74934 #include <linux/slab.h>
74935 +#include <linux/in.h>
74936
74937 #include <asm/uaccess.h>
74938 #include <asm/unistd.h>
74939 @@ -105,6 +106,8 @@
74940 #include <linux/sockios.h>
74941 #include <linux/atalk.h>
74942
74943 +#include <linux/grsock.h>
74944 +
74945 static int sock_no_open(struct inode *irrelevant, struct file *dontcare);
74946 static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov,
74947 unsigned long nr_segs, loff_t pos);
74948 @@ -321,7 +324,7 @@ static struct dentry *sockfs_mount(struct file_system_type *fs_type,
74949 &sockfs_dentry_operations, SOCKFS_MAGIC);
74950 }
74951
74952 -static struct vfsmount *sock_mnt __read_mostly;
74953 +struct vfsmount *sock_mnt __read_mostly;
74954
74955 static struct file_system_type sock_fs_type = {
74956 .name = "sockfs",
74957 @@ -1187,6 +1190,8 @@ int __sock_create(struct net *net, int family, int type, int protocol,
74958 return -EAFNOSUPPORT;
74959 if (type < 0 || type >= SOCK_MAX)
74960 return -EINVAL;
74961 + if (protocol < 0)
74962 + return -EINVAL;
74963
74964 /* Compatibility.
74965
74966 @@ -1319,6 +1324,16 @@ SYSCALL_DEFINE3(socket, int, family, int, type, int, protocol)
74967 if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK))
74968 flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
74969
74970 + if(!gr_search_socket(family, type, protocol)) {
74971 + retval = -EACCES;
74972 + goto out;
74973 + }
74974 +
74975 + if (gr_handle_sock_all(family, type, protocol)) {
74976 + retval = -EACCES;
74977 + goto out;
74978 + }
74979 +
74980 retval = sock_create(family, type, protocol, &sock);
74981 if (retval < 0)
74982 goto out;
74983 @@ -1431,6 +1446,14 @@ SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen)
74984 if (sock) {
74985 err = move_addr_to_kernel(umyaddr, addrlen, (struct sockaddr *)&address);
74986 if (err >= 0) {
74987 + if (gr_handle_sock_server((struct sockaddr *)&address)) {
74988 + err = -EACCES;
74989 + goto error;
74990 + }
74991 + err = gr_search_bind(sock, (struct sockaddr_in *)&address);
74992 + if (err)
74993 + goto error;
74994 +
74995 err = security_socket_bind(sock,
74996 (struct sockaddr *)&address,
74997 addrlen);
74998 @@ -1439,6 +1462,7 @@ SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen)
74999 (struct sockaddr *)
75000 &address, addrlen);
75001 }
75002 +error:
75003 fput_light(sock->file, fput_needed);
75004 }
75005 return err;
75006 @@ -1462,10 +1486,20 @@ SYSCALL_DEFINE2(listen, int, fd, int, backlog)
75007 if ((unsigned)backlog > somaxconn)
75008 backlog = somaxconn;
75009
75010 + if (gr_handle_sock_server_other(sock->sk)) {
75011 + err = -EPERM;
75012 + goto error;
75013 + }
75014 +
75015 + err = gr_search_listen(sock);
75016 + if (err)
75017 + goto error;
75018 +
75019 err = security_socket_listen(sock, backlog);
75020 if (!err)
75021 err = sock->ops->listen(sock, backlog);
75022
75023 +error:
75024 fput_light(sock->file, fput_needed);
75025 }
75026 return err;
75027 @@ -1509,6 +1543,18 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
75028 newsock->type = sock->type;
75029 newsock->ops = sock->ops;
75030
75031 + if (gr_handle_sock_server_other(sock->sk)) {
75032 + err = -EPERM;
75033 + sock_release(newsock);
75034 + goto out_put;
75035 + }
75036 +
75037 + err = gr_search_accept(sock);
75038 + if (err) {
75039 + sock_release(newsock);
75040 + goto out_put;
75041 + }
75042 +
75043 /*
75044 * We don't need try_module_get here, as the listening socket (sock)
75045 * has the protocol module (sock->ops->owner) held.
75046 @@ -1547,6 +1593,8 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
75047 fd_install(newfd, newfile);
75048 err = newfd;
75049
75050 + gr_attach_curr_ip(newsock->sk);
75051 +
75052 out_put:
75053 fput_light(sock->file, fput_needed);
75054 out:
75055 @@ -1579,6 +1627,7 @@ SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr,
75056 int, addrlen)
75057 {
75058 struct socket *sock;
75059 + struct sockaddr *sck;
75060 struct sockaddr_storage address;
75061 int err, fput_needed;
75062
75063 @@ -1589,6 +1638,17 @@ SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr,
75064 if (err < 0)
75065 goto out_put;
75066
75067 + sck = (struct sockaddr *)&address;
75068 +
75069 + if (gr_handle_sock_client(sck)) {
75070 + err = -EACCES;
75071 + goto out_put;
75072 + }
75073 +
75074 + err = gr_search_connect(sock, (struct sockaddr_in *)sck);
75075 + if (err)
75076 + goto out_put;
75077 +
75078 err =
75079 security_socket_connect(sock, (struct sockaddr *)&address, addrlen);
75080 if (err)
75081 @@ -1950,7 +2010,7 @@ static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg,
75082 * checking falls down on this.
75083 */
75084 if (copy_from_user(ctl_buf,
75085 - (void __user __force *)msg_sys->msg_control,
75086 + (void __force_user *)msg_sys->msg_control,
75087 ctl_len))
75088 goto out_freectl;
75089 msg_sys->msg_control = ctl_buf;
75090 @@ -2120,7 +2180,7 @@ static int __sys_recvmsg(struct socket *sock, struct msghdr __user *msg,
75091 * kernel msghdr to use the kernel address space)
75092 */
75093
75094 - uaddr = (__force void __user *)msg_sys->msg_name;
75095 + uaddr = (void __force_user *)msg_sys->msg_name;
75096 uaddr_len = COMPAT_NAMELEN(msg);
75097 if (MSG_CMSG_COMPAT & flags) {
75098 err = verify_compat_iovec(msg_sys, iov,
75099 @@ -2748,7 +2808,7 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
75100 }
75101
75102 ifr = compat_alloc_user_space(buf_size);
75103 - rxnfc = (void *)ifr + ALIGN(sizeof(struct ifreq), 8);
75104 + rxnfc = (void __user *)ifr + ALIGN(sizeof(struct ifreq), 8);
75105
75106 if (copy_in_user(&ifr->ifr_name, &ifr32->ifr_name, IFNAMSIZ))
75107 return -EFAULT;
75108 @@ -2772,12 +2832,12 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
75109 offsetof(struct ethtool_rxnfc, fs.ring_cookie));
75110
75111 if (copy_in_user(rxnfc, compat_rxnfc,
75112 - (void *)(&rxnfc->fs.m_ext + 1) -
75113 - (void *)rxnfc) ||
75114 + (void __user *)(&rxnfc->fs.m_ext + 1) -
75115 + (void __user *)rxnfc) ||
75116 copy_in_user(&rxnfc->fs.ring_cookie,
75117 &compat_rxnfc->fs.ring_cookie,
75118 - (void *)(&rxnfc->fs.location + 1) -
75119 - (void *)&rxnfc->fs.ring_cookie) ||
75120 + (void __user *)(&rxnfc->fs.location + 1) -
75121 + (void __user *)&rxnfc->fs.ring_cookie) ||
75122 copy_in_user(&rxnfc->rule_cnt, &compat_rxnfc->rule_cnt,
75123 sizeof(rxnfc->rule_cnt)))
75124 return -EFAULT;
75125 @@ -2789,12 +2849,12 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
75126
75127 if (convert_out) {
75128 if (copy_in_user(compat_rxnfc, rxnfc,
75129 - (const void *)(&rxnfc->fs.m_ext + 1) -
75130 - (const void *)rxnfc) ||
75131 + (const void __user *)(&rxnfc->fs.m_ext + 1) -
75132 + (const void __user *)rxnfc) ||
75133 copy_in_user(&compat_rxnfc->fs.ring_cookie,
75134 &rxnfc->fs.ring_cookie,
75135 - (const void *)(&rxnfc->fs.location + 1) -
75136 - (const void *)&rxnfc->fs.ring_cookie) ||
75137 + (const void __user *)(&rxnfc->fs.location + 1) -
75138 + (const void __user *)&rxnfc->fs.ring_cookie) ||
75139 copy_in_user(&compat_rxnfc->rule_cnt, &rxnfc->rule_cnt,
75140 sizeof(rxnfc->rule_cnt)))
75141 return -EFAULT;
75142 @@ -2864,7 +2924,7 @@ static int bond_ioctl(struct net *net, unsigned int cmd,
75143 old_fs = get_fs();
75144 set_fs(KERNEL_DS);
75145 err = dev_ioctl(net, cmd,
75146 - (struct ifreq __user __force *) &kifr);
75147 + (struct ifreq __force_user *) &kifr);
75148 set_fs(old_fs);
75149
75150 return err;
75151 @@ -2973,7 +3033,7 @@ static int compat_sioc_ifmap(struct net *net, unsigned int cmd,
75152
75153 old_fs = get_fs();
75154 set_fs(KERNEL_DS);
75155 - err = dev_ioctl(net, cmd, (void __user __force *)&ifr);
75156 + err = dev_ioctl(net, cmd, (void __force_user *)&ifr);
75157 set_fs(old_fs);
75158
75159 if (cmd == SIOCGIFMAP && !err) {
75160 @@ -3078,7 +3138,7 @@ static int routing_ioctl(struct net *net, struct socket *sock,
75161 ret |= __get_user(rtdev, &(ur4->rt_dev));
75162 if (rtdev) {
75163 ret |= copy_from_user(devname, compat_ptr(rtdev), 15);
75164 - r4.rt_dev = (char __user __force *)devname;
75165 + r4.rt_dev = (char __force_user *)devname;
75166 devname[15] = 0;
75167 } else
75168 r4.rt_dev = NULL;
75169 @@ -3318,8 +3378,8 @@ int kernel_getsockopt(struct socket *sock, int level, int optname,
75170 int __user *uoptlen;
75171 int err;
75172
75173 - uoptval = (char __user __force *) optval;
75174 - uoptlen = (int __user __force *) optlen;
75175 + uoptval = (char __force_user *) optval;
75176 + uoptlen = (int __force_user *) optlen;
75177
75178 set_fs(KERNEL_DS);
75179 if (level == SOL_SOCKET)
75180 @@ -3339,7 +3399,7 @@ int kernel_setsockopt(struct socket *sock, int level, int optname,
75181 char __user *uoptval;
75182 int err;
75183
75184 - uoptval = (char __user __force *) optval;
75185 + uoptval = (char __force_user *) optval;
75186
75187 set_fs(KERNEL_DS);
75188 if (level == SOL_SOCKET)
75189 diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
75190 index 00a1a2a..6a0138a 100644
75191 --- a/net/sunrpc/sched.c
75192 +++ b/net/sunrpc/sched.c
75193 @@ -238,9 +238,9 @@ static int rpc_wait_bit_killable(void *word)
75194 #ifdef RPC_DEBUG
75195 static void rpc_task_set_debuginfo(struct rpc_task *task)
75196 {
75197 - static atomic_t rpc_pid;
75198 + static atomic_unchecked_t rpc_pid;
75199
75200 - task->tk_pid = atomic_inc_return(&rpc_pid);
75201 + task->tk_pid = atomic_inc_return_unchecked(&rpc_pid);
75202 }
75203 #else
75204 static inline void rpc_task_set_debuginfo(struct rpc_task *task)
75205 diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
75206 index 71bed1c..5dff36d 100644
75207 --- a/net/sunrpc/svcsock.c
75208 +++ b/net/sunrpc/svcsock.c
75209 @@ -396,7 +396,7 @@ static int svc_partial_recvfrom(struct svc_rqst *rqstp,
75210 int buflen, unsigned int base)
75211 {
75212 size_t save_iovlen;
75213 - void __user *save_iovbase;
75214 + void *save_iovbase;
75215 unsigned int i;
75216 int ret;
75217
75218 diff --git a/net/sunrpc/xprtrdma/svc_rdma.c b/net/sunrpc/xprtrdma/svc_rdma.c
75219 index 09af4fa..77110a9 100644
75220 --- a/net/sunrpc/xprtrdma/svc_rdma.c
75221 +++ b/net/sunrpc/xprtrdma/svc_rdma.c
75222 @@ -61,15 +61,15 @@ unsigned int svcrdma_max_req_size = RPCRDMA_MAX_REQ_SIZE;
75223 static unsigned int min_max_inline = 4096;
75224 static unsigned int max_max_inline = 65536;
75225
75226 -atomic_t rdma_stat_recv;
75227 -atomic_t rdma_stat_read;
75228 -atomic_t rdma_stat_write;
75229 -atomic_t rdma_stat_sq_starve;
75230 -atomic_t rdma_stat_rq_starve;
75231 -atomic_t rdma_stat_rq_poll;
75232 -atomic_t rdma_stat_rq_prod;
75233 -atomic_t rdma_stat_sq_poll;
75234 -atomic_t rdma_stat_sq_prod;
75235 +atomic_unchecked_t rdma_stat_recv;
75236 +atomic_unchecked_t rdma_stat_read;
75237 +atomic_unchecked_t rdma_stat_write;
75238 +atomic_unchecked_t rdma_stat_sq_starve;
75239 +atomic_unchecked_t rdma_stat_rq_starve;
75240 +atomic_unchecked_t rdma_stat_rq_poll;
75241 +atomic_unchecked_t rdma_stat_rq_prod;
75242 +atomic_unchecked_t rdma_stat_sq_poll;
75243 +atomic_unchecked_t rdma_stat_sq_prod;
75244
75245 /* Temporary NFS request map and context caches */
75246 struct kmem_cache *svc_rdma_map_cachep;
75247 @@ -109,7 +109,7 @@ static int read_reset_stat(ctl_table *table, int write,
75248 len -= *ppos;
75249 if (len > *lenp)
75250 len = *lenp;
75251 - if (len && copy_to_user(buffer, str_buf, len))
75252 + if (len > sizeof str_buf || (len && copy_to_user(buffer, str_buf, len)))
75253 return -EFAULT;
75254 *lenp = len;
75255 *ppos += len;
75256 @@ -150,63 +150,63 @@ static ctl_table svcrdma_parm_table[] = {
75257 {
75258 .procname = "rdma_stat_read",
75259 .data = &rdma_stat_read,
75260 - .maxlen = sizeof(atomic_t),
75261 + .maxlen = sizeof(atomic_unchecked_t),
75262 .mode = 0644,
75263 .proc_handler = read_reset_stat,
75264 },
75265 {
75266 .procname = "rdma_stat_recv",
75267 .data = &rdma_stat_recv,
75268 - .maxlen = sizeof(atomic_t),
75269 + .maxlen = sizeof(atomic_unchecked_t),
75270 .mode = 0644,
75271 .proc_handler = read_reset_stat,
75272 },
75273 {
75274 .procname = "rdma_stat_write",
75275 .data = &rdma_stat_write,
75276 - .maxlen = sizeof(atomic_t),
75277 + .maxlen = sizeof(atomic_unchecked_t),
75278 .mode = 0644,
75279 .proc_handler = read_reset_stat,
75280 },
75281 {
75282 .procname = "rdma_stat_sq_starve",
75283 .data = &rdma_stat_sq_starve,
75284 - .maxlen = sizeof(atomic_t),
75285 + .maxlen = sizeof(atomic_unchecked_t),
75286 .mode = 0644,
75287 .proc_handler = read_reset_stat,
75288 },
75289 {
75290 .procname = "rdma_stat_rq_starve",
75291 .data = &rdma_stat_rq_starve,
75292 - .maxlen = sizeof(atomic_t),
75293 + .maxlen = sizeof(atomic_unchecked_t),
75294 .mode = 0644,
75295 .proc_handler = read_reset_stat,
75296 },
75297 {
75298 .procname = "rdma_stat_rq_poll",
75299 .data = &rdma_stat_rq_poll,
75300 - .maxlen = sizeof(atomic_t),
75301 + .maxlen = sizeof(atomic_unchecked_t),
75302 .mode = 0644,
75303 .proc_handler = read_reset_stat,
75304 },
75305 {
75306 .procname = "rdma_stat_rq_prod",
75307 .data = &rdma_stat_rq_prod,
75308 - .maxlen = sizeof(atomic_t),
75309 + .maxlen = sizeof(atomic_unchecked_t),
75310 .mode = 0644,
75311 .proc_handler = read_reset_stat,
75312 },
75313 {
75314 .procname = "rdma_stat_sq_poll",
75315 .data = &rdma_stat_sq_poll,
75316 - .maxlen = sizeof(atomic_t),
75317 + .maxlen = sizeof(atomic_unchecked_t),
75318 .mode = 0644,
75319 .proc_handler = read_reset_stat,
75320 },
75321 {
75322 .procname = "rdma_stat_sq_prod",
75323 .data = &rdma_stat_sq_prod,
75324 - .maxlen = sizeof(atomic_t),
75325 + .maxlen = sizeof(atomic_unchecked_t),
75326 .mode = 0644,
75327 .proc_handler = read_reset_stat,
75328 },
75329 diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
75330 index df67211..c354b13 100644
75331 --- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
75332 +++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
75333 @@ -499,7 +499,7 @@ next_sge:
75334 svc_rdma_put_context(ctxt, 0);
75335 goto out;
75336 }
75337 - atomic_inc(&rdma_stat_read);
75338 + atomic_inc_unchecked(&rdma_stat_read);
75339
75340 if (read_wr.num_sge < chl_map->ch[ch_no].count) {
75341 chl_map->ch[ch_no].count -= read_wr.num_sge;
75342 @@ -609,7 +609,7 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
75343 dto_q);
75344 list_del_init(&ctxt->dto_q);
75345 } else {
75346 - atomic_inc(&rdma_stat_rq_starve);
75347 + atomic_inc_unchecked(&rdma_stat_rq_starve);
75348 clear_bit(XPT_DATA, &xprt->xpt_flags);
75349 ctxt = NULL;
75350 }
75351 @@ -629,7 +629,7 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
75352 dprintk("svcrdma: processing ctxt=%p on xprt=%p, rqstp=%p, status=%d\n",
75353 ctxt, rdma_xprt, rqstp, ctxt->wc_status);
75354 BUG_ON(ctxt->wc_status != IB_WC_SUCCESS);
75355 - atomic_inc(&rdma_stat_recv);
75356 + atomic_inc_unchecked(&rdma_stat_recv);
75357
75358 /* Build up the XDR from the receive buffers. */
75359 rdma_build_arg_xdr(rqstp, ctxt, ctxt->byte_len);
75360 diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
75361 index 249a835..fb2794b 100644
75362 --- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
75363 +++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
75364 @@ -362,7 +362,7 @@ static int send_write(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp,
75365 write_wr.wr.rdma.remote_addr = to;
75366
75367 /* Post It */
75368 - atomic_inc(&rdma_stat_write);
75369 + atomic_inc_unchecked(&rdma_stat_write);
75370 if (svc_rdma_send(xprt, &write_wr))
75371 goto err;
75372 return 0;
75373 diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
75374 index ba1296d..0fec1a5 100644
75375 --- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
75376 +++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
75377 @@ -300,7 +300,7 @@ static void rq_cq_reap(struct svcxprt_rdma *xprt)
75378 return;
75379
75380 ib_req_notify_cq(xprt->sc_rq_cq, IB_CQ_NEXT_COMP);
75381 - atomic_inc(&rdma_stat_rq_poll);
75382 + atomic_inc_unchecked(&rdma_stat_rq_poll);
75383
75384 while ((ret = ib_poll_cq(xprt->sc_rq_cq, 1, &wc)) > 0) {
75385 ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id;
75386 @@ -322,7 +322,7 @@ static void rq_cq_reap(struct svcxprt_rdma *xprt)
75387 }
75388
75389 if (ctxt)
75390 - atomic_inc(&rdma_stat_rq_prod);
75391 + atomic_inc_unchecked(&rdma_stat_rq_prod);
75392
75393 set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
75394 /*
75395 @@ -394,7 +394,7 @@ static void sq_cq_reap(struct svcxprt_rdma *xprt)
75396 return;
75397
75398 ib_req_notify_cq(xprt->sc_sq_cq, IB_CQ_NEXT_COMP);
75399 - atomic_inc(&rdma_stat_sq_poll);
75400 + atomic_inc_unchecked(&rdma_stat_sq_poll);
75401 while ((ret = ib_poll_cq(cq, 1, &wc)) > 0) {
75402 if (wc.status != IB_WC_SUCCESS)
75403 /* Close the transport */
75404 @@ -412,7 +412,7 @@ static void sq_cq_reap(struct svcxprt_rdma *xprt)
75405 }
75406
75407 if (ctxt)
75408 - atomic_inc(&rdma_stat_sq_prod);
75409 + atomic_inc_unchecked(&rdma_stat_sq_prod);
75410 }
75411
75412 static void sq_comp_handler(struct ib_cq *cq, void *cq_context)
75413 @@ -1274,7 +1274,7 @@ int svc_rdma_send(struct svcxprt_rdma *xprt, struct ib_send_wr *wr)
75414 spin_lock_bh(&xprt->sc_lock);
75415 if (xprt->sc_sq_depth < atomic_read(&xprt->sc_sq_count) + wr_count) {
75416 spin_unlock_bh(&xprt->sc_lock);
75417 - atomic_inc(&rdma_stat_sq_starve);
75418 + atomic_inc_unchecked(&rdma_stat_sq_starve);
75419
75420 /* See if we can opportunistically reap SQ WR to make room */
75421 sq_cq_reap(xprt);
75422 diff --git a/net/sysctl_net.c b/net/sysctl_net.c
75423 index e758139..d29ea47 100644
75424 --- a/net/sysctl_net.c
75425 +++ b/net/sysctl_net.c
75426 @@ -47,7 +47,7 @@ static int net_ctl_permissions(struct ctl_table_root *root,
75427 struct ctl_table *table)
75428 {
75429 /* Allow network administrator to have same access as root. */
75430 - if (capable(CAP_NET_ADMIN)) {
75431 + if (capable_nolog(CAP_NET_ADMIN)) {
75432 int mode = (table->mode >> 6) & 7;
75433 return (mode << 6) | (mode << 3) | mode;
75434 }
75435 diff --git a/net/tipc/link.c b/net/tipc/link.c
75436 index ae98a72..7bb6056 100644
75437 --- a/net/tipc/link.c
75438 +++ b/net/tipc/link.c
75439 @@ -1203,7 +1203,7 @@ static int link_send_sections_long(struct tipc_port *sender,
75440 struct tipc_msg fragm_hdr;
75441 struct sk_buff *buf, *buf_chain, *prev;
75442 u32 fragm_crs, fragm_rest, hsz, sect_rest;
75443 - const unchar *sect_crs;
75444 + const unchar __user *sect_crs;
75445 int curr_sect;
75446 u32 fragm_no;
75447
75448 @@ -1247,7 +1247,7 @@ again:
75449
75450 if (!sect_rest) {
75451 sect_rest = msg_sect[++curr_sect].iov_len;
75452 - sect_crs = (const unchar *)msg_sect[curr_sect].iov_base;
75453 + sect_crs = (const unchar __user *)msg_sect[curr_sect].iov_base;
75454 }
75455
75456 if (sect_rest < fragm_rest)
75457 @@ -1266,7 +1266,7 @@ error:
75458 }
75459 } else
75460 skb_copy_to_linear_data_offset(buf, fragm_crs,
75461 - sect_crs, sz);
75462 + (const void __force_kernel *)sect_crs, sz);
75463 sect_crs += sz;
75464 sect_rest -= sz;
75465 fragm_crs += sz;
75466 diff --git a/net/tipc/msg.c b/net/tipc/msg.c
75467 index 83d5096..dcba497 100644
75468 --- a/net/tipc/msg.c
75469 +++ b/net/tipc/msg.c
75470 @@ -99,7 +99,7 @@ int tipc_msg_build(struct tipc_msg *hdr, struct iovec const *msg_sect,
75471 msg_sect[cnt].iov_len);
75472 else
75473 skb_copy_to_linear_data_offset(*buf, pos,
75474 - msg_sect[cnt].iov_base,
75475 + (const void __force_kernel *)msg_sect[cnt].iov_base,
75476 msg_sect[cnt].iov_len);
75477 pos += msg_sect[cnt].iov_len;
75478 }
75479 diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
75480 index 1983717..4d6102c 100644
75481 --- a/net/tipc/subscr.c
75482 +++ b/net/tipc/subscr.c
75483 @@ -101,7 +101,7 @@ static void subscr_send_event(struct subscription *sub,
75484 {
75485 struct iovec msg_sect;
75486
75487 - msg_sect.iov_base = (void *)&sub->evt;
75488 + msg_sect.iov_base = (void __force_user *)&sub->evt;
75489 msg_sect.iov_len = sizeof(struct tipc_event);
75490
75491 sub->evt.event = htohl(event, sub->swap);
75492 diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
75493 index d99678a..3514a21 100644
75494 --- a/net/unix/af_unix.c
75495 +++ b/net/unix/af_unix.c
75496 @@ -767,6 +767,12 @@ static struct sock *unix_find_other(struct net *net,
75497 err = -ECONNREFUSED;
75498 if (!S_ISSOCK(inode->i_mode))
75499 goto put_fail;
75500 +
75501 + if (!gr_acl_handle_unix(path.dentry, path.mnt)) {
75502 + err = -EACCES;
75503 + goto put_fail;
75504 + }
75505 +
75506 u = unix_find_socket_byinode(inode);
75507 if (!u)
75508 goto put_fail;
75509 @@ -787,6 +793,13 @@ static struct sock *unix_find_other(struct net *net,
75510 if (u) {
75511 struct dentry *dentry;
75512 dentry = unix_sk(u)->dentry;
75513 +
75514 + if (!gr_handle_chroot_unix(pid_vnr(u->sk_peer_pid))) {
75515 + err = -EPERM;
75516 + sock_put(u);
75517 + goto fail;
75518 + }
75519 +
75520 if (dentry)
75521 touch_atime(unix_sk(u)->mnt, dentry);
75522 } else
75523 @@ -869,11 +882,18 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
75524 err = security_path_mknod(&path, dentry, mode, 0);
75525 if (err)
75526 goto out_mknod_drop_write;
75527 + if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
75528 + err = -EACCES;
75529 + goto out_mknod_drop_write;
75530 + }
75531 err = vfs_mknod(path.dentry->d_inode, dentry, mode, 0);
75532 out_mknod_drop_write:
75533 mnt_drop_write(path.mnt);
75534 if (err)
75535 goto out_mknod_dput;
75536 +
75537 + gr_handle_create(dentry, path.mnt);
75538 +
75539 mutex_unlock(&path.dentry->d_inode->i_mutex);
75540 dput(path.dentry);
75541 path.dentry = dentry;
75542 diff --git a/net/wireless/core.h b/net/wireless/core.h
75543 index b9ec306..b4a563e 100644
75544 --- a/net/wireless/core.h
75545 +++ b/net/wireless/core.h
75546 @@ -27,7 +27,7 @@ struct cfg80211_registered_device {
75547 struct mutex mtx;
75548
75549 /* rfkill support */
75550 - struct rfkill_ops rfkill_ops;
75551 + rfkill_ops_no_const rfkill_ops;
75552 struct rfkill *rfkill;
75553 struct work_struct rfkill_sync;
75554
75555 diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c
75556 index 0af7f54..c916d2f 100644
75557 --- a/net/wireless/wext-core.c
75558 +++ b/net/wireless/wext-core.c
75559 @@ -747,8 +747,7 @@ static int ioctl_standard_iw_point(struct iw_point *iwp, unsigned int cmd,
75560 */
75561
75562 /* Support for very large requests */
75563 - if ((descr->flags & IW_DESCR_FLAG_NOMAX) &&
75564 - (user_length > descr->max_tokens)) {
75565 + if (user_length > descr->max_tokens) {
75566 /* Allow userspace to GET more than max so
75567 * we can support any size GET requests.
75568 * There is still a limit : -ENOMEM.
75569 @@ -785,22 +784,6 @@ static int ioctl_standard_iw_point(struct iw_point *iwp, unsigned int cmd,
75570 }
75571 }
75572
75573 - if (IW_IS_GET(cmd) && !(descr->flags & IW_DESCR_FLAG_NOMAX)) {
75574 - /*
75575 - * If this is a GET, but not NOMAX, it means that the extra
75576 - * data is not bounded by userspace, but by max_tokens. Thus
75577 - * set the length to max_tokens. This matches the extra data
75578 - * allocation.
75579 - * The driver should fill it with the number of tokens it
75580 - * provided, and it may check iwp->length rather than having
75581 - * knowledge of max_tokens. If the driver doesn't change the
75582 - * iwp->length, this ioctl just copies back max_token tokens
75583 - * filled with zeroes. Hopefully the driver isn't claiming
75584 - * them to be valid data.
75585 - */
75586 - iwp->length = descr->max_tokens;
75587 - }
75588 -
75589 err = handler(dev, info, (union iwreq_data *) iwp, extra);
75590
75591 iwp->length += essid_compat;
75592 diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
75593 index 9049a5c..cfa6f5c 100644
75594 --- a/net/xfrm/xfrm_policy.c
75595 +++ b/net/xfrm/xfrm_policy.c
75596 @@ -299,7 +299,7 @@ static void xfrm_policy_kill(struct xfrm_policy *policy)
75597 {
75598 policy->walk.dead = 1;
75599
75600 - atomic_inc(&policy->genid);
75601 + atomic_inc_unchecked(&policy->genid);
75602
75603 if (del_timer(&policy->timer))
75604 xfrm_pol_put(policy);
75605 @@ -583,7 +583,7 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
75606 hlist_add_head(&policy->bydst, chain);
75607 xfrm_pol_hold(policy);
75608 net->xfrm.policy_count[dir]++;
75609 - atomic_inc(&flow_cache_genid);
75610 + atomic_inc_unchecked(&flow_cache_genid);
75611 if (delpol)
75612 __xfrm_policy_unlink(delpol, dir);
75613 policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir);
75614 @@ -1530,7 +1530,7 @@ free_dst:
75615 goto out;
75616 }
75617
75618 -static int inline
75619 +static inline int
75620 xfrm_dst_alloc_copy(void **target, const void *src, int size)
75621 {
75622 if (!*target) {
75623 @@ -1542,7 +1542,7 @@ xfrm_dst_alloc_copy(void **target, const void *src, int size)
75624 return 0;
75625 }
75626
75627 -static int inline
75628 +static inline int
75629 xfrm_dst_update_parent(struct dst_entry *dst, const struct xfrm_selector *sel)
75630 {
75631 #ifdef CONFIG_XFRM_SUB_POLICY
75632 @@ -1554,7 +1554,7 @@ xfrm_dst_update_parent(struct dst_entry *dst, const struct xfrm_selector *sel)
75633 #endif
75634 }
75635
75636 -static int inline
75637 +static inline int
75638 xfrm_dst_update_origin(struct dst_entry *dst, const struct flowi *fl)
75639 {
75640 #ifdef CONFIG_XFRM_SUB_POLICY
75641 @@ -1648,7 +1648,7 @@ xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
75642
75643 xdst->num_pols = num_pols;
75644 memcpy(xdst->pols, pols, sizeof(struct xfrm_policy*) * num_pols);
75645 - xdst->policy_genid = atomic_read(&pols[0]->genid);
75646 + xdst->policy_genid = atomic_read_unchecked(&pols[0]->genid);
75647
75648 return xdst;
75649 }
75650 @@ -2345,7 +2345,7 @@ static int xfrm_bundle_ok(struct xfrm_dst *first)
75651 if (xdst->xfrm_genid != dst->xfrm->genid)
75652 return 0;
75653 if (xdst->num_pols > 0 &&
75654 - xdst->policy_genid != atomic_read(&xdst->pols[0]->genid))
75655 + xdst->policy_genid != atomic_read_unchecked(&xdst->pols[0]->genid))
75656 return 0;
75657
75658 mtu = dst_mtu(dst->child);
75659 @@ -2882,7 +2882,7 @@ static int xfrm_policy_migrate(struct xfrm_policy *pol,
75660 sizeof(pol->xfrm_vec[i].saddr));
75661 pol->xfrm_vec[i].encap_family = mp->new_family;
75662 /* flush bundles */
75663 - atomic_inc(&pol->genid);
75664 + atomic_inc_unchecked(&pol->genid);
75665 }
75666 }
75667
75668 diff --git a/scripts/Makefile.build b/scripts/Makefile.build
75669 index d2b366c..51ff91e 100644
75670 --- a/scripts/Makefile.build
75671 +++ b/scripts/Makefile.build
75672 @@ -109,7 +109,7 @@ endif
75673 endif
75674
75675 # Do not include host rules unless needed
75676 -ifneq ($(hostprogs-y)$(hostprogs-m),)
75677 +ifneq ($(hostprogs-y)$(hostprogs-m)$(hostlibs-y)$(hostlibs-m),)
75678 include scripts/Makefile.host
75679 endif
75680
75681 diff --git a/scripts/Makefile.clean b/scripts/Makefile.clean
75682 index 686cb0d..9d653bf 100644
75683 --- a/scripts/Makefile.clean
75684 +++ b/scripts/Makefile.clean
75685 @@ -43,7 +43,8 @@ subdir-ymn := $(addprefix $(obj)/,$(subdir-ymn))
75686 __clean-files := $(extra-y) $(always) \
75687 $(targets) $(clean-files) \
75688 $(host-progs) \
75689 - $(hostprogs-y) $(hostprogs-m) $(hostprogs-)
75690 + $(hostprogs-y) $(hostprogs-m) $(hostprogs-) \
75691 + $(hostlibs-y) $(hostlibs-m) $(hostlibs-)
75692
75693 __clean-files := $(filter-out $(no-clean-files), $(__clean-files))
75694
75695 diff --git a/scripts/Makefile.host b/scripts/Makefile.host
75696 index 1ac414f..a1c1451 100644
75697 --- a/scripts/Makefile.host
75698 +++ b/scripts/Makefile.host
75699 @@ -31,6 +31,7 @@
75700 # Note: Shared libraries consisting of C++ files are not supported
75701
75702 __hostprogs := $(sort $(hostprogs-y) $(hostprogs-m))
75703 +__hostlibs := $(sort $(hostlibs-y) $(hostlibs-m))
75704
75705 # C code
75706 # Executables compiled from a single .c file
75707 @@ -54,6 +55,7 @@ host-cxxobjs := $(sort $(foreach m,$(host-cxxmulti),$($(m)-cxxobjs)))
75708 # Shared libaries (only .c supported)
75709 # Shared libraries (.so) - all .so files referenced in "xxx-objs"
75710 host-cshlib := $(sort $(filter %.so, $(host-cobjs)))
75711 +host-cshlib += $(sort $(filter %.so, $(__hostlibs)))
75712 # Remove .so files from "xxx-objs"
75713 host-cobjs := $(filter-out %.so,$(host-cobjs))
75714
75715 diff --git a/scripts/basic/fixdep.c b/scripts/basic/fixdep.c
75716 index cb1f50c..cef2a7c 100644
75717 --- a/scripts/basic/fixdep.c
75718 +++ b/scripts/basic/fixdep.c
75719 @@ -161,7 +161,7 @@ static unsigned int strhash(const char *str, unsigned int sz)
75720 /*
75721 * Lookup a value in the configuration string.
75722 */
75723 -static int is_defined_config(const char *name, int len, unsigned int hash)
75724 +static int is_defined_config(const char *name, unsigned int len, unsigned int hash)
75725 {
75726 struct item *aux;
75727
75728 @@ -211,10 +211,10 @@ static void clear_config(void)
75729 /*
75730 * Record the use of a CONFIG_* word.
75731 */
75732 -static void use_config(const char *m, int slen)
75733 +static void use_config(const char *m, unsigned int slen)
75734 {
75735 unsigned int hash = strhash(m, slen);
75736 - int c, i;
75737 + unsigned int c, i;
75738
75739 if (is_defined_config(m, slen, hash))
75740 return;
75741 @@ -235,9 +235,9 @@ static void use_config(const char *m, int slen)
75742
75743 static void parse_config_file(const char *map, size_t len)
75744 {
75745 - const int *end = (const int *) (map + len);
75746 + const unsigned int *end = (const unsigned int *) (map + len);
75747 /* start at +1, so that p can never be < map */
75748 - const int *m = (const int *) map + 1;
75749 + const unsigned int *m = (const unsigned int *) map + 1;
75750 const char *p, *q;
75751
75752 for (; m < end; m++) {
75753 @@ -406,7 +406,7 @@ static void print_deps(void)
75754 static void traps(void)
75755 {
75756 static char test[] __attribute__((aligned(sizeof(int)))) = "CONF";
75757 - int *p = (int *)test;
75758 + unsigned int *p = (unsigned int *)test;
75759
75760 if (*p != INT_CONF) {
75761 fprintf(stderr, "fixdep: sizeof(int) != 4 or wrong endianess? %#x\n",
75762 diff --git a/scripts/gcc-plugin.sh b/scripts/gcc-plugin.sh
75763 new file mode 100644
75764 index 0000000..8729101
75765 --- /dev/null
75766 +++ b/scripts/gcc-plugin.sh
75767 @@ -0,0 +1,2 @@
75768 +#!/bin/sh
75769 +echo -e "#include \"gcc-plugin.h\"\n#include \"tree.h\"\n#include \"tm.h\"\n#include \"rtl.h\"" | $1 -x c -shared - -o /dev/null -I`$2 -print-file-name=plugin`/include >/dev/null 2>&1 && echo "y"
75770 diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
75771 index f936d1f..a66d95f 100644
75772 --- a/scripts/mod/file2alias.c
75773 +++ b/scripts/mod/file2alias.c
75774 @@ -72,7 +72,7 @@ static void device_id_check(const char *modname, const char *device_id,
75775 unsigned long size, unsigned long id_size,
75776 void *symval)
75777 {
75778 - int i;
75779 + unsigned int i;
75780
75781 if (size % id_size || size < id_size) {
75782 if (cross_build != 0)
75783 @@ -102,7 +102,7 @@ static void device_id_check(const char *modname, const char *device_id,
75784 /* USB is special because the bcdDevice can be matched against a numeric range */
75785 /* Looks like "usb:vNpNdNdcNdscNdpNicNiscNipN" */
75786 static void do_usb_entry(struct usb_device_id *id,
75787 - unsigned int bcdDevice_initial, int bcdDevice_initial_digits,
75788 + unsigned int bcdDevice_initial, unsigned int bcdDevice_initial_digits,
75789 unsigned char range_lo, unsigned char range_hi,
75790 unsigned char max, struct module *mod)
75791 {
75792 @@ -203,7 +203,7 @@ static void do_usb_entry_multi(struct usb_device_id *id, struct module *mod)
75793 {
75794 unsigned int devlo, devhi;
75795 unsigned char chi, clo, max;
75796 - int ndigits;
75797 + unsigned int ndigits;
75798
75799 id->match_flags = TO_NATIVE(id->match_flags);
75800 id->idVendor = TO_NATIVE(id->idVendor);
75801 @@ -437,7 +437,7 @@ static void do_pnp_device_entry(void *symval, unsigned long size,
75802 for (i = 0; i < count; i++) {
75803 const char *id = (char *)devs[i].id;
75804 char acpi_id[sizeof(devs[0].id)];
75805 - int j;
75806 + unsigned int j;
75807
75808 buf_printf(&mod->dev_table_buf,
75809 "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
75810 @@ -467,7 +467,7 @@ static void do_pnp_card_entries(void *symval, unsigned long size,
75811
75812 for (j = 0; j < PNP_MAX_DEVICES; j++) {
75813 const char *id = (char *)card->devs[j].id;
75814 - int i2, j2;
75815 + unsigned int i2, j2;
75816 int dup = 0;
75817
75818 if (!id[0])
75819 @@ -493,7 +493,7 @@ static void do_pnp_card_entries(void *symval, unsigned long size,
75820 /* add an individual alias for every device entry */
75821 if (!dup) {
75822 char acpi_id[sizeof(card->devs[0].id)];
75823 - int k;
75824 + unsigned int k;
75825
75826 buf_printf(&mod->dev_table_buf,
75827 "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
75828 @@ -807,7 +807,7 @@ static void dmi_ascii_filter(char *d, const char *s)
75829 static int do_dmi_entry(const char *filename, struct dmi_system_id *id,
75830 char *alias)
75831 {
75832 - int i, j;
75833 + unsigned int i, j;
75834
75835 sprintf(alias, "dmi*");
75836
75837 diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
75838 index 2bd594e..d43245e 100644
75839 --- a/scripts/mod/modpost.c
75840 +++ b/scripts/mod/modpost.c
75841 @@ -919,6 +919,7 @@ enum mismatch {
75842 ANY_INIT_TO_ANY_EXIT,
75843 ANY_EXIT_TO_ANY_INIT,
75844 EXPORT_TO_INIT_EXIT,
75845 + DATA_TO_TEXT
75846 };
75847
75848 struct sectioncheck {
75849 @@ -1027,6 +1028,12 @@ const struct sectioncheck sectioncheck[] = {
75850 .tosec = { INIT_SECTIONS, EXIT_SECTIONS, NULL },
75851 .mismatch = EXPORT_TO_INIT_EXIT,
75852 .symbol_white_list = { DEFAULT_SYMBOL_WHITE_LIST, NULL },
75853 +},
75854 +/* Do not reference code from writable data */
75855 +{
75856 + .fromsec = { DATA_SECTIONS, NULL },
75857 + .tosec = { TEXT_SECTIONS, NULL },
75858 + .mismatch = DATA_TO_TEXT
75859 }
75860 };
75861
75862 @@ -1149,10 +1156,10 @@ static Elf_Sym *find_elf_symbol(struct elf_info *elf, Elf64_Sword addr,
75863 continue;
75864 if (ELF_ST_TYPE(sym->st_info) == STT_SECTION)
75865 continue;
75866 - if (sym->st_value == addr)
75867 - return sym;
75868 /* Find a symbol nearby - addr are maybe negative */
75869 d = sym->st_value - addr;
75870 + if (d == 0)
75871 + return sym;
75872 if (d < 0)
75873 d = addr - sym->st_value;
75874 if (d < distance) {
75875 @@ -1431,6 +1438,14 @@ static void report_sec_mismatch(const char *modname,
75876 tosym, prl_to, prl_to, tosym);
75877 free(prl_to);
75878 break;
75879 + case DATA_TO_TEXT:
75880 +/*
75881 + fprintf(stderr,
75882 + "The variable %s references\n"
75883 + "the %s %s%s%s\n",
75884 + fromsym, to, sec2annotation(tosec), tosym, to_p);
75885 +*/
75886 + break;
75887 }
75888 fprintf(stderr, "\n");
75889 }
75890 @@ -1656,7 +1671,7 @@ static void section_rel(const char *modname, struct elf_info *elf,
75891 static void check_sec_ref(struct module *mod, const char *modname,
75892 struct elf_info *elf)
75893 {
75894 - int i;
75895 + unsigned int i;
75896 Elf_Shdr *sechdrs = elf->sechdrs;
75897
75898 /* Walk through all sections */
75899 @@ -1754,7 +1769,7 @@ void __attribute__((format(printf, 2, 3))) buf_printf(struct buffer *buf,
75900 va_end(ap);
75901 }
75902
75903 -void buf_write(struct buffer *buf, const char *s, int len)
75904 +void buf_write(struct buffer *buf, const char *s, unsigned int len)
75905 {
75906 if (buf->size - buf->pos < len) {
75907 buf->size += len + SZ;
75908 @@ -1972,7 +1987,7 @@ static void write_if_changed(struct buffer *b, const char *fname)
75909 if (fstat(fileno(file), &st) < 0)
75910 goto close_write;
75911
75912 - if (st.st_size != b->pos)
75913 + if (st.st_size != (off_t)b->pos)
75914 goto close_write;
75915
75916 tmp = NOFAIL(malloc(b->pos));
75917 diff --git a/scripts/mod/modpost.h b/scripts/mod/modpost.h
75918 index 2031119..b5433af 100644
75919 --- a/scripts/mod/modpost.h
75920 +++ b/scripts/mod/modpost.h
75921 @@ -92,15 +92,15 @@ void *do_nofail(void *ptr, const char *expr);
75922
75923 struct buffer {
75924 char *p;
75925 - int pos;
75926 - int size;
75927 + unsigned int pos;
75928 + unsigned int size;
75929 };
75930
75931 void __attribute__((format(printf, 2, 3)))
75932 buf_printf(struct buffer *buf, const char *fmt, ...);
75933
75934 void
75935 -buf_write(struct buffer *buf, const char *s, int len);
75936 +buf_write(struct buffer *buf, const char *s, unsigned int len);
75937
75938 struct module {
75939 struct module *next;
75940 diff --git a/scripts/mod/sumversion.c b/scripts/mod/sumversion.c
75941 index 9dfcd6d..099068e 100644
75942 --- a/scripts/mod/sumversion.c
75943 +++ b/scripts/mod/sumversion.c
75944 @@ -470,7 +470,7 @@ static void write_version(const char *filename, const char *sum,
75945 goto out;
75946 }
75947
75948 - if (write(fd, sum, strlen(sum)+1) != strlen(sum)+1) {
75949 + if (write(fd, sum, strlen(sum)+1) != (ssize_t)strlen(sum)+1) {
75950 warn("writing sum in %s failed: %s\n",
75951 filename, strerror(errno));
75952 goto out;
75953 diff --git a/scripts/pnmtologo.c b/scripts/pnmtologo.c
75954 index 5c11312..72742b5 100644
75955 --- a/scripts/pnmtologo.c
75956 +++ b/scripts/pnmtologo.c
75957 @@ -237,14 +237,14 @@ static void write_header(void)
75958 fprintf(out, " * Linux logo %s\n", logoname);
75959 fputs(" */\n\n", out);
75960 fputs("#include <linux/linux_logo.h>\n\n", out);
75961 - fprintf(out, "static unsigned char %s_data[] __initdata = {\n",
75962 + fprintf(out, "static unsigned char %s_data[] = {\n",
75963 logoname);
75964 }
75965
75966 static void write_footer(void)
75967 {
75968 fputs("\n};\n\n", out);
75969 - fprintf(out, "const struct linux_logo %s __initconst = {\n", logoname);
75970 + fprintf(out, "const struct linux_logo %s = {\n", logoname);
75971 fprintf(out, "\t.type\t\t= %s,\n", logo_types[logo_type]);
75972 fprintf(out, "\t.width\t\t= %d,\n", logo_width);
75973 fprintf(out, "\t.height\t\t= %d,\n", logo_height);
75974 @@ -374,7 +374,7 @@ static void write_logo_clut224(void)
75975 fputs("\n};\n\n", out);
75976
75977 /* write logo clut */
75978 - fprintf(out, "static unsigned char %s_clut[] __initdata = {\n",
75979 + fprintf(out, "static unsigned char %s_clut[] = {\n",
75980 logoname);
75981 write_hex_cnt = 0;
75982 for (i = 0; i < logo_clutsize; i++) {
75983 diff --git a/security/Kconfig b/security/Kconfig
75984 index 51bd5a0..eeabc9f 100644
75985 --- a/security/Kconfig
75986 +++ b/security/Kconfig
75987 @@ -4,6 +4,627 @@
75988
75989 menu "Security options"
75990
75991 +source grsecurity/Kconfig
75992 +
75993 +menu "PaX"
75994 +
75995 + config ARCH_TRACK_EXEC_LIMIT
75996 + bool
75997 +
75998 + config PAX_KERNEXEC_PLUGIN
75999 + bool
76000 +
76001 + config PAX_PER_CPU_PGD
76002 + bool
76003 +
76004 + config TASK_SIZE_MAX_SHIFT
76005 + int
76006 + depends on X86_64
76007 + default 47 if !PAX_PER_CPU_PGD
76008 + default 42 if PAX_PER_CPU_PGD
76009 +
76010 + config PAX_ENABLE_PAE
76011 + bool
76012 + default y if (X86_32 && (MPENTIUM4 || MK8 || MPSC || MCORE2 || MATOM))
76013 +
76014 +config PAX
76015 + bool "Enable various PaX features"
76016 + depends on GRKERNSEC && (ALPHA || ARM || AVR32 || IA64 || MIPS || PARISC || PPC || SPARC || X86)
76017 + help
76018 + This allows you to enable various PaX features. PaX adds
76019 + intrusion prevention mechanisms to the kernel that reduce
76020 + the risks posed by exploitable memory corruption bugs.
76021 +
76022 +menu "PaX Control"
76023 + depends on PAX
76024 +
76025 +config PAX_SOFTMODE
76026 + bool 'Support soft mode'
76027 + help
76028 + Enabling this option will allow you to run PaX in soft mode, that
76029 + is, PaX features will not be enforced by default, only on executables
76030 + marked explicitly. You must also enable PT_PAX_FLAGS or XATTR_PAX_FLAGS
76031 + support as they are the only way to mark executables for soft mode use.
76032 +
76033 + Soft mode can be activated by using the "pax_softmode=1" kernel command
76034 + line option on boot. Furthermore you can control various PaX features
76035 + at runtime via the entries in /proc/sys/kernel/pax.
76036 +
76037 +config PAX_EI_PAX
76038 + bool 'Use legacy ELF header marking'
76039 + help
76040 + Enabling this option will allow you to control PaX features on
76041 + a per executable basis via the 'chpax' utility available at
76042 + http://pax.grsecurity.net/. The control flags will be read from
76043 + an otherwise reserved part of the ELF header. This marking has
76044 + numerous drawbacks (no support for soft-mode, toolchain does not
76045 + know about the non-standard use of the ELF header) therefore it
76046 + has been deprecated in favour of PT_PAX_FLAGS and XATTR_PAX_FLAGS
76047 + support.
76048 +
76049 + If you have applications not marked by the PT_PAX_FLAGS ELF program
76050 + header and you cannot use XATTR_PAX_FLAGS then you MUST enable this
76051 + option otherwise they will not get any protection.
76052 +
76053 + Note that if you enable PT_PAX_FLAGS or XATTR_PAX_FLAGS marking
76054 + support as well, they will override the legacy EI_PAX marks.
76055 +
76056 +config PAX_PT_PAX_FLAGS
76057 + bool 'Use ELF program header marking'
76058 + help
76059 + Enabling this option will allow you to control PaX features on
76060 + a per executable basis via the 'paxctl' utility available at
76061 + http://pax.grsecurity.net/. The control flags will be read from
76062 + a PaX specific ELF program header (PT_PAX_FLAGS). This marking
76063 + has the benefits of supporting both soft mode and being fully
76064 + integrated into the toolchain (the binutils patch is available
76065 + from http://pax.grsecurity.net).
76066 +
76067 + If you have applications not marked by the PT_PAX_FLAGS ELF program
76068 + header then you MUST enable either XATTR_PAX_FLAGS or EI_PAX marking
76069 + support otherwise they will not get any protection.
76070 +
76071 + If you enable both PT_PAX_FLAGS and XATTR_PAX_FLAGS support then you
76072 + must make sure that the marks are the same if a binary has both marks.
76073 +
76074 + Note that if you enable the legacy EI_PAX marking support as well,
76075 + the EI_PAX marks will be overridden by the PT_PAX_FLAGS marks.
76076 +
76077 +config PAX_XATTR_PAX_FLAGS
76078 + bool 'Use filesystem extended attributes marking'
76079 + depends on EXPERT
76080 + select CIFS_XATTR if CIFS
76081 + select EXT2_FS_XATTR if EXT2_FS
76082 + select EXT3_FS_XATTR if EXT3_FS
76083 + select EXT4_FS_XATTR if EXT4_FS
76084 + select JFFS2_FS_XATTR if JFFS2_FS
76085 + select REISERFS_FS_XATTR if REISERFS_FS
76086 + select SQUASHFS_XATTR if SQUASHFS
76087 + select TMPFS_XATTR if TMPFS
76088 + select UBIFS_FS_XATTR if UBIFS_FS
76089 + help
76090 + Enabling this option will allow you to control PaX features on
76091 + a per executable basis via the 'setfattr' utility. The control
76092 + flags will be read from the user.pax.flags extended attribute of
76093 + the file. This marking has the benefit of supporting binary-only
76094 + applications that self-check themselves (e.g., skype) and would
76095 + not tolerate chpax/paxctl changes. The main drawback is that
76096 + extended attributes are not supported by some filesystems (e.g.,
76097 + isofs, udf, vfat) so copying files through such filesystems will
76098 + lose the extended attributes and these PaX markings.
76099 +
76100 + If you have applications not marked by the PT_PAX_FLAGS ELF program
76101 + header then you MUST enable either XATTR_PAX_FLAGS or EI_PAX marking
76102 + support otherwise they will not get any protection.
76103 +
76104 + If you enable both PT_PAX_FLAGS and XATTR_PAX_FLAGS support then you
76105 + must make sure that the marks are the same if a binary has both marks.
76106 +
76107 + Note that if you enable the legacy EI_PAX marking support as well,
76108 + the EI_PAX marks will be overridden by the XATTR_PAX_FLAGS marks.
76109 +
76110 +choice
76111 + prompt 'MAC system integration'
76112 + default PAX_HAVE_ACL_FLAGS
76113 + help
76114 + Mandatory Access Control systems have the option of controlling
76115 + PaX flags on a per executable basis, choose the method supported
76116 + by your particular system.
76117 +
76118 + - "none": if your MAC system does not interact with PaX,
76119 + - "direct": if your MAC system defines pax_set_initial_flags() itself,
76120 + - "hook": if your MAC system uses the pax_set_initial_flags_func callback.
76121 +
76122 + NOTE: this option is for developers/integrators only.
76123 +
76124 + config PAX_NO_ACL_FLAGS
76125 + bool 'none'
76126 +
76127 + config PAX_HAVE_ACL_FLAGS
76128 + bool 'direct'
76129 +
76130 + config PAX_HOOK_ACL_FLAGS
76131 + bool 'hook'
76132 +endchoice
76133 +
76134 +endmenu
76135 +
76136 +menu "Non-executable pages"
76137 + depends on PAX
76138 +
76139 +config PAX_NOEXEC
76140 + bool "Enforce non-executable pages"
76141 + depends on ALPHA || (ARM && (CPU_V6 || CPU_V7)) || IA64 || MIPS || PARISC || PPC || S390 || SPARC || X86
76142 + help
76143 + By design some architectures do not allow for protecting memory
76144 + pages against execution or even if they do, Linux does not make
76145 + use of this feature. In practice this means that if a page is
76146 + readable (such as the stack or heap) it is also executable.
76147 +
76148 + There is a well known exploit technique that makes use of this
76149 + fact and a common programming mistake where an attacker can
76150 + introduce code of his choice somewhere in the attacked program's
76151 + memory (typically the stack or the heap) and then execute it.
76152 +
76153 + If the attacked program was running with different (typically
76154 + higher) privileges than that of the attacker, then he can elevate
76155 + his own privilege level (e.g. get a root shell, write to files for
76156 + which he does not have write access to, etc).
76157 +
76158 + Enabling this option will let you choose from various features
76159 + that prevent the injection and execution of 'foreign' code in
76160 + a program.
76161 +
76162 + This will also break programs that rely on the old behaviour and
76163 + expect that dynamically allocated memory via the malloc() family
76164 + of functions is executable (which it is not). Notable examples
76165 + are the XFree86 4.x server, the java runtime and wine.
76166 +
76167 +config PAX_PAGEEXEC
76168 + bool "Paging based non-executable pages"
76169 + depends on PAX_NOEXEC && (!X86_32 || M586 || M586TSC || M586MMX || M686 || MPENTIUMII || MPENTIUMIII || MPENTIUMM || MCORE2 || MATOM || MPENTIUM4 || MPSC || MK7 || MK8 || MWINCHIPC6 || MWINCHIP2 || MWINCHIP3D || MVIAC3_2 || MVIAC7)
76170 + select S390_SWITCH_AMODE if S390
76171 + select S390_EXEC_PROTECT if S390
76172 + select ARCH_TRACK_EXEC_LIMIT if X86_32
76173 + help
76174 + This implementation is based on the paging feature of the CPU.
76175 + On i386 without hardware non-executable bit support there is a
76176 + variable but usually low performance impact, however on Intel's
76177 + P4 core based CPUs it is very high so you should not enable this
76178 + for kernels meant to be used on such CPUs.
76179 +
76180 + On alpha, avr32, ia64, parisc, sparc, sparc64, x86_64 and i386
76181 + with hardware non-executable bit support there is no performance
76182 + impact, on ppc the impact is negligible.
76183 +
76184 + Note that several architectures require various emulations due to
76185 + badly designed userland ABIs, this will cause a performance impact
76186 + but will disappear as soon as userland is fixed. For example, ppc
76187 + userland MUST have been built with secure-plt by a recent toolchain.
76188 +
76189 +config PAX_SEGMEXEC
76190 + bool "Segmentation based non-executable pages"
76191 + depends on PAX_NOEXEC && X86_32
76192 + help
76193 + This implementation is based on the segmentation feature of the
76194 + CPU and has a very small performance impact, however applications
76195 + will be limited to a 1.5 GB address space instead of the normal
76196 + 3 GB.
76197 +
76198 +config PAX_EMUTRAMP
76199 + bool "Emulate trampolines" if (PAX_PAGEEXEC || PAX_SEGMEXEC) && (PARISC || X86)
76200 + default y if PARISC
76201 + help
76202 + There are some programs and libraries that for one reason or
76203 + another attempt to execute special small code snippets from
76204 + non-executable memory pages. Most notable examples are the
76205 + signal handler return code generated by the kernel itself and
76206 + the GCC trampolines.
76207 +
76208 + If you enabled CONFIG_PAX_PAGEEXEC or CONFIG_PAX_SEGMEXEC then
76209 + such programs will no longer work under your kernel.
76210 +
76211 + As a remedy you can say Y here and use the 'chpax' or 'paxctl'
76212 + utilities to enable trampoline emulation for the affected programs
76213 + yet still have the protection provided by the non-executable pages.
76214 +
76215 + On parisc you MUST enable this option and EMUSIGRT as well, otherwise
76216 + your system will not even boot.
76217 +
76218 + Alternatively you can say N here and use the 'chpax' or 'paxctl'
76219 + utilities to disable CONFIG_PAX_PAGEEXEC and CONFIG_PAX_SEGMEXEC
76220 + for the affected files.
76221 +
76222 + NOTE: enabling this feature *may* open up a loophole in the
76223 + protection provided by non-executable pages that an attacker
76224 + could abuse. Therefore the best solution is to not have any
76225 + files on your system that would require this option. This can
76226 + be achieved by not using libc5 (which relies on the kernel
76227 + signal handler return code) and not using or rewriting programs
76228 + that make use of the nested function implementation of GCC.
76229 + Skilled users can just fix GCC itself so that it implements
76230 + nested function calls in a way that does not interfere with PaX.
76231 +
76232 +config PAX_EMUSIGRT
76233 + bool "Automatically emulate sigreturn trampolines"
76234 + depends on PAX_EMUTRAMP && PARISC
76235 + default y
76236 + help
76237 + Enabling this option will have the kernel automatically detect
76238 + and emulate signal return trampolines executing on the stack
76239 + that would otherwise lead to task termination.
76240 +
76241 + This solution is intended as a temporary one for users with
76242 + legacy versions of libc (libc5, glibc 2.0, uClibc before 0.9.17,
76243 + Modula-3 runtime, etc) or executables linked to such, basically
76244 + everything that does not specify its own SA_RESTORER function in
76245 + normal executable memory like glibc 2.1+ does.
76246 +
76247 + On parisc you MUST enable this option, otherwise your system will
76248 + not even boot.
76249 +
76250 + NOTE: this feature cannot be disabled on a per executable basis
76251 + and since it *does* open up a loophole in the protection provided
76252 + by non-executable pages, the best solution is to not have any
76253 + files on your system that would require this option.
76254 +
76255 +config PAX_MPROTECT
76256 + bool "Restrict mprotect()"
76257 + depends on (PAX_PAGEEXEC || PAX_SEGMEXEC)
76258 + help
76259 + Enabling this option will prevent programs from
76260 + - changing the executable status of memory pages that were
76261 + not originally created as executable,
76262 + - making read-only executable pages writable again,
76263 + - creating executable pages from anonymous memory,
76264 + - making read-only-after-relocations (RELRO) data pages writable again.
76265 +
76266 + You should say Y here to complete the protection provided by
76267 + the enforcement of non-executable pages.
76268 +
76269 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control
76270 + this feature on a per file basis.
76271 +
76272 +config PAX_MPROTECT_COMPAT
76273 + bool "Use legacy/compat protection demoting (read help)"
76274 + depends on PAX_MPROTECT
76275 + default n
76276 + help
76277 + The current implementation of PAX_MPROTECT denies RWX allocations/mprotects
76278 + by sending the proper error code to the application. For some broken
76279 + userland, this can cause problems with Python or other applications. The
76280 + current implementation however allows for applications like clamav to
76281 + detect if JIT compilation/execution is allowed and to fall back gracefully
76282 + to an interpreter-based mode if it does not. While we encourage everyone
76283 + to use the current implementation as-is and push upstream to fix broken
76284 + userland (note that the RWX logging option can assist with this), in some
76285 + environments this may not be possible. Having to disable MPROTECT
76286 + completely on certain binaries reduces the security benefit of PaX,
76287 + so this option is provided for those environments to revert to the old
76288 + behavior.
76289 +
76290 +config PAX_ELFRELOCS
76291 + bool "Allow ELF text relocations (read help)"
76292 + depends on PAX_MPROTECT
76293 + default n
76294 + help
76295 + Non-executable pages and mprotect() restrictions are effective
76296 + in preventing the introduction of new executable code into an
76297 + attacked task's address space. There remain only two venues
76298 + for this kind of attack: if the attacker can execute already
76299 + existing code in the attacked task then he can either have it
76300 + create and mmap() a file containing his code or have it mmap()
76301 + an already existing ELF library that does not have position
76302 + independent code in it and use mprotect() on it to make it
76303 + writable and copy his code there. While protecting against
76304 + the former approach is beyond PaX, the latter can be prevented
76305 + by having only PIC ELF libraries on one's system (which do not
76306 + need to relocate their code). If you are sure this is your case,
76307 + as is the case with all modern Linux distributions, then leave
76308 + this option disabled. You should say 'n' here.
76309 +
76310 +config PAX_ETEXECRELOCS
76311 + bool "Allow ELF ET_EXEC text relocations"
76312 + depends on PAX_MPROTECT && (ALPHA || IA64 || PARISC)
76313 + select PAX_ELFRELOCS
76314 + default y
76315 + help
76316 + On some architectures there are incorrectly created applications
76317 + that require text relocations and would not work without enabling
76318 + this option. If you are an alpha, ia64 or parisc user, you should
76319 + enable this option and disable it once you have made sure that
76320 + none of your applications need it.
76321 +
76322 +config PAX_EMUPLT
76323 + bool "Automatically emulate ELF PLT"
76324 + depends on PAX_MPROTECT && (ALPHA || PARISC || SPARC)
76325 + default y
76326 + help
76327 + Enabling this option will have the kernel automatically detect
76328 + and emulate the Procedure Linkage Table entries in ELF files.
76329 + On some architectures such entries are in writable memory, and
76330 + become non-executable leading to task termination. Therefore
76331 + it is mandatory that you enable this option on alpha, parisc,
76332 + sparc and sparc64, otherwise your system would not even boot.
76333 +
76334 + NOTE: this feature *does* open up a loophole in the protection
76335 + provided by the non-executable pages, therefore the proper
76336 + solution is to modify the toolchain to produce a PLT that does
76337 + not need to be writable.
76338 +
76339 +config PAX_DLRESOLVE
76340 + bool 'Emulate old glibc resolver stub'
76341 + depends on PAX_EMUPLT && SPARC
76342 + default n
76343 + help
76344 + This option is needed if userland has an old glibc (before 2.4)
76345 + that puts a 'save' instruction into the runtime generated resolver
76346 + stub that needs special emulation.
76347 +
76348 +config PAX_KERNEXEC
76349 + bool "Enforce non-executable kernel pages"
76350 + depends on (PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN
76351 + select PAX_PER_CPU_PGD if X86_64 || (X86_32 && X86_PAE)
76352 + select PAX_KERNEXEC_PLUGIN if X86_64
76353 + help
76354 + This is the kernel land equivalent of PAGEEXEC and MPROTECT,
76355 + that is, enabling this option will make it harder to inject
76356 + and execute 'foreign' code in kernel memory itself.
76357 +
76358 + Note that on x86_64 kernels there is a known regression when
76359 + this feature and KVM/VMX are both enabled in the host kernel.
76360 +
76361 +choice
76362 + prompt "Return Address Instrumentation Method"
76363 + default PAX_KERNEXEC_PLUGIN_METHOD_BTS
76364 + depends on PAX_KERNEXEC_PLUGIN
76365 + help
76366 + Select the method used to instrument function pointer dereferences.
76367 + Note that binary modules cannot be instrumented by this approach.
76368 +
76369 + config PAX_KERNEXEC_PLUGIN_METHOD_BTS
76370 + bool "bts"
76371 + help
76372 + This method is compatible with binary only modules but has
76373 + a higher runtime overhead.
76374 +
76375 + config PAX_KERNEXEC_PLUGIN_METHOD_OR
76376 + bool "or"
76377 + depends on !PARAVIRT
76378 + help
76379 + This method is incompatible with binary only modules but has
76380 + a lower runtime overhead.
76381 +endchoice
76382 +
76383 +config PAX_KERNEXEC_PLUGIN_METHOD
76384 + string
76385 + default "bts" if PAX_KERNEXEC_PLUGIN_METHOD_BTS
76386 + default "or" if PAX_KERNEXEC_PLUGIN_METHOD_OR
76387 + default ""
76388 +
76389 +config PAX_KERNEXEC_MODULE_TEXT
76390 + int "Minimum amount of memory reserved for module code"
76391 + default "4"
76392 + depends on PAX_KERNEXEC && X86_32 && MODULES
76393 + help
76394 + Due to implementation details the kernel must reserve a fixed
76395 + amount of memory for module code at compile time that cannot be
76396 + changed at runtime. Here you can specify the minimum amount
76397 + in MB that will be reserved. Due to the same implementation
76398 + details this size will always be rounded up to the next 2/4 MB
76399 + boundary (depends on PAE) so the actually available memory for
76400 + module code will usually be more than this minimum.
76401 +
76402 + The default 4 MB should be enough for most users but if you have
76403 + an excessive number of modules (e.g., most distribution configs
76404 + compile many drivers as modules) or use huge modules such as
76405 + nvidia's kernel driver, you will need to adjust this amount.
76406 + A good rule of thumb is to look at your currently loaded kernel
76407 + modules and add up their sizes.
76408 +
76409 +endmenu
76410 +
76411 +menu "Address Space Layout Randomization"
76412 + depends on PAX
76413 +
76414 +config PAX_ASLR
76415 + bool "Address Space Layout Randomization"
76416 + help
76417 + Many if not most exploit techniques rely on the knowledge of
76418 + certain addresses in the attacked program. The following options
76419 + will allow the kernel to apply a certain amount of randomization
76420 + to specific parts of the program thereby forcing an attacker to
76421 + guess them in most cases. Any failed guess will most likely crash
76422 + the attacked program which allows the kernel to detect such attempts
76423 + and react on them. PaX itself provides no reaction mechanisms,
76424 + instead it is strongly encouraged that you make use of Nergal's
76425 + segvguard (ftp://ftp.pl.openwall.com/misc/segvguard/) or grsecurity's
76426 + (http://www.grsecurity.net/) built-in crash detection features or
76427 + develop one yourself.
76428 +
76429 + By saying Y here you can choose to randomize the following areas:
76430 + - top of the task's kernel stack
76431 + - top of the task's userland stack
76432 + - base address for mmap() requests that do not specify one
76433 + (this includes all libraries)
76434 + - base address of the main executable
76435 +
76436 + It is strongly recommended to say Y here as address space layout
76437 + randomization has negligible impact on performance yet it provides
76438 + a very effective protection.
76439 +
76440 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control
76441 + this feature on a per file basis.
76442 +
76443 +config PAX_RANDKSTACK
76444 + bool "Randomize kernel stack base"
76445 + depends on X86_TSC && X86
76446 + help
76447 + By saying Y here the kernel will randomize every task's kernel
76448 + stack on every system call. This will not only force an attacker
76449 + to guess it but also prevent him from making use of possible
76450 + leaked information about it.
76451 +
76452 + Since the kernel stack is a rather scarce resource, randomization
76453 + may cause unexpected stack overflows, therefore you should very
76454 + carefully test your system. Note that once enabled in the kernel
76455 + configuration, this feature cannot be disabled on a per file basis.
76456 +
76457 +config PAX_RANDUSTACK
76458 + bool "Randomize user stack base"
76459 + depends on PAX_ASLR
76460 + help
76461 + By saying Y here the kernel will randomize every task's userland
76462 + stack. The randomization is done in two steps where the second
76463 + one may apply a big amount of shift to the top of the stack and
76464 + cause problems for programs that want to use lots of memory (more
76465 + than 2.5 GB if SEGMEXEC is not active, or 1.25 GB when it is).
76466 + For this reason the second step can be controlled by 'chpax' or
76467 + 'paxctl' on a per file basis.
76468 +
76469 +config PAX_RANDMMAP
76470 + bool "Randomize mmap() base"
76471 + depends on PAX_ASLR
76472 + help
76473 + By saying Y here the kernel will use a randomized base address for
76474 + mmap() requests that do not specify one themselves. As a result
76475 + all dynamically loaded libraries will appear at random addresses
76476 + and therefore be harder to exploit by a technique where an attacker
76477 + attempts to execute library code for his purposes (e.g. spawn a
76478 + shell from an exploited program that is running at an elevated
76479 + privilege level).
76480 +
76481 + Furthermore, if a program is relinked as a dynamic ELF file, its
76482 + base address will be randomized as well, completing the full
76483 + randomization of the address space layout. Attacking such programs
76484 + becomes a guess game. You can find an example of doing this at
76485 + http://pax.grsecurity.net/et_dyn.tar.gz and practical samples at
76486 + http://www.grsecurity.net/grsec-gcc-specs.tar.gz .
76487 +
76488 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control this
76489 + feature on a per file basis.
76490 +
76491 +endmenu
76492 +
76493 +menu "Miscellaneous hardening features"
76494 +
76495 +config PAX_MEMORY_SANITIZE
76496 + bool "Sanitize all freed memory"
76497 + depends on !HIBERNATION
76498 + help
76499 + By saying Y here the kernel will erase memory pages as soon as they
76500 + are freed. This in turn reduces the lifetime of data stored in the
76501 + pages, making it less likely that sensitive information such as
76502 + passwords, cryptographic secrets, etc stay in memory for too long.
76503 +
76504 + This is especially useful for programs whose runtime is short, long
76505 + lived processes and the kernel itself benefit from this as long as
76506 + they operate on whole memory pages and ensure timely freeing of pages
76507 + that may hold sensitive information.
76508 +
76509 + The tradeoff is performance impact, on a single CPU system kernel
76510 + compilation sees a 3% slowdown, other systems and workloads may vary
76511 + and you are advised to test this feature on your expected workload
76512 + before deploying it.
76513 +
76514 + Note that this feature does not protect data stored in live pages,
76515 + e.g., process memory swapped to disk may stay there for a long time.
76516 +
76517 +config PAX_MEMORY_STACKLEAK
76518 + bool "Sanitize kernel stack"
76519 + depends on X86
76520 + help
76521 + By saying Y here the kernel will erase the kernel stack before it
76522 + returns from a system call. This in turn reduces the information
76523 + that a kernel stack leak bug can reveal.
76524 +
76525 + Note that such a bug can still leak information that was put on
76526 + the stack by the current system call (the one eventually triggering
76527 + the bug) but traces of earlier system calls on the kernel stack
76528 + cannot leak anymore.
76529 +
76530 + The tradeoff is performance impact: on a single CPU system kernel
76531 + compilation sees a 1% slowdown, other systems and workloads may vary
76532 + and you are advised to test this feature on your expected workload
76533 + before deploying it.
76534 +
76535 + Note: full support for this feature requires gcc with plugin support
76536 + so make sure your compiler is at least gcc 4.5.0. Using older gcc
76537 + versions means that functions with large enough stack frames may
76538 + leave uninitialized memory behind that may be exposed to a later
76539 + syscall leaking the stack.
76540 +
76541 +config PAX_MEMORY_UDEREF
76542 + bool "Prevent invalid userland pointer dereference"
76543 + depends on X86 && !UML_X86 && !XEN
76544 + select PAX_PER_CPU_PGD if X86_64
76545 + help
76546 + By saying Y here the kernel will be prevented from dereferencing
76547 + userland pointers in contexts where the kernel expects only kernel
76548 + pointers. This is both a useful runtime debugging feature and a
76549 + security measure that prevents exploiting a class of kernel bugs.
76550 +
76551 + The tradeoff is that some virtualization solutions may experience
76552 + a huge slowdown and therefore you should not enable this feature
76553 + for kernels meant to run in such environments. Whether a given VM
76554 + solution is affected or not is best determined by simply trying it
76555 + out, the performance impact will be obvious right on boot as this
76556 + mechanism engages from very early on. A good rule of thumb is that
76557 + VMs running on CPUs without hardware virtualization support (i.e.,
76558 + the majority of IA-32 CPUs) will likely experience the slowdown.
76559 +
76560 +config PAX_REFCOUNT
76561 + bool "Prevent various kernel object reference counter overflows"
76562 + depends on GRKERNSEC && (X86 || SPARC64)
76563 + help
76564 + By saying Y here the kernel will detect and prevent overflowing
76565 + various (but not all) kinds of object reference counters. Such
76566 + overflows can normally occur due to bugs only and are often, if
76567 + not always, exploitable.
76568 +
76569 + The tradeoff is that data structures protected by an overflowed
76570 + refcount will never be freed and therefore will leak memory. Note
76571 + that this leak also happens even without this protection but in
76572 + that case the overflow can eventually trigger the freeing of the
76573 + data structure while it is still being used elsewhere, resulting
76574 + in the exploitable situation that this feature prevents.
76575 +
76576 + Since this has a negligible performance impact, you should enable
76577 + this feature.
76578 +
76579 +config PAX_USERCOPY
76580 + bool "Harden heap object copies between kernel and userland"
76581 + depends on X86 || PPC || SPARC || ARM
76582 + depends on GRKERNSEC && (SLAB || SLUB || SLOB)
76583 + help
76584 + By saying Y here the kernel will enforce the size of heap objects
76585 + when they are copied in either direction between the kernel and
76586 + userland, even if only a part of the heap object is copied.
76587 +
76588 + Specifically, this checking prevents information leaking from the
76589 + kernel heap during kernel to userland copies (if the kernel heap
76590 + object is otherwise fully initialized) and prevents kernel heap
76591 + overflows during userland to kernel copies.
76592 +
76593 + Note that the current implementation provides the strictest bounds
76594 + checks for the SLUB allocator.
76595 +
76596 + Enabling this option also enables per-slab cache protection against
76597 + data in a given cache being copied into/out of via userland
76598 + accessors. Though the whitelist of regions will be reduced over
76599 + time, it notably protects important data structures like task structs.
76600 +
76601 + If frame pointers are enabled on x86, this option will also restrict
76602 + copies into and out of the kernel stack to local variables within a
76603 + single frame.
76604 +
76605 + Since this has a negligible performance impact, you should enable
76606 + this feature.
76607 +
76608 +endmenu
76609 +
76610 +endmenu
76611 +
76612 config KEYS
76613 bool "Enable access key retention support"
76614 help
76615 @@ -169,7 +790,7 @@ config INTEL_TXT
76616 config LSM_MMAP_MIN_ADDR
76617 int "Low address space for LSM to protect from user allocation"
76618 depends on SECURITY && SECURITY_SELINUX
76619 - default 32768 if ARM
76620 + default 32768 if ALPHA || ARM || PARISC || SPARC32
76621 default 65536
76622 help
76623 This is the portion of low virtual memory which should be protected
76624 diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
76625 index 3783202..1852837 100644
76626 --- a/security/apparmor/lsm.c
76627 +++ b/security/apparmor/lsm.c
76628 @@ -621,7 +621,7 @@ static int apparmor_task_setrlimit(struct task_struct *task,
76629 return error;
76630 }
76631
76632 -static struct security_operations apparmor_ops = {
76633 +static struct security_operations apparmor_ops __read_only = {
76634 .name = "apparmor",
76635
76636 .ptrace_access_check = apparmor_ptrace_access_check,
76637 diff --git a/security/commoncap.c b/security/commoncap.c
76638 index ee4f848..a320c64 100644
76639 --- a/security/commoncap.c
76640 +++ b/security/commoncap.c
76641 @@ -28,6 +28,7 @@
76642 #include <linux/prctl.h>
76643 #include <linux/securebits.h>
76644 #include <linux/user_namespace.h>
76645 +#include <net/sock.h>
76646
76647 /*
76648 * If a non-root user executes a setuid-root binary in
76649 @@ -58,7 +59,7 @@ int cap_netlink_send(struct sock *sk, struct sk_buff *skb)
76650
76651 int cap_netlink_recv(struct sk_buff *skb, int cap)
76652 {
76653 - if (!cap_raised(current_cap(), cap))
76654 + if (!cap_raised(current_cap(), cap) || !gr_is_capable(cap))
76655 return -EPERM;
76656 return 0;
76657 }
76658 @@ -579,6 +580,9 @@ int cap_bprm_secureexec(struct linux_binprm *bprm)
76659 {
76660 const struct cred *cred = current_cred();
76661
76662 + if (gr_acl_enable_at_secure())
76663 + return 1;
76664 +
76665 if (cred->uid != 0) {
76666 if (bprm->cap_effective)
76667 return 1;
76668 diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h
76669 index 3ccf7ac..d73ad64 100644
76670 --- a/security/integrity/ima/ima.h
76671 +++ b/security/integrity/ima/ima.h
76672 @@ -86,8 +86,8 @@ void ima_add_violation(struct inode *inode, const unsigned char *filename,
76673 extern spinlock_t ima_queue_lock;
76674
76675 struct ima_h_table {
76676 - atomic_long_t len; /* number of stored measurements in the list */
76677 - atomic_long_t violations;
76678 + atomic_long_unchecked_t len; /* number of stored measurements in the list */
76679 + atomic_long_unchecked_t violations;
76680 struct hlist_head queue[IMA_MEASURE_HTABLE_SIZE];
76681 };
76682 extern struct ima_h_table ima_htable;
76683 diff --git a/security/integrity/ima/ima_api.c b/security/integrity/ima/ima_api.c
76684 index 88a2788..581ab92 100644
76685 --- a/security/integrity/ima/ima_api.c
76686 +++ b/security/integrity/ima/ima_api.c
76687 @@ -75,7 +75,7 @@ void ima_add_violation(struct inode *inode, const unsigned char *filename,
76688 int result;
76689
76690 /* can overflow, only indicator */
76691 - atomic_long_inc(&ima_htable.violations);
76692 + atomic_long_inc_unchecked(&ima_htable.violations);
76693
76694 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
76695 if (!entry) {
76696 diff --git a/security/integrity/ima/ima_audit.c b/security/integrity/ima/ima_audit.c
76697 index c5c5a72..2ad942f 100644
76698 --- a/security/integrity/ima/ima_audit.c
76699 +++ b/security/integrity/ima/ima_audit.c
76700 @@ -56,9 +56,11 @@ void integrity_audit_msg(int audit_msgno, struct inode *inode,
76701 audit_log_format(ab, " name=");
76702 audit_log_untrustedstring(ab, fname);
76703 }
76704 - if (inode)
76705 - audit_log_format(ab, " dev=%s ino=%lu",
76706 - inode->i_sb->s_id, inode->i_ino);
76707 + if (inode) {
76708 + audit_log_format(ab, " dev=");
76709 + audit_log_untrustedstring(ab, inode->i_sb->s_id);
76710 + audit_log_format(ab, " ino=%lu", inode->i_ino);
76711 + }
76712 audit_log_format(ab, " res=%d", !result ? 0 : 1);
76713 audit_log_end(ab);
76714 }
76715 diff --git a/security/integrity/ima/ima_fs.c b/security/integrity/ima/ima_fs.c
76716 index e1aa2b4..52027bf 100644
76717 --- a/security/integrity/ima/ima_fs.c
76718 +++ b/security/integrity/ima/ima_fs.c
76719 @@ -28,12 +28,12 @@
76720 static int valid_policy = 1;
76721 #define TMPBUFLEN 12
76722 static ssize_t ima_show_htable_value(char __user *buf, size_t count,
76723 - loff_t *ppos, atomic_long_t *val)
76724 + loff_t *ppos, atomic_long_unchecked_t *val)
76725 {
76726 char tmpbuf[TMPBUFLEN];
76727 ssize_t len;
76728
76729 - len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read(val));
76730 + len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read_unchecked(val));
76731 return simple_read_from_buffer(buf, count, ppos, tmpbuf, len);
76732 }
76733
76734 diff --git a/security/integrity/ima/ima_queue.c b/security/integrity/ima/ima_queue.c
76735 index 55a6271..ad829c3 100644
76736 --- a/security/integrity/ima/ima_queue.c
76737 +++ b/security/integrity/ima/ima_queue.c
76738 @@ -81,7 +81,7 @@ static int ima_add_digest_entry(struct ima_template_entry *entry)
76739 INIT_LIST_HEAD(&qe->later);
76740 list_add_tail_rcu(&qe->later, &ima_measurements);
76741
76742 - atomic_long_inc(&ima_htable.len);
76743 + atomic_long_inc_unchecked(&ima_htable.len);
76744 key = ima_hash_key(entry->digest);
76745 hlist_add_head_rcu(&qe->hnext, &ima_htable.queue[key]);
76746 return 0;
76747 diff --git a/security/keys/compat.c b/security/keys/compat.c
76748 index 4c48e13..7abdac9 100644
76749 --- a/security/keys/compat.c
76750 +++ b/security/keys/compat.c
76751 @@ -44,7 +44,7 @@ long compat_keyctl_instantiate_key_iov(
76752 if (ret == 0)
76753 goto no_payload_free;
76754
76755 - ret = keyctl_instantiate_key_common(id, iov, ioc, ret, ringid);
76756 + ret = keyctl_instantiate_key_common(id, (const struct iovec __force_user *)iov, ioc, ret, ringid);
76757
76758 if (iov != iovstack)
76759 kfree(iov);
76760 diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
76761 index 0b3f5d7..892c8a6 100644
76762 --- a/security/keys/keyctl.c
76763 +++ b/security/keys/keyctl.c
76764 @@ -921,7 +921,7 @@ static int keyctl_change_reqkey_auth(struct key *key)
76765 /*
76766 * Copy the iovec data from userspace
76767 */
76768 -static long copy_from_user_iovec(void *buffer, const struct iovec *iov,
76769 +static long copy_from_user_iovec(void *buffer, const struct iovec __user *iov,
76770 unsigned ioc)
76771 {
76772 for (; ioc > 0; ioc--) {
76773 @@ -943,7 +943,7 @@ static long copy_from_user_iovec(void *buffer, const struct iovec *iov,
76774 * If successful, 0 will be returned.
76775 */
76776 long keyctl_instantiate_key_common(key_serial_t id,
76777 - const struct iovec *payload_iov,
76778 + const struct iovec __user *payload_iov,
76779 unsigned ioc,
76780 size_t plen,
76781 key_serial_t ringid)
76782 @@ -1038,7 +1038,7 @@ long keyctl_instantiate_key(key_serial_t id,
76783 [0].iov_len = plen
76784 };
76785
76786 - return keyctl_instantiate_key_common(id, iov, 1, plen, ringid);
76787 + return keyctl_instantiate_key_common(id, (const struct iovec __force_user *)iov, 1, plen, ringid);
76788 }
76789
76790 return keyctl_instantiate_key_common(id, NULL, 0, 0, ringid);
76791 @@ -1071,7 +1071,7 @@ long keyctl_instantiate_key_iov(key_serial_t id,
76792 if (ret == 0)
76793 goto no_payload_free;
76794
76795 - ret = keyctl_instantiate_key_common(id, iov, ioc, ret, ringid);
76796 + ret = keyctl_instantiate_key_common(id, (const struct iovec __force_user *)iov, ioc, ret, ringid);
76797
76798 if (iov != iovstack)
76799 kfree(iov);
76800 diff --git a/security/keys/keyring.c b/security/keys/keyring.c
76801 index 37a7f3b..86dc19f 100644
76802 --- a/security/keys/keyring.c
76803 +++ b/security/keys/keyring.c
76804 @@ -214,15 +214,15 @@ static long keyring_read(const struct key *keyring,
76805 ret = -EFAULT;
76806
76807 for (loop = 0; loop < klist->nkeys; loop++) {
76808 + key_serial_t serial;
76809 key = klist->keys[loop];
76810 + serial = key->serial;
76811
76812 tmp = sizeof(key_serial_t);
76813 if (tmp > buflen)
76814 tmp = buflen;
76815
76816 - if (copy_to_user(buffer,
76817 - &key->serial,
76818 - tmp) != 0)
76819 + if (copy_to_user(buffer, &serial, tmp))
76820 goto error;
76821
76822 buflen -= tmp;
76823 diff --git a/security/lsm_audit.c b/security/lsm_audit.c
76824 index 893af8a..ba9237c 100644
76825 --- a/security/lsm_audit.c
76826 +++ b/security/lsm_audit.c
76827 @@ -234,10 +234,11 @@ static void dump_common_audit_data(struct audit_buffer *ab,
76828 audit_log_d_path(ab, "path=", &a->u.path);
76829
76830 inode = a->u.path.dentry->d_inode;
76831 - if (inode)
76832 - audit_log_format(ab, " dev=%s ino=%lu",
76833 - inode->i_sb->s_id,
76834 - inode->i_ino);
76835 + if (inode) {
76836 + audit_log_format(ab, " dev=");
76837 + audit_log_untrustedstring(ab, inode->i_sb->s_id);
76838 + audit_log_format(ab, " ino=%lu", inode->i_ino);
76839 + }
76840 break;
76841 }
76842 case LSM_AUDIT_DATA_DENTRY: {
76843 @@ -247,10 +248,11 @@ static void dump_common_audit_data(struct audit_buffer *ab,
76844 audit_log_untrustedstring(ab, a->u.dentry->d_name.name);
76845
76846 inode = a->u.dentry->d_inode;
76847 - if (inode)
76848 - audit_log_format(ab, " dev=%s ino=%lu",
76849 - inode->i_sb->s_id,
76850 - inode->i_ino);
76851 + if (inode) {
76852 + audit_log_format(ab, " dev=");
76853 + audit_log_untrustedstring(ab, inode->i_sb->s_id);
76854 + audit_log_format(ab, " ino=%lu", inode->i_ino);
76855 + }
76856 break;
76857 }
76858 case LSM_AUDIT_DATA_INODE: {
76859 @@ -265,8 +267,9 @@ static void dump_common_audit_data(struct audit_buffer *ab,
76860 dentry->d_name.name);
76861 dput(dentry);
76862 }
76863 - audit_log_format(ab, " dev=%s ino=%lu", inode->i_sb->s_id,
76864 - inode->i_ino);
76865 + audit_log_format(ab, " dev=");
76866 + audit_log_untrustedstring(ab, inode->i_sb->s_id);
76867 + audit_log_format(ab, " ino=%lu", inode->i_ino);
76868 break;
76869 }
76870 case LSM_AUDIT_DATA_TASK:
76871 diff --git a/security/min_addr.c b/security/min_addr.c
76872 index f728728..6457a0c 100644
76873 --- a/security/min_addr.c
76874 +++ b/security/min_addr.c
76875 @@ -14,6 +14,7 @@ unsigned long dac_mmap_min_addr = CONFIG_DEFAULT_MMAP_MIN_ADDR;
76876 */
76877 static void update_mmap_min_addr(void)
76878 {
76879 +#ifndef SPARC
76880 #ifdef CONFIG_LSM_MMAP_MIN_ADDR
76881 if (dac_mmap_min_addr > CONFIG_LSM_MMAP_MIN_ADDR)
76882 mmap_min_addr = dac_mmap_min_addr;
76883 @@ -22,6 +23,7 @@ static void update_mmap_min_addr(void)
76884 #else
76885 mmap_min_addr = dac_mmap_min_addr;
76886 #endif
76887 +#endif
76888 }
76889
76890 /*
76891 diff --git a/security/security.c b/security/security.c
76892 index e2f684a..8d62ef5 100644
76893 --- a/security/security.c
76894 +++ b/security/security.c
76895 @@ -26,8 +26,8 @@
76896 static __initdata char chosen_lsm[SECURITY_NAME_MAX + 1] =
76897 CONFIG_DEFAULT_SECURITY;
76898
76899 -static struct security_operations *security_ops;
76900 -static struct security_operations default_security_ops = {
76901 +static struct security_operations *security_ops __read_only;
76902 +static struct security_operations default_security_ops __read_only = {
76903 .name = "default",
76904 };
76905
76906 @@ -68,7 +68,9 @@ int __init security_init(void)
76907
76908 void reset_security_ops(void)
76909 {
76910 + pax_open_kernel();
76911 security_ops = &default_security_ops;
76912 + pax_close_kernel();
76913 }
76914
76915 /* Save user chosen LSM */
76916 diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
76917 index 1126c10..effb32b 100644
76918 --- a/security/selinux/hooks.c
76919 +++ b/security/selinux/hooks.c
76920 @@ -94,8 +94,6 @@
76921
76922 #define NUM_SEL_MNT_OPTS 5
76923
76924 -extern struct security_operations *security_ops;
76925 -
76926 /* SECMARK reference count */
76927 static atomic_t selinux_secmark_refcount = ATOMIC_INIT(0);
76928
76929 @@ -5449,7 +5447,7 @@ static int selinux_key_getsecurity(struct key *key, char **_buffer)
76930
76931 #endif
76932
76933 -static struct security_operations selinux_ops = {
76934 +static struct security_operations selinux_ops __read_only = {
76935 .name = "selinux",
76936
76937 .ptrace_access_check = selinux_ptrace_access_check,
76938 diff --git a/security/selinux/include/xfrm.h b/security/selinux/include/xfrm.h
76939 index b43813c..74be837 100644
76940 --- a/security/selinux/include/xfrm.h
76941 +++ b/security/selinux/include/xfrm.h
76942 @@ -48,7 +48,7 @@ int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid, int ckall);
76943
76944 static inline void selinux_xfrm_notify_policyload(void)
76945 {
76946 - atomic_inc(&flow_cache_genid);
76947 + atomic_inc_unchecked(&flow_cache_genid);
76948 }
76949 #else
76950 static inline int selinux_xfrm_enabled(void)
76951 diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
76952 index 7db62b4..ee4d949 100644
76953 --- a/security/smack/smack_lsm.c
76954 +++ b/security/smack/smack_lsm.c
76955 @@ -3481,7 +3481,7 @@ static int smack_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen)
76956 return 0;
76957 }
76958
76959 -struct security_operations smack_ops = {
76960 +struct security_operations smack_ops __read_only = {
76961 .name = "smack",
76962
76963 .ptrace_access_check = smack_ptrace_access_check,
76964 diff --git a/security/tomoyo/tomoyo.c b/security/tomoyo/tomoyo.c
76965 index 4b327b6..646c57a 100644
76966 --- a/security/tomoyo/tomoyo.c
76967 +++ b/security/tomoyo/tomoyo.c
76968 @@ -504,7 +504,7 @@ static int tomoyo_socket_sendmsg(struct socket *sock, struct msghdr *msg,
76969 * tomoyo_security_ops is a "struct security_operations" which is used for
76970 * registering TOMOYO.
76971 */
76972 -static struct security_operations tomoyo_security_ops = {
76973 +static struct security_operations tomoyo_security_ops __read_only = {
76974 .name = "tomoyo",
76975 .cred_alloc_blank = tomoyo_cred_alloc_blank,
76976 .cred_prepare = tomoyo_cred_prepare,
76977 diff --git a/sound/aoa/codecs/onyx.c b/sound/aoa/codecs/onyx.c
76978 index 762af68..7103453 100644
76979 --- a/sound/aoa/codecs/onyx.c
76980 +++ b/sound/aoa/codecs/onyx.c
76981 @@ -54,7 +54,7 @@ struct onyx {
76982 spdif_locked:1,
76983 analog_locked:1,
76984 original_mute:2;
76985 - int open_count;
76986 + local_t open_count;
76987 struct codec_info *codec_info;
76988
76989 /* mutex serializes concurrent access to the device
76990 @@ -753,7 +753,7 @@ static int onyx_open(struct codec_info_item *cii,
76991 struct onyx *onyx = cii->codec_data;
76992
76993 mutex_lock(&onyx->mutex);
76994 - onyx->open_count++;
76995 + local_inc(&onyx->open_count);
76996 mutex_unlock(&onyx->mutex);
76997
76998 return 0;
76999 @@ -765,8 +765,7 @@ static int onyx_close(struct codec_info_item *cii,
77000 struct onyx *onyx = cii->codec_data;
77001
77002 mutex_lock(&onyx->mutex);
77003 - onyx->open_count--;
77004 - if (!onyx->open_count)
77005 + if (local_dec_and_test(&onyx->open_count))
77006 onyx->spdif_locked = onyx->analog_locked = 0;
77007 mutex_unlock(&onyx->mutex);
77008
77009 diff --git a/sound/aoa/codecs/onyx.h b/sound/aoa/codecs/onyx.h
77010 index ffd2025..df062c9 100644
77011 --- a/sound/aoa/codecs/onyx.h
77012 +++ b/sound/aoa/codecs/onyx.h
77013 @@ -11,6 +11,7 @@
77014 #include <linux/i2c.h>
77015 #include <asm/pmac_low_i2c.h>
77016 #include <asm/prom.h>
77017 +#include <asm/local.h>
77018
77019 /* PCM3052 register definitions */
77020
77021 diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
77022 index 3cc4b86..af0a951 100644
77023 --- a/sound/core/oss/pcm_oss.c
77024 +++ b/sound/core/oss/pcm_oss.c
77025 @@ -1189,10 +1189,10 @@ snd_pcm_sframes_t snd_pcm_oss_write3(struct snd_pcm_substream *substream, const
77026 if (in_kernel) {
77027 mm_segment_t fs;
77028 fs = snd_enter_user();
77029 - ret = snd_pcm_lib_write(substream, (void __force __user *)ptr, frames);
77030 + ret = snd_pcm_lib_write(substream, (void __force_user *)ptr, frames);
77031 snd_leave_user(fs);
77032 } else {
77033 - ret = snd_pcm_lib_write(substream, (void __force __user *)ptr, frames);
77034 + ret = snd_pcm_lib_write(substream, (void __force_user *)ptr, frames);
77035 }
77036 if (ret != -EPIPE && ret != -ESTRPIPE)
77037 break;
77038 @@ -1234,10 +1234,10 @@ snd_pcm_sframes_t snd_pcm_oss_read3(struct snd_pcm_substream *substream, char *p
77039 if (in_kernel) {
77040 mm_segment_t fs;
77041 fs = snd_enter_user();
77042 - ret = snd_pcm_lib_read(substream, (void __force __user *)ptr, frames);
77043 + ret = snd_pcm_lib_read(substream, (void __force_user *)ptr, frames);
77044 snd_leave_user(fs);
77045 } else {
77046 - ret = snd_pcm_lib_read(substream, (void __force __user *)ptr, frames);
77047 + ret = snd_pcm_lib_read(substream, (void __force_user *)ptr, frames);
77048 }
77049 if (ret == -EPIPE) {
77050 if (runtime->status->state == SNDRV_PCM_STATE_DRAINING) {
77051 @@ -1337,7 +1337,7 @@ static ssize_t snd_pcm_oss_write2(struct snd_pcm_substream *substream, const cha
77052 struct snd_pcm_plugin_channel *channels;
77053 size_t oss_frame_bytes = (runtime->oss.plugin_first->src_width * runtime->oss.plugin_first->src_format.channels) / 8;
77054 if (!in_kernel) {
77055 - if (copy_from_user(runtime->oss.buffer, (const char __force __user *)buf, bytes))
77056 + if (copy_from_user(runtime->oss.buffer, (const char __force_user *)buf, bytes))
77057 return -EFAULT;
77058 buf = runtime->oss.buffer;
77059 }
77060 @@ -1407,7 +1407,7 @@ static ssize_t snd_pcm_oss_write1(struct snd_pcm_substream *substream, const cha
77061 }
77062 } else {
77063 tmp = snd_pcm_oss_write2(substream,
77064 - (const char __force *)buf,
77065 + (const char __force_kernel *)buf,
77066 runtime->oss.period_bytes, 0);
77067 if (tmp <= 0)
77068 goto err;
77069 @@ -1433,7 +1433,7 @@ static ssize_t snd_pcm_oss_read2(struct snd_pcm_substream *substream, char *buf,
77070 struct snd_pcm_runtime *runtime = substream->runtime;
77071 snd_pcm_sframes_t frames, frames1;
77072 #ifdef CONFIG_SND_PCM_OSS_PLUGINS
77073 - char __user *final_dst = (char __force __user *)buf;
77074 + char __user *final_dst = (char __force_user *)buf;
77075 if (runtime->oss.plugin_first) {
77076 struct snd_pcm_plugin_channel *channels;
77077 size_t oss_frame_bytes = (runtime->oss.plugin_last->dst_width * runtime->oss.plugin_last->dst_format.channels) / 8;
77078 @@ -1495,7 +1495,7 @@ static ssize_t snd_pcm_oss_read1(struct snd_pcm_substream *substream, char __use
77079 xfer += tmp;
77080 runtime->oss.buffer_used -= tmp;
77081 } else {
77082 - tmp = snd_pcm_oss_read2(substream, (char __force *)buf,
77083 + tmp = snd_pcm_oss_read2(substream, (char __force_kernel *)buf,
77084 runtime->oss.period_bytes, 0);
77085 if (tmp <= 0)
77086 goto err;
77087 @@ -1663,7 +1663,7 @@ static int snd_pcm_oss_sync(struct snd_pcm_oss_file *pcm_oss_file)
77088 size1);
77089 size1 /= runtime->channels; /* frames */
77090 fs = snd_enter_user();
77091 - snd_pcm_lib_write(substream, (void __force __user *)runtime->oss.buffer, size1);
77092 + snd_pcm_lib_write(substream, (void __force_user *)runtime->oss.buffer, size1);
77093 snd_leave_user(fs);
77094 }
77095 } else if (runtime->access == SNDRV_PCM_ACCESS_RW_NONINTERLEAVED) {
77096 diff --git a/sound/core/pcm_compat.c b/sound/core/pcm_compat.c
77097 index 91cdf94..4085161 100644
77098 --- a/sound/core/pcm_compat.c
77099 +++ b/sound/core/pcm_compat.c
77100 @@ -31,7 +31,7 @@ static int snd_pcm_ioctl_delay_compat(struct snd_pcm_substream *substream,
77101 int err;
77102
77103 fs = snd_enter_user();
77104 - err = snd_pcm_delay(substream, &delay);
77105 + err = snd_pcm_delay(substream, (snd_pcm_sframes_t __force_user *)&delay);
77106 snd_leave_user(fs);
77107 if (err < 0)
77108 return err;
77109 diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
77110 index 25ed9fe..24c46e9 100644
77111 --- a/sound/core/pcm_native.c
77112 +++ b/sound/core/pcm_native.c
77113 @@ -2765,11 +2765,11 @@ int snd_pcm_kernel_ioctl(struct snd_pcm_substream *substream,
77114 switch (substream->stream) {
77115 case SNDRV_PCM_STREAM_PLAYBACK:
77116 result = snd_pcm_playback_ioctl1(NULL, substream, cmd,
77117 - (void __user *)arg);
77118 + (void __force_user *)arg);
77119 break;
77120 case SNDRV_PCM_STREAM_CAPTURE:
77121 result = snd_pcm_capture_ioctl1(NULL, substream, cmd,
77122 - (void __user *)arg);
77123 + (void __force_user *)arg);
77124 break;
77125 default:
77126 result = -EINVAL;
77127 diff --git a/sound/core/seq/seq_device.c b/sound/core/seq/seq_device.c
77128 index 5cf8d65..912a79c 100644
77129 --- a/sound/core/seq/seq_device.c
77130 +++ b/sound/core/seq/seq_device.c
77131 @@ -64,7 +64,7 @@ struct ops_list {
77132 int argsize; /* argument size */
77133
77134 /* operators */
77135 - struct snd_seq_dev_ops ops;
77136 + struct snd_seq_dev_ops *ops;
77137
77138 /* registred devices */
77139 struct list_head dev_list; /* list of devices */
77140 @@ -333,7 +333,7 @@ int snd_seq_device_register_driver(char *id, struct snd_seq_dev_ops *entry,
77141
77142 mutex_lock(&ops->reg_mutex);
77143 /* copy driver operators */
77144 - ops->ops = *entry;
77145 + ops->ops = entry;
77146 ops->driver |= DRIVER_LOADED;
77147 ops->argsize = argsize;
77148
77149 @@ -463,7 +463,7 @@ static int init_device(struct snd_seq_device *dev, struct ops_list *ops)
77150 dev->name, ops->id, ops->argsize, dev->argsize);
77151 return -EINVAL;
77152 }
77153 - if (ops->ops.init_device(dev) >= 0) {
77154 + if (ops->ops->init_device(dev) >= 0) {
77155 dev->status = SNDRV_SEQ_DEVICE_REGISTERED;
77156 ops->num_init_devices++;
77157 } else {
77158 @@ -490,7 +490,7 @@ static int free_device(struct snd_seq_device *dev, struct ops_list *ops)
77159 dev->name, ops->id, ops->argsize, dev->argsize);
77160 return -EINVAL;
77161 }
77162 - if ((result = ops->ops.free_device(dev)) >= 0 || result == -ENXIO) {
77163 + if ((result = ops->ops->free_device(dev)) >= 0 || result == -ENXIO) {
77164 dev->status = SNDRV_SEQ_DEVICE_FREE;
77165 dev->driver_data = NULL;
77166 ops->num_init_devices--;
77167 diff --git a/sound/drivers/mts64.c b/sound/drivers/mts64.c
77168 index f24bf9a..1f7b67c 100644
77169 --- a/sound/drivers/mts64.c
77170 +++ b/sound/drivers/mts64.c
77171 @@ -29,6 +29,7 @@
77172 #include <sound/initval.h>
77173 #include <sound/rawmidi.h>
77174 #include <sound/control.h>
77175 +#include <asm/local.h>
77176
77177 #define CARD_NAME "Miditerminal 4140"
77178 #define DRIVER_NAME "MTS64"
77179 @@ -67,7 +68,7 @@ struct mts64 {
77180 struct pardevice *pardev;
77181 int pardev_claimed;
77182
77183 - int open_count;
77184 + local_t open_count;
77185 int current_midi_output_port;
77186 int current_midi_input_port;
77187 u8 mode[MTS64_NUM_INPUT_PORTS];
77188 @@ -697,7 +698,7 @@ static int snd_mts64_rawmidi_open(struct snd_rawmidi_substream *substream)
77189 {
77190 struct mts64 *mts = substream->rmidi->private_data;
77191
77192 - if (mts->open_count == 0) {
77193 + if (local_read(&mts->open_count) == 0) {
77194 /* We don't need a spinlock here, because this is just called
77195 if the device has not been opened before.
77196 So there aren't any IRQs from the device */
77197 @@ -705,7 +706,7 @@ static int snd_mts64_rawmidi_open(struct snd_rawmidi_substream *substream)
77198
77199 msleep(50);
77200 }
77201 - ++(mts->open_count);
77202 + local_inc(&mts->open_count);
77203
77204 return 0;
77205 }
77206 @@ -715,8 +716,7 @@ static int snd_mts64_rawmidi_close(struct snd_rawmidi_substream *substream)
77207 struct mts64 *mts = substream->rmidi->private_data;
77208 unsigned long flags;
77209
77210 - --(mts->open_count);
77211 - if (mts->open_count == 0) {
77212 + if (local_dec_return(&mts->open_count) == 0) {
77213 /* We need the spinlock_irqsave here because we can still
77214 have IRQs at this point */
77215 spin_lock_irqsave(&mts->lock, flags);
77216 @@ -725,8 +725,8 @@ static int snd_mts64_rawmidi_close(struct snd_rawmidi_substream *substream)
77217
77218 msleep(500);
77219
77220 - } else if (mts->open_count < 0)
77221 - mts->open_count = 0;
77222 + } else if (local_read(&mts->open_count) < 0)
77223 + local_set(&mts->open_count, 0);
77224
77225 return 0;
77226 }
77227 diff --git a/sound/drivers/opl4/opl4_lib.c b/sound/drivers/opl4/opl4_lib.c
77228 index b953fb4..1999c01 100644
77229 --- a/sound/drivers/opl4/opl4_lib.c
77230 +++ b/sound/drivers/opl4/opl4_lib.c
77231 @@ -29,7 +29,7 @@ MODULE_AUTHOR("Clemens Ladisch <clemens@ladisch.de>");
77232 MODULE_DESCRIPTION("OPL4 driver");
77233 MODULE_LICENSE("GPL");
77234
77235 -static void inline snd_opl4_wait(struct snd_opl4 *opl4)
77236 +static inline void snd_opl4_wait(struct snd_opl4 *opl4)
77237 {
77238 int timeout = 10;
77239 while ((inb(opl4->fm_port) & OPL4_STATUS_BUSY) && --timeout > 0)
77240 diff --git a/sound/drivers/portman2x4.c b/sound/drivers/portman2x4.c
77241 index f664823..590c745 100644
77242 --- a/sound/drivers/portman2x4.c
77243 +++ b/sound/drivers/portman2x4.c
77244 @@ -48,6 +48,7 @@
77245 #include <sound/initval.h>
77246 #include <sound/rawmidi.h>
77247 #include <sound/control.h>
77248 +#include <asm/local.h>
77249
77250 #define CARD_NAME "Portman 2x4"
77251 #define DRIVER_NAME "portman"
77252 @@ -85,7 +86,7 @@ struct portman {
77253 struct pardevice *pardev;
77254 int pardev_claimed;
77255
77256 - int open_count;
77257 + local_t open_count;
77258 int mode[PORTMAN_NUM_INPUT_PORTS];
77259 struct snd_rawmidi_substream *midi_input[PORTMAN_NUM_INPUT_PORTS];
77260 };
77261 diff --git a/sound/firewire/amdtp.c b/sound/firewire/amdtp.c
77262 index 87657dd..a8268d4 100644
77263 --- a/sound/firewire/amdtp.c
77264 +++ b/sound/firewire/amdtp.c
77265 @@ -371,7 +371,7 @@ static void queue_out_packet(struct amdtp_out_stream *s, unsigned int cycle)
77266 ptr = s->pcm_buffer_pointer + data_blocks;
77267 if (ptr >= pcm->runtime->buffer_size)
77268 ptr -= pcm->runtime->buffer_size;
77269 - ACCESS_ONCE(s->pcm_buffer_pointer) = ptr;
77270 + ACCESS_ONCE_RW(s->pcm_buffer_pointer) = ptr;
77271
77272 s->pcm_period_pointer += data_blocks;
77273 if (s->pcm_period_pointer >= pcm->runtime->period_size) {
77274 @@ -511,7 +511,7 @@ EXPORT_SYMBOL(amdtp_out_stream_start);
77275 */
77276 void amdtp_out_stream_update(struct amdtp_out_stream *s)
77277 {
77278 - ACCESS_ONCE(s->source_node_id_field) =
77279 + ACCESS_ONCE_RW(s->source_node_id_field) =
77280 (fw_parent_device(s->unit)->card->node_id & 0x3f) << 24;
77281 }
77282 EXPORT_SYMBOL(amdtp_out_stream_update);
77283 diff --git a/sound/firewire/amdtp.h b/sound/firewire/amdtp.h
77284 index 537a9cb..8e8c8e9 100644
77285 --- a/sound/firewire/amdtp.h
77286 +++ b/sound/firewire/amdtp.h
77287 @@ -146,7 +146,7 @@ static inline void amdtp_out_stream_pcm_prepare(struct amdtp_out_stream *s)
77288 static inline void amdtp_out_stream_pcm_trigger(struct amdtp_out_stream *s,
77289 struct snd_pcm_substream *pcm)
77290 {
77291 - ACCESS_ONCE(s->pcm) = pcm;
77292 + ACCESS_ONCE_RW(s->pcm) = pcm;
77293 }
77294
77295 /**
77296 diff --git a/sound/firewire/isight.c b/sound/firewire/isight.c
77297 index cd094ec..eca1277 100644
77298 --- a/sound/firewire/isight.c
77299 +++ b/sound/firewire/isight.c
77300 @@ -96,7 +96,7 @@ static void isight_update_pointers(struct isight *isight, unsigned int count)
77301 ptr += count;
77302 if (ptr >= runtime->buffer_size)
77303 ptr -= runtime->buffer_size;
77304 - ACCESS_ONCE(isight->buffer_pointer) = ptr;
77305 + ACCESS_ONCE_RW(isight->buffer_pointer) = ptr;
77306
77307 isight->period_counter += count;
77308 if (isight->period_counter >= runtime->period_size) {
77309 @@ -307,7 +307,7 @@ static int isight_hw_params(struct snd_pcm_substream *substream,
77310 if (err < 0)
77311 return err;
77312
77313 - ACCESS_ONCE(isight->pcm_active) = true;
77314 + ACCESS_ONCE_RW(isight->pcm_active) = true;
77315
77316 return 0;
77317 }
77318 @@ -340,7 +340,7 @@ static int isight_hw_free(struct snd_pcm_substream *substream)
77319 {
77320 struct isight *isight = substream->private_data;
77321
77322 - ACCESS_ONCE(isight->pcm_active) = false;
77323 + ACCESS_ONCE_RW(isight->pcm_active) = false;
77324
77325 mutex_lock(&isight->mutex);
77326 isight_stop_streaming(isight);
77327 @@ -433,10 +433,10 @@ static int isight_trigger(struct snd_pcm_substream *substream, int cmd)
77328
77329 switch (cmd) {
77330 case SNDRV_PCM_TRIGGER_START:
77331 - ACCESS_ONCE(isight->pcm_running) = true;
77332 + ACCESS_ONCE_RW(isight->pcm_running) = true;
77333 break;
77334 case SNDRV_PCM_TRIGGER_STOP:
77335 - ACCESS_ONCE(isight->pcm_running) = false;
77336 + ACCESS_ONCE_RW(isight->pcm_running) = false;
77337 break;
77338 default:
77339 return -EINVAL;
77340 diff --git a/sound/isa/cmi8330.c b/sound/isa/cmi8330.c
77341 index c94578d..0794ac1 100644
77342 --- a/sound/isa/cmi8330.c
77343 +++ b/sound/isa/cmi8330.c
77344 @@ -172,7 +172,7 @@ struct snd_cmi8330 {
77345
77346 struct snd_pcm *pcm;
77347 struct snd_cmi8330_stream {
77348 - struct snd_pcm_ops ops;
77349 + snd_pcm_ops_no_const ops;
77350 snd_pcm_open_callback_t open;
77351 void *private_data; /* sb or wss */
77352 } streams[2];
77353 diff --git a/sound/oss/sb_audio.c b/sound/oss/sb_audio.c
77354 index 733b014..56ce96f 100644
77355 --- a/sound/oss/sb_audio.c
77356 +++ b/sound/oss/sb_audio.c
77357 @@ -901,7 +901,7 @@ sb16_copy_from_user(int dev,
77358 buf16 = (signed short *)(localbuf + localoffs);
77359 while (c)
77360 {
77361 - locallen = (c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
77362 + locallen = ((unsigned)c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
77363 if (copy_from_user(lbuf8,
77364 userbuf+useroffs + p,
77365 locallen))
77366 diff --git a/sound/oss/swarm_cs4297a.c b/sound/oss/swarm_cs4297a.c
77367 index 09d4648..cf234c7 100644
77368 --- a/sound/oss/swarm_cs4297a.c
77369 +++ b/sound/oss/swarm_cs4297a.c
77370 @@ -2606,7 +2606,6 @@ static int __init cs4297a_init(void)
77371 {
77372 struct cs4297a_state *s;
77373 u32 pwr, id;
77374 - mm_segment_t fs;
77375 int rval;
77376 #ifndef CONFIG_BCM_CS4297A_CSWARM
77377 u64 cfg;
77378 @@ -2696,22 +2695,23 @@ static int __init cs4297a_init(void)
77379 if (!rval) {
77380 char *sb1250_duart_present;
77381
77382 +#if 0
77383 + mm_segment_t fs;
77384 fs = get_fs();
77385 set_fs(KERNEL_DS);
77386 -#if 0
77387 val = SOUND_MASK_LINE;
77388 mixer_ioctl(s, SOUND_MIXER_WRITE_RECSRC, (unsigned long) &val);
77389 for (i = 0; i < ARRAY_SIZE(initvol); i++) {
77390 val = initvol[i].vol;
77391 mixer_ioctl(s, initvol[i].mixch, (unsigned long) &val);
77392 }
77393 + set_fs(fs);
77394 // cs4297a_write_ac97(s, 0x18, 0x0808);
77395 #else
77396 // cs4297a_write_ac97(s, 0x5e, 0x180);
77397 cs4297a_write_ac97(s, 0x02, 0x0808);
77398 cs4297a_write_ac97(s, 0x18, 0x0808);
77399 #endif
77400 - set_fs(fs);
77401
77402 list_add(&s->list, &cs4297a_devs);
77403
77404 diff --git a/sound/pci/hda/hda_codec.h b/sound/pci/hda/hda_codec.h
77405 index 5644711..a2aebc1 100644
77406 --- a/sound/pci/hda/hda_codec.h
77407 +++ b/sound/pci/hda/hda_codec.h
77408 @@ -611,7 +611,7 @@ struct hda_bus_ops {
77409 /* notify power-up/down from codec to controller */
77410 void (*pm_notify)(struct hda_bus *bus);
77411 #endif
77412 -};
77413 +} __no_const;
77414
77415 /* template to pass to the bus constructor */
77416 struct hda_bus_template {
77417 @@ -713,6 +713,7 @@ struct hda_codec_ops {
77418 #endif
77419 void (*reboot_notify)(struct hda_codec *codec);
77420 };
77421 +typedef struct hda_codec_ops __no_const hda_codec_ops_no_const;
77422
77423 /* record for amp information cache */
77424 struct hda_cache_head {
77425 @@ -743,7 +744,7 @@ struct hda_pcm_ops {
77426 struct snd_pcm_substream *substream);
77427 int (*cleanup)(struct hda_pcm_stream *info, struct hda_codec *codec,
77428 struct snd_pcm_substream *substream);
77429 -};
77430 +} __no_const;
77431
77432 /* PCM information for each substream */
77433 struct hda_pcm_stream {
77434 @@ -801,7 +802,7 @@ struct hda_codec {
77435 const char *modelname; /* model name for preset */
77436
77437 /* set by patch */
77438 - struct hda_codec_ops patch_ops;
77439 + hda_codec_ops_no_const patch_ops;
77440
77441 /* PCM to create, set by patch_ops.build_pcms callback */
77442 unsigned int num_pcms;
77443 diff --git a/sound/pci/ice1712/ice1712.h b/sound/pci/ice1712/ice1712.h
77444 index 0da778a..bc38b84 100644
77445 --- a/sound/pci/ice1712/ice1712.h
77446 +++ b/sound/pci/ice1712/ice1712.h
77447 @@ -269,7 +269,7 @@ struct snd_ak4xxx_private {
77448 unsigned int mask_flags; /* total mask bits */
77449 struct snd_akm4xxx_ops {
77450 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
77451 - } ops;
77452 + } __no_const ops;
77453 };
77454
77455 struct snd_ice1712_spdif {
77456 @@ -285,7 +285,7 @@ struct snd_ice1712_spdif {
77457 int (*default_put)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
77458 void (*stream_get)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
77459 int (*stream_put)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
77460 - } ops;
77461 + } __no_const ops;
77462 };
77463
77464
77465 diff --git a/sound/pci/ymfpci/ymfpci_main.c b/sound/pci/ymfpci/ymfpci_main.c
77466 index 03ee4e3..be86b46 100644
77467 --- a/sound/pci/ymfpci/ymfpci_main.c
77468 +++ b/sound/pci/ymfpci/ymfpci_main.c
77469 @@ -203,8 +203,8 @@ static void snd_ymfpci_hw_stop(struct snd_ymfpci *chip)
77470 if ((snd_ymfpci_readl(chip, YDSXGR_STATUS) & 2) == 0)
77471 break;
77472 }
77473 - if (atomic_read(&chip->interrupt_sleep_count)) {
77474 - atomic_set(&chip->interrupt_sleep_count, 0);
77475 + if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
77476 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
77477 wake_up(&chip->interrupt_sleep);
77478 }
77479 __end:
77480 @@ -788,7 +788,7 @@ static void snd_ymfpci_irq_wait(struct snd_ymfpci *chip)
77481 continue;
77482 init_waitqueue_entry(&wait, current);
77483 add_wait_queue(&chip->interrupt_sleep, &wait);
77484 - atomic_inc(&chip->interrupt_sleep_count);
77485 + atomic_inc_unchecked(&chip->interrupt_sleep_count);
77486 schedule_timeout_uninterruptible(msecs_to_jiffies(50));
77487 remove_wait_queue(&chip->interrupt_sleep, &wait);
77488 }
77489 @@ -826,8 +826,8 @@ static irqreturn_t snd_ymfpci_interrupt(int irq, void *dev_id)
77490 snd_ymfpci_writel(chip, YDSXGR_MODE, mode);
77491 spin_unlock(&chip->reg_lock);
77492
77493 - if (atomic_read(&chip->interrupt_sleep_count)) {
77494 - atomic_set(&chip->interrupt_sleep_count, 0);
77495 + if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
77496 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
77497 wake_up(&chip->interrupt_sleep);
77498 }
77499 }
77500 @@ -2382,7 +2382,7 @@ int __devinit snd_ymfpci_create(struct snd_card *card,
77501 spin_lock_init(&chip->reg_lock);
77502 spin_lock_init(&chip->voice_lock);
77503 init_waitqueue_head(&chip->interrupt_sleep);
77504 - atomic_set(&chip->interrupt_sleep_count, 0);
77505 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
77506 chip->card = card;
77507 chip->pci = pci;
77508 chip->irq = -1;
77509 diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
77510 index ee15337..e2187a6 100644
77511 --- a/sound/soc/soc-pcm.c
77512 +++ b/sound/soc/soc-pcm.c
77513 @@ -583,7 +583,7 @@ static snd_pcm_uframes_t soc_pcm_pointer(struct snd_pcm_substream *substream)
77514 }
77515
77516 /* ASoC PCM operations */
77517 -static struct snd_pcm_ops soc_pcm_ops = {
77518 +static snd_pcm_ops_no_const soc_pcm_ops = {
77519 .open = soc_pcm_open,
77520 .close = soc_pcm_close,
77521 .hw_params = soc_pcm_hw_params,
77522 diff --git a/sound/usb/card.h b/sound/usb/card.h
77523 index a39edcc..1014050 100644
77524 --- a/sound/usb/card.h
77525 +++ b/sound/usb/card.h
77526 @@ -44,6 +44,7 @@ struct snd_urb_ops {
77527 int (*prepare_sync)(struct snd_usb_substream *subs, struct snd_pcm_runtime *runtime, struct urb *u);
77528 int (*retire_sync)(struct snd_usb_substream *subs, struct snd_pcm_runtime *runtime, struct urb *u);
77529 };
77530 +typedef struct snd_urb_ops __no_const snd_urb_ops_no_const;
77531
77532 struct snd_usb_substream {
77533 struct snd_usb_stream *stream;
77534 @@ -93,7 +94,7 @@ struct snd_usb_substream {
77535 struct snd_pcm_hw_constraint_list rate_list; /* limited rates */
77536 spinlock_t lock;
77537
77538 - struct snd_urb_ops ops; /* callbacks (must be filled at init) */
77539 + snd_urb_ops_no_const ops; /* callbacks (must be filled at init) */
77540 int last_frame_number; /* stored frame number */
77541 int last_delay; /* stored delay */
77542 };
77543 diff --git a/tools/gcc/Makefile b/tools/gcc/Makefile
77544 new file mode 100644
77545 index 0000000..b044b80
77546 --- /dev/null
77547 +++ b/tools/gcc/Makefile
77548 @@ -0,0 +1,21 @@
77549 +#CC := gcc
77550 +#PLUGIN_SOURCE_FILES := pax_plugin.c
77551 +#PLUGIN_OBJECT_FILES := $(patsubst %.c,%.o,$(PLUGIN_SOURCE_FILES))
77552 +GCCPLUGINS_DIR := $(shell $(CC) -print-file-name=plugin)
77553 +#CFLAGS += -I$(GCCPLUGINS_DIR)/include -fPIC -O2 -Wall -W
77554 +
77555 +HOST_EXTRACFLAGS += -I$(GCCPLUGINS_DIR)/include
77556 +
77557 +hostlibs-y := constify_plugin.so
77558 +hostlibs-$(CONFIG_PAX_MEMORY_STACKLEAK) += stackleak_plugin.so
77559 +hostlibs-$(CONFIG_KALLOCSTAT_PLUGIN) += kallocstat_plugin.so
77560 +hostlibs-$(CONFIG_PAX_KERNEXEC_PLUGIN) += kernexec_plugin.so
77561 +hostlibs-$(CONFIG_CHECKER_PLUGIN) += checker_plugin.so
77562 +
77563 +always := $(hostlibs-y)
77564 +
77565 +constify_plugin-objs := constify_plugin.o
77566 +stackleak_plugin-objs := stackleak_plugin.o
77567 +kallocstat_plugin-objs := kallocstat_plugin.o
77568 +kernexec_plugin-objs := kernexec_plugin.o
77569 +checker_plugin-objs := checker_plugin.o
77570 diff --git a/tools/gcc/checker_plugin.c b/tools/gcc/checker_plugin.c
77571 new file mode 100644
77572 index 0000000..d41b5af
77573 --- /dev/null
77574 +++ b/tools/gcc/checker_plugin.c
77575 @@ -0,0 +1,171 @@
77576 +/*
77577 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
77578 + * Licensed under the GPL v2
77579 + *
77580 + * Note: the choice of the license means that the compilation process is
77581 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
77582 + * but for the kernel it doesn't matter since it doesn't link against
77583 + * any of the gcc libraries
77584 + *
77585 + * gcc plugin to implement various sparse (source code checker) features
77586 + *
77587 + * TODO:
77588 + * - define separate __iomem, __percpu and __rcu address spaces (lots of code to patch)
77589 + *
77590 + * BUGS:
77591 + * - none known
77592 + */
77593 +#include "gcc-plugin.h"
77594 +#include "config.h"
77595 +#include "system.h"
77596 +#include "coretypes.h"
77597 +#include "tree.h"
77598 +#include "tree-pass.h"
77599 +#include "flags.h"
77600 +#include "intl.h"
77601 +#include "toplev.h"
77602 +#include "plugin.h"
77603 +//#include "expr.h" where are you...
77604 +#include "diagnostic.h"
77605 +#include "plugin-version.h"
77606 +#include "tm.h"
77607 +#include "function.h"
77608 +#include "basic-block.h"
77609 +#include "gimple.h"
77610 +#include "rtl.h"
77611 +#include "emit-rtl.h"
77612 +#include "tree-flow.h"
77613 +#include "target.h"
77614 +
77615 +extern void c_register_addr_space (const char *str, addr_space_t as);
77616 +extern enum machine_mode default_addr_space_pointer_mode (addr_space_t);
77617 +extern enum machine_mode default_addr_space_address_mode (addr_space_t);
77618 +extern bool default_addr_space_valid_pointer_mode(enum machine_mode mode, addr_space_t as);
77619 +extern bool default_addr_space_legitimate_address_p(enum machine_mode mode, rtx mem, bool strict, addr_space_t as);
77620 +extern rtx default_addr_space_legitimize_address(rtx x, rtx oldx, enum machine_mode mode, addr_space_t as);
77621 +
77622 +extern void print_gimple_stmt(FILE *, gimple, int, int);
77623 +extern rtx emit_move_insn(rtx x, rtx y);
77624 +
77625 +int plugin_is_GPL_compatible;
77626 +
77627 +static struct plugin_info checker_plugin_info = {
77628 + .version = "201111150100",
77629 +};
77630 +
77631 +#define ADDR_SPACE_KERNEL 0
77632 +#define ADDR_SPACE_FORCE_KERNEL 1
77633 +#define ADDR_SPACE_USER 2
77634 +#define ADDR_SPACE_FORCE_USER 3
77635 +#define ADDR_SPACE_IOMEM 0
77636 +#define ADDR_SPACE_FORCE_IOMEM 0
77637 +#define ADDR_SPACE_PERCPU 0
77638 +#define ADDR_SPACE_FORCE_PERCPU 0
77639 +#define ADDR_SPACE_RCU 0
77640 +#define ADDR_SPACE_FORCE_RCU 0
77641 +
77642 +static enum machine_mode checker_addr_space_pointer_mode(addr_space_t addrspace)
77643 +{
77644 + return default_addr_space_pointer_mode(ADDR_SPACE_GENERIC);
77645 +}
77646 +
77647 +static enum machine_mode checker_addr_space_address_mode(addr_space_t addrspace)
77648 +{
77649 + return default_addr_space_address_mode(ADDR_SPACE_GENERIC);
77650 +}
77651 +
77652 +static bool checker_addr_space_valid_pointer_mode(enum machine_mode mode, addr_space_t as)
77653 +{
77654 + return default_addr_space_valid_pointer_mode(mode, as);
77655 +}
77656 +
77657 +static bool checker_addr_space_legitimate_address_p(enum machine_mode mode, rtx mem, bool strict, addr_space_t as)
77658 +{
77659 + return default_addr_space_legitimate_address_p(mode, mem, strict, ADDR_SPACE_GENERIC);
77660 +}
77661 +
77662 +static rtx checker_addr_space_legitimize_address(rtx x, rtx oldx, enum machine_mode mode, addr_space_t as)
77663 +{
77664 + return default_addr_space_legitimize_address(x, oldx, mode, as);
77665 +}
77666 +
77667 +static bool checker_addr_space_subset_p(addr_space_t subset, addr_space_t superset)
77668 +{
77669 + if (subset == ADDR_SPACE_FORCE_KERNEL && superset == ADDR_SPACE_KERNEL)
77670 + return true;
77671 +
77672 + if (subset == ADDR_SPACE_FORCE_USER && superset == ADDR_SPACE_USER)
77673 + return true;
77674 +
77675 + if (subset == ADDR_SPACE_FORCE_IOMEM && superset == ADDR_SPACE_IOMEM)
77676 + return true;
77677 +
77678 + if (subset == ADDR_SPACE_KERNEL && superset == ADDR_SPACE_FORCE_USER)
77679 + return true;
77680 +
77681 + if (subset == ADDR_SPACE_KERNEL && superset == ADDR_SPACE_FORCE_IOMEM)
77682 + return true;
77683 +
77684 + if (subset == ADDR_SPACE_USER && superset == ADDR_SPACE_FORCE_KERNEL)
77685 + return true;
77686 +
77687 + if (subset == ADDR_SPACE_IOMEM && superset == ADDR_SPACE_FORCE_KERNEL)
77688 + return true;
77689 +
77690 + return subset == superset;
77691 +}
77692 +
77693 +static rtx checker_addr_space_convert(rtx op, tree from_type, tree to_type)
77694 +{
77695 +// addr_space_t from_as = TYPE_ADDR_SPACE(TREE_TYPE(from_type));
77696 +// addr_space_t to_as = TYPE_ADDR_SPACE(TREE_TYPE(to_type));
77697 +
77698 + return op;
77699 +}
77700 +
77701 +static void register_checker_address_spaces(void *event_data, void *data)
77702 +{
77703 + c_register_addr_space("__kernel", ADDR_SPACE_KERNEL);
77704 + c_register_addr_space("__force_kernel", ADDR_SPACE_FORCE_KERNEL);
77705 + c_register_addr_space("__user", ADDR_SPACE_USER);
77706 + c_register_addr_space("__force_user", ADDR_SPACE_FORCE_USER);
77707 +// c_register_addr_space("__iomem", ADDR_SPACE_IOMEM);
77708 +// c_register_addr_space("__force_iomem", ADDR_SPACE_FORCE_IOMEM);
77709 +// c_register_addr_space("__percpu", ADDR_SPACE_PERCPU);
77710 +// c_register_addr_space("__force_percpu", ADDR_SPACE_FORCE_PERCPU);
77711 +// c_register_addr_space("__rcu", ADDR_SPACE_RCU);
77712 +// c_register_addr_space("__force_rcu", ADDR_SPACE_FORCE_RCU);
77713 +
77714 + targetm.addr_space.pointer_mode = checker_addr_space_pointer_mode;
77715 + targetm.addr_space.address_mode = checker_addr_space_address_mode;
77716 + targetm.addr_space.valid_pointer_mode = checker_addr_space_valid_pointer_mode;
77717 + targetm.addr_space.legitimate_address_p = checker_addr_space_legitimate_address_p;
77718 +// targetm.addr_space.legitimize_address = checker_addr_space_legitimize_address;
77719 + targetm.addr_space.subset_p = checker_addr_space_subset_p;
77720 + targetm.addr_space.convert = checker_addr_space_convert;
77721 +}
77722 +
77723 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
77724 +{
77725 + const char * const plugin_name = plugin_info->base_name;
77726 + const int argc = plugin_info->argc;
77727 + const struct plugin_argument * const argv = plugin_info->argv;
77728 + int i;
77729 +
77730 + if (!plugin_default_version_check(version, &gcc_version)) {
77731 + error(G_("incompatible gcc/plugin versions"));
77732 + return 1;
77733 + }
77734 +
77735 + register_callback(plugin_name, PLUGIN_INFO, NULL, &checker_plugin_info);
77736 +
77737 + for (i = 0; i < argc; ++i)
77738 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
77739 +
77740 + if (TARGET_64BIT == 0)
77741 + return 0;
77742 +
77743 + register_callback(plugin_name, PLUGIN_PRAGMAS, register_checker_address_spaces, NULL);
77744 +
77745 + return 0;
77746 +}
77747 diff --git a/tools/gcc/constify_plugin.c b/tools/gcc/constify_plugin.c
77748 new file mode 100644
77749 index 0000000..704a564
77750 --- /dev/null
77751 +++ b/tools/gcc/constify_plugin.c
77752 @@ -0,0 +1,303 @@
77753 +/*
77754 + * Copyright 2011 by Emese Revfy <re.emese@gmail.com>
77755 + * Copyright 2011 by PaX Team <pageexec@freemail.hu>
77756 + * Licensed under the GPL v2, or (at your option) v3
77757 + *
77758 + * This gcc plugin constifies all structures which contain only function pointers or are explicitly marked for constification.
77759 + *
77760 + * Homepage:
77761 + * http://www.grsecurity.net/~ephox/const_plugin/
77762 + *
77763 + * Usage:
77764 + * $ gcc -I`gcc -print-file-name=plugin`/include -fPIC -shared -O2 -o constify_plugin.so constify_plugin.c
77765 + * $ gcc -fplugin=constify_plugin.so test.c -O2
77766 + */
77767 +
77768 +#include "gcc-plugin.h"
77769 +#include "config.h"
77770 +#include "system.h"
77771 +#include "coretypes.h"
77772 +#include "tree.h"
77773 +#include "tree-pass.h"
77774 +#include "flags.h"
77775 +#include "intl.h"
77776 +#include "toplev.h"
77777 +#include "plugin.h"
77778 +#include "diagnostic.h"
77779 +#include "plugin-version.h"
77780 +#include "tm.h"
77781 +#include "function.h"
77782 +#include "basic-block.h"
77783 +#include "gimple.h"
77784 +#include "rtl.h"
77785 +#include "emit-rtl.h"
77786 +#include "tree-flow.h"
77787 +
77788 +#define C_TYPE_FIELDS_READONLY(TYPE) TREE_LANG_FLAG_1(TYPE)
77789 +
77790 +int plugin_is_GPL_compatible;
77791 +
77792 +static struct plugin_info const_plugin_info = {
77793 + .version = "201111150100",
77794 + .help = "no-constify\tturn off constification\n",
77795 +};
77796 +
77797 +static void constify_type(tree type);
77798 +static bool walk_struct(tree node);
77799 +
77800 +static tree deconstify_type(tree old_type)
77801 +{
77802 + tree new_type, field;
77803 +
77804 + new_type = build_qualified_type(old_type, TYPE_QUALS(old_type) & ~TYPE_QUAL_CONST);
77805 + TYPE_FIELDS(new_type) = copy_list(TYPE_FIELDS(new_type));
77806 + for (field = TYPE_FIELDS(new_type); field; field = TREE_CHAIN(field))
77807 + DECL_FIELD_CONTEXT(field) = new_type;
77808 + TYPE_READONLY(new_type) = 0;
77809 + C_TYPE_FIELDS_READONLY(new_type) = 0;
77810 + return new_type;
77811 +}
77812 +
77813 +static tree handle_no_const_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
77814 +{
77815 + tree type;
77816 +
77817 + *no_add_attrs = true;
77818 + if (TREE_CODE(*node) == FUNCTION_DECL) {
77819 + error("%qE attribute does not apply to functions", name);
77820 + return NULL_TREE;
77821 + }
77822 +
77823 + if (TREE_CODE(*node) == VAR_DECL) {
77824 + error("%qE attribute does not apply to variables", name);
77825 + return NULL_TREE;
77826 + }
77827 +
77828 + if (TYPE_P(*node)) {
77829 + if (TREE_CODE(*node) == RECORD_TYPE || TREE_CODE(*node) == UNION_TYPE)
77830 + *no_add_attrs = false;
77831 + else
77832 + error("%qE attribute applies to struct and union types only", name);
77833 + return NULL_TREE;
77834 + }
77835 +
77836 + type = TREE_TYPE(*node);
77837 +
77838 + if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE) {
77839 + error("%qE attribute applies to struct and union types only", name);
77840 + return NULL_TREE;
77841 + }
77842 +
77843 + if (lookup_attribute(IDENTIFIER_POINTER(name), TYPE_ATTRIBUTES(type))) {
77844 + error("%qE attribute is already applied to the type", name);
77845 + return NULL_TREE;
77846 + }
77847 +
77848 + if (TREE_CODE(*node) == TYPE_DECL && !TYPE_READONLY(type)) {
77849 + error("%qE attribute used on type that is not constified", name);
77850 + return NULL_TREE;
77851 + }
77852 +
77853 + if (TREE_CODE(*node) == TYPE_DECL) {
77854 + TREE_TYPE(*node) = deconstify_type(type);
77855 + TREE_READONLY(*node) = 0;
77856 + return NULL_TREE;
77857 + }
77858 +
77859 + return NULL_TREE;
77860 +}
77861 +
77862 +static tree handle_do_const_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
77863 +{
77864 + *no_add_attrs = true;
77865 + if (!TYPE_P(*node)) {
77866 + error("%qE attribute applies to types only", name);
77867 + return NULL_TREE;
77868 + }
77869 +
77870 + if (TREE_CODE(*node) != RECORD_TYPE && TREE_CODE(*node) != UNION_TYPE) {
77871 + error("%qE attribute applies to struct and union types only", name);
77872 + return NULL_TREE;
77873 + }
77874 +
77875 + *no_add_attrs = false;
77876 + constify_type(*node);
77877 + return NULL_TREE;
77878 +}
77879 +
77880 +static struct attribute_spec no_const_attr = {
77881 + .name = "no_const",
77882 + .min_length = 0,
77883 + .max_length = 0,
77884 + .decl_required = false,
77885 + .type_required = false,
77886 + .function_type_required = false,
77887 + .handler = handle_no_const_attribute,
77888 +#if BUILDING_GCC_VERSION >= 4007
77889 + .affects_type_identity = true
77890 +#endif
77891 +};
77892 +
77893 +static struct attribute_spec do_const_attr = {
77894 + .name = "do_const",
77895 + .min_length = 0,
77896 + .max_length = 0,
77897 + .decl_required = false,
77898 + .type_required = false,
77899 + .function_type_required = false,
77900 + .handler = handle_do_const_attribute,
77901 +#if BUILDING_GCC_VERSION >= 4007
77902 + .affects_type_identity = true
77903 +#endif
77904 +};
77905 +
77906 +static void register_attributes(void *event_data, void *data)
77907 +{
77908 + register_attribute(&no_const_attr);
77909 + register_attribute(&do_const_attr);
77910 +}
77911 +
77912 +static void constify_type(tree type)
77913 +{
77914 + TYPE_READONLY(type) = 1;
77915 + C_TYPE_FIELDS_READONLY(type) = 1;
77916 +}
77917 +
77918 +static bool is_fptr(tree field)
77919 +{
77920 + tree ptr = TREE_TYPE(field);
77921 +
77922 + if (TREE_CODE(ptr) != POINTER_TYPE)
77923 + return false;
77924 +
77925 + return TREE_CODE(TREE_TYPE(ptr)) == FUNCTION_TYPE;
77926 +}
77927 +
77928 +static bool walk_struct(tree node)
77929 +{
77930 + tree field;
77931 +
77932 + if (lookup_attribute("no_const", TYPE_ATTRIBUTES(node)))
77933 + return false;
77934 +
77935 + if (TYPE_FIELDS(node) == NULL_TREE)
77936 + return false;
77937 +
77938 + for (field = TYPE_FIELDS(node); field; field = TREE_CHAIN(field)) {
77939 + tree type = TREE_TYPE(field);
77940 + enum tree_code code = TREE_CODE(type);
77941 + if (code == RECORD_TYPE || code == UNION_TYPE) {
77942 + if (!(walk_struct(type)))
77943 + return false;
77944 + } else if (!is_fptr(field) && !TREE_READONLY(field))
77945 + return false;
77946 + }
77947 + return true;
77948 +}
77949 +
77950 +static void finish_type(void *event_data, void *data)
77951 +{
77952 + tree type = (tree)event_data;
77953 +
77954 + if (type == NULL_TREE)
77955 + return;
77956 +
77957 + if (TYPE_READONLY(type))
77958 + return;
77959 +
77960 + if (walk_struct(type))
77961 + constify_type(type);
77962 +}
77963 +
77964 +static unsigned int check_local_variables(void);
77965 +
77966 +struct gimple_opt_pass pass_local_variable = {
77967 + {
77968 + .type = GIMPLE_PASS,
77969 + .name = "check_local_variables",
77970 + .gate = NULL,
77971 + .execute = check_local_variables,
77972 + .sub = NULL,
77973 + .next = NULL,
77974 + .static_pass_number = 0,
77975 + .tv_id = TV_NONE,
77976 + .properties_required = 0,
77977 + .properties_provided = 0,
77978 + .properties_destroyed = 0,
77979 + .todo_flags_start = 0,
77980 + .todo_flags_finish = 0
77981 + }
77982 +};
77983 +
77984 +static unsigned int check_local_variables(void)
77985 +{
77986 + tree var;
77987 + referenced_var_iterator rvi;
77988 +
77989 +#if BUILDING_GCC_VERSION == 4005
77990 + FOR_EACH_REFERENCED_VAR(var, rvi) {
77991 +#else
77992 + FOR_EACH_REFERENCED_VAR(cfun, var, rvi) {
77993 +#endif
77994 + tree type = TREE_TYPE(var);
77995 +
77996 + if (!DECL_P(var) || TREE_STATIC(var) || DECL_EXTERNAL(var))
77997 + continue;
77998 +
77999 + if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE)
78000 + continue;
78001 +
78002 + if (!TYPE_READONLY(type))
78003 + continue;
78004 +
78005 +// if (lookup_attribute("no_const", DECL_ATTRIBUTES(var)))
78006 +// continue;
78007 +
78008 +// if (lookup_attribute("no_const", TYPE_ATTRIBUTES(type)))
78009 +// continue;
78010 +
78011 + if (walk_struct(type)) {
78012 + error("constified variable %qE cannot be local", var);
78013 + return 1;
78014 + }
78015 + }
78016 + return 0;
78017 +}
78018 +
78019 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
78020 +{
78021 + const char * const plugin_name = plugin_info->base_name;
78022 + const int argc = plugin_info->argc;
78023 + const struct plugin_argument * const argv = plugin_info->argv;
78024 + int i;
78025 + bool constify = true;
78026 +
78027 + struct register_pass_info local_variable_pass_info = {
78028 + .pass = &pass_local_variable.pass,
78029 + .reference_pass_name = "*referenced_vars",
78030 + .ref_pass_instance_number = 0,
78031 + .pos_op = PASS_POS_INSERT_AFTER
78032 + };
78033 +
78034 + if (!plugin_default_version_check(version, &gcc_version)) {
78035 + error(G_("incompatible gcc/plugin versions"));
78036 + return 1;
78037 + }
78038 +
78039 + for (i = 0; i < argc; ++i) {
78040 + if (!(strcmp(argv[i].key, "no-constify"))) {
78041 + constify = false;
78042 + continue;
78043 + }
78044 + error(G_("unknown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
78045 + }
78046 +
78047 + register_callback(plugin_name, PLUGIN_INFO, NULL, &const_plugin_info);
78048 + if (constify) {
78049 + register_callback(plugin_name, PLUGIN_FINISH_TYPE, finish_type, NULL);
78050 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &local_variable_pass_info);
78051 + }
78052 + register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL);
78053 +
78054 + return 0;
78055 +}
78056 diff --git a/tools/gcc/kallocstat_plugin.c b/tools/gcc/kallocstat_plugin.c
78057 new file mode 100644
78058 index 0000000..a5eabce
78059 --- /dev/null
78060 +++ b/tools/gcc/kallocstat_plugin.c
78061 @@ -0,0 +1,167 @@
78062 +/*
78063 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
78064 + * Licensed under the GPL v2
78065 + *
78066 + * Note: the choice of the license means that the compilation process is
78067 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
78068 + * but for the kernel it doesn't matter since it doesn't link against
78069 + * any of the gcc libraries
78070 + *
78071 + * gcc plugin to find the distribution of k*alloc sizes
78072 + *
78073 + * TODO:
78074 + *
78075 + * BUGS:
78076 + * - none known
78077 + */
78078 +#include "gcc-plugin.h"
78079 +#include "config.h"
78080 +#include "system.h"
78081 +#include "coretypes.h"
78082 +#include "tree.h"
78083 +#include "tree-pass.h"
78084 +#include "flags.h"
78085 +#include "intl.h"
78086 +#include "toplev.h"
78087 +#include "plugin.h"
78088 +//#include "expr.h" where are you...
78089 +#include "diagnostic.h"
78090 +#include "plugin-version.h"
78091 +#include "tm.h"
78092 +#include "function.h"
78093 +#include "basic-block.h"
78094 +#include "gimple.h"
78095 +#include "rtl.h"
78096 +#include "emit-rtl.h"
78097 +
78098 +extern void print_gimple_stmt(FILE *, gimple, int, int);
78099 +
78100 +int plugin_is_GPL_compatible;
78101 +
78102 +static const char * const kalloc_functions[] = {
78103 + "__kmalloc",
78104 + "kmalloc",
78105 + "kmalloc_large",
78106 + "kmalloc_node",
78107 + "kmalloc_order",
78108 + "kmalloc_order_trace",
78109 + "kmalloc_slab",
78110 + "kzalloc",
78111 + "kzalloc_node",
78112 +};
78113 +
78114 +static struct plugin_info kallocstat_plugin_info = {
78115 + .version = "201111150100",
78116 +};
78117 +
78118 +static unsigned int execute_kallocstat(void);
78119 +
78120 +static struct gimple_opt_pass kallocstat_pass = {
78121 + .pass = {
78122 + .type = GIMPLE_PASS,
78123 + .name = "kallocstat",
78124 + .gate = NULL,
78125 + .execute = execute_kallocstat,
78126 + .sub = NULL,
78127 + .next = NULL,
78128 + .static_pass_number = 0,
78129 + .tv_id = TV_NONE,
78130 + .properties_required = 0,
78131 + .properties_provided = 0,
78132 + .properties_destroyed = 0,
78133 + .todo_flags_start = 0,
78134 + .todo_flags_finish = 0
78135 + }
78136 +};
78137 +
78138 +static bool is_kalloc(const char *fnname)
78139 +{
78140 + size_t i;
78141 +
78142 + for (i = 0; i < ARRAY_SIZE(kalloc_functions); i++)
78143 + if (!strcmp(fnname, kalloc_functions[i]))
78144 + return true;
78145 + return false;
78146 +}
78147 +
78148 +static unsigned int execute_kallocstat(void)
78149 +{
78150 + basic_block bb;
78151 +
78152 + // 1. loop through BBs and GIMPLE statements
78153 + FOR_EACH_BB(bb) {
78154 + gimple_stmt_iterator gsi;
78155 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
78156 + // gimple match:
78157 + tree fndecl, size;
78158 + gimple call_stmt;
78159 + const char *fnname;
78160 +
78161 + // is it a call
78162 + call_stmt = gsi_stmt(gsi);
78163 + if (!is_gimple_call(call_stmt))
78164 + continue;
78165 + fndecl = gimple_call_fndecl(call_stmt);
78166 + if (fndecl == NULL_TREE)
78167 + continue;
78168 + if (TREE_CODE(fndecl) != FUNCTION_DECL)
78169 + continue;
78170 +
78171 + // is it a call to k*alloc
78172 + fnname = IDENTIFIER_POINTER(DECL_NAME(fndecl));
78173 + if (!is_kalloc(fnname))
78174 + continue;
78175 +
78176 + // is the size arg the result of a simple const assignment
78177 + size = gimple_call_arg(call_stmt, 0);
78178 + while (true) {
78179 + gimple def_stmt;
78180 + expanded_location xloc;
78181 + size_t size_val;
78182 +
78183 + if (TREE_CODE(size) != SSA_NAME)
78184 + break;
78185 + def_stmt = SSA_NAME_DEF_STMT(size);
78186 + if (!def_stmt || !is_gimple_assign(def_stmt))
78187 + break;
78188 + if (gimple_num_ops(def_stmt) != 2)
78189 + break;
78190 + size = gimple_assign_rhs1(def_stmt);
78191 + if (!TREE_CONSTANT(size))
78192 + continue;
78193 + xloc = expand_location(gimple_location(def_stmt));
78194 + if (!xloc.file)
78195 + xloc = expand_location(DECL_SOURCE_LOCATION(current_function_decl));
78196 + size_val = TREE_INT_CST_LOW(size);
78197 + fprintf(stderr, "kallocsize: %8zu %8zx %s %s:%u\n", size_val, size_val, fnname, xloc.file, xloc.line);
78198 + break;
78199 + }
78200 +//print_gimple_stmt(stderr, call_stmt, 0, TDF_LINENO);
78201 +//debug_tree(gimple_call_fn(call_stmt));
78202 +//print_node(stderr, "pax", fndecl, 4);
78203 + }
78204 + }
78205 +
78206 + return 0;
78207 +}
78208 +
78209 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
78210 +{
78211 + const char * const plugin_name = plugin_info->base_name;
78212 + struct register_pass_info kallocstat_pass_info = {
78213 + .pass = &kallocstat_pass.pass,
78214 + .reference_pass_name = "ssa",
78215 + .ref_pass_instance_number = 0,
78216 + .pos_op = PASS_POS_INSERT_AFTER
78217 + };
78218 +
78219 + if (!plugin_default_version_check(version, &gcc_version)) {
78220 + error(G_("incompatible gcc/plugin versions"));
78221 + return 1;
78222 + }
78223 +
78224 + register_callback(plugin_name, PLUGIN_INFO, NULL, &kallocstat_plugin_info);
78225 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kallocstat_pass_info);
78226 +
78227 + return 0;
78228 +}
78229 diff --git a/tools/gcc/kernexec_plugin.c b/tools/gcc/kernexec_plugin.c
78230 new file mode 100644
78231 index 0000000..008f159
78232 --- /dev/null
78233 +++ b/tools/gcc/kernexec_plugin.c
78234 @@ -0,0 +1,427 @@
78235 +/*
78236 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
78237 + * Licensed under the GPL v2
78238 + *
78239 + * Note: the choice of the license means that the compilation process is
78240 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
78241 + * but for the kernel it doesn't matter since it doesn't link against
78242 + * any of the gcc libraries
78243 + *
78244 + * gcc plugin to make KERNEXEC/amd64 almost as good as it is on i386
78245 + *
78246 + * TODO:
78247 + *
78248 + * BUGS:
78249 + * - none known
78250 + */
78251 +#include "gcc-plugin.h"
78252 +#include "config.h"
78253 +#include "system.h"
78254 +#include "coretypes.h"
78255 +#include "tree.h"
78256 +#include "tree-pass.h"
78257 +#include "flags.h"
78258 +#include "intl.h"
78259 +#include "toplev.h"
78260 +#include "plugin.h"
78261 +//#include "expr.h" where are you...
78262 +#include "diagnostic.h"
78263 +#include "plugin-version.h"
78264 +#include "tm.h"
78265 +#include "function.h"
78266 +#include "basic-block.h"
78267 +#include "gimple.h"
78268 +#include "rtl.h"
78269 +#include "emit-rtl.h"
78270 +#include "tree-flow.h"
78271 +
78272 +extern void print_gimple_stmt(FILE *, gimple, int, int);
78273 +extern rtx emit_move_insn(rtx x, rtx y);
78274 +
78275 +int plugin_is_GPL_compatible;
78276 +
78277 +static struct plugin_info kernexec_plugin_info = {
78278 + .version = "201111291120",
78279 + .help = "method=[bts|or]\tinstrumentation method\n"
78280 +};
78281 +
78282 +static unsigned int execute_kernexec_reload(void);
78283 +static unsigned int execute_kernexec_fptr(void);
78284 +static unsigned int execute_kernexec_retaddr(void);
78285 +static bool kernexec_cmodel_check(void);
78286 +
78287 +static void (*kernexec_instrument_fptr)(gimple_stmt_iterator *);
78288 +static void (*kernexec_instrument_retaddr)(rtx);
78289 +
78290 +static struct gimple_opt_pass kernexec_reload_pass = {
78291 + .pass = {
78292 + .type = GIMPLE_PASS,
78293 + .name = "kernexec_reload",
78294 + .gate = kernexec_cmodel_check,
78295 + .execute = execute_kernexec_reload,
78296 + .sub = NULL,
78297 + .next = NULL,
78298 + .static_pass_number = 0,
78299 + .tv_id = TV_NONE,
78300 + .properties_required = 0,
78301 + .properties_provided = 0,
78302 + .properties_destroyed = 0,
78303 + .todo_flags_start = 0,
78304 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_remove_unused_locals | TODO_update_ssa_no_phi
78305 + }
78306 +};
78307 +
78308 +static struct gimple_opt_pass kernexec_fptr_pass = {
78309 + .pass = {
78310 + .type = GIMPLE_PASS,
78311 + .name = "kernexec_fptr",
78312 + .gate = kernexec_cmodel_check,
78313 + .execute = execute_kernexec_fptr,
78314 + .sub = NULL,
78315 + .next = NULL,
78316 + .static_pass_number = 0,
78317 + .tv_id = TV_NONE,
78318 + .properties_required = 0,
78319 + .properties_provided = 0,
78320 + .properties_destroyed = 0,
78321 + .todo_flags_start = 0,
78322 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_remove_unused_locals | TODO_update_ssa_no_phi
78323 + }
78324 +};
78325 +
78326 +static struct rtl_opt_pass kernexec_retaddr_pass = {
78327 + .pass = {
78328 + .type = RTL_PASS,
78329 + .name = "kernexec_retaddr",
78330 + .gate = kernexec_cmodel_check,
78331 + .execute = execute_kernexec_retaddr,
78332 + .sub = NULL,
78333 + .next = NULL,
78334 + .static_pass_number = 0,
78335 + .tv_id = TV_NONE,
78336 + .properties_required = 0,
78337 + .properties_provided = 0,
78338 + .properties_destroyed = 0,
78339 + .todo_flags_start = 0,
78340 + .todo_flags_finish = TODO_dump_func | TODO_ggc_collect
78341 + }
78342 +};
78343 +
78344 +static bool kernexec_cmodel_check(void)
78345 +{
78346 + tree section;
78347 +
78348 + if (ix86_cmodel != CM_KERNEL)
78349 + return false;
78350 +
78351 + section = lookup_attribute("section", DECL_ATTRIBUTES(current_function_decl));
78352 + if (!section || !TREE_VALUE(section))
78353 + return true;
78354 +
78355 + section = TREE_VALUE(TREE_VALUE(section));
78356 + if (strncmp(TREE_STRING_POINTER(section), ".vsyscall_", 10))
78357 + return true;
78358 +
78359 + return false;
78360 +}
78361 +
78362 +/*
78363 + * add special KERNEXEC instrumentation: reload %r10 after it has been clobbered
78364 + */
78365 +static void kernexec_reload_fptr_mask(gimple_stmt_iterator *gsi)
78366 +{
78367 + gimple asm_movabs_stmt;
78368 +
78369 + // build asm volatile("movabs $0x8000000000000000, %%r10\n\t" : : : );
78370 + asm_movabs_stmt = gimple_build_asm_vec("movabs $0x8000000000000000, %%r10\n\t", NULL, NULL, NULL, NULL);
78371 + gimple_asm_set_volatile(asm_movabs_stmt, true);
78372 + gsi_insert_after(gsi, asm_movabs_stmt, GSI_CONTINUE_LINKING);
78373 + update_stmt(asm_movabs_stmt);
78374 +}
78375 +
78376 +/*
78377 + * find all asm() stmts that clobber r10 and add a reload of r10
78378 + */
78379 +static unsigned int execute_kernexec_reload(void)
78380 +{
78381 + basic_block bb;
78382 +
78383 + // 1. loop through BBs and GIMPLE statements
78384 + FOR_EACH_BB(bb) {
78385 + gimple_stmt_iterator gsi;
78386 +
78387 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
78388 + // gimple match: __asm__ ("" : : : "r10");
78389 + gimple asm_stmt;
78390 + size_t nclobbers;
78391 +
78392 + // is it an asm ...
78393 + asm_stmt = gsi_stmt(gsi);
78394 + if (gimple_code(asm_stmt) != GIMPLE_ASM)
78395 + continue;
78396 +
78397 + // ... clobbering r10
78398 + nclobbers = gimple_asm_nclobbers(asm_stmt);
78399 + while (nclobbers--) {
78400 + tree op = gimple_asm_clobber_op(asm_stmt, nclobbers);
78401 + if (strcmp(TREE_STRING_POINTER(TREE_VALUE(op)), "r10"))
78402 + continue;
78403 + kernexec_reload_fptr_mask(&gsi);
78404 +//print_gimple_stmt(stderr, asm_stmt, 0, TDF_LINENO);
78405 + break;
78406 + }
78407 + }
78408 + }
78409 +
78410 + return 0;
78411 +}
78412 +
78413 +/*
78414 + * add special KERNEXEC instrumentation: force MSB of fptr to 1, which will produce
78415 + * a non-canonical address from a userland ptr and will just trigger a GPF on dereference
78416 + */
78417 +static void kernexec_instrument_fptr_bts(gimple_stmt_iterator *gsi)
78418 +{
78419 + gimple assign_intptr, assign_new_fptr, call_stmt;
78420 + tree intptr, old_fptr, new_fptr, kernexec_mask;
78421 +
78422 + call_stmt = gsi_stmt(*gsi);
78423 + old_fptr = gimple_call_fn(call_stmt);
78424 +
78425 + // create temporary unsigned long variable used for bitops and cast fptr to it
78426 + intptr = create_tmp_var(long_unsigned_type_node, "kernexec_bts");
78427 + add_referenced_var(intptr);
78428 + mark_sym_for_renaming(intptr);
78429 + assign_intptr = gimple_build_assign(intptr, fold_convert(long_unsigned_type_node, old_fptr));
78430 + gsi_insert_before(gsi, assign_intptr, GSI_SAME_STMT);
78431 + update_stmt(assign_intptr);
78432 +
78433 + // apply logical or to temporary unsigned long and bitmask
78434 + kernexec_mask = build_int_cstu(long_long_unsigned_type_node, 0x8000000000000000LL);
78435 +// kernexec_mask = build_int_cstu(long_long_unsigned_type_node, 0xffffffff80000000LL);
78436 + assign_intptr = gimple_build_assign(intptr, fold_build2(BIT_IOR_EXPR, long_long_unsigned_type_node, intptr, kernexec_mask));
78437 + gsi_insert_before(gsi, assign_intptr, GSI_SAME_STMT);
78438 + update_stmt(assign_intptr);
78439 +
78440 + // cast temporary unsigned long back to a temporary fptr variable
78441 + new_fptr = create_tmp_var(TREE_TYPE(old_fptr), "kernexec");
78442 + add_referenced_var(new_fptr);
78443 + mark_sym_for_renaming(new_fptr);
78444 + assign_new_fptr = gimple_build_assign(new_fptr, fold_convert(TREE_TYPE(old_fptr), intptr));
78445 + gsi_insert_before(gsi, assign_new_fptr, GSI_SAME_STMT);
78446 + update_stmt(assign_new_fptr);
78447 +
78448 + // replace call stmt fn with the new fptr
78449 + gimple_call_set_fn(call_stmt, new_fptr);
78450 + update_stmt(call_stmt);
78451 +}
78452 +
78453 +static void kernexec_instrument_fptr_or(gimple_stmt_iterator *gsi)
78454 +{
78455 + gimple asm_or_stmt, call_stmt;
78456 + tree old_fptr, new_fptr, input, output;
78457 + VEC(tree, gc) *inputs = NULL;
78458 + VEC(tree, gc) *outputs = NULL;
78459 +
78460 + call_stmt = gsi_stmt(*gsi);
78461 + old_fptr = gimple_call_fn(call_stmt);
78462 +
78463 + // create temporary fptr variable
78464 + new_fptr = create_tmp_var(TREE_TYPE(old_fptr), "kernexec_or");
78465 + add_referenced_var(new_fptr);
78466 + mark_sym_for_renaming(new_fptr);
78467 +
78468 + // build asm volatile("orq %%r10, %0\n\t" : "=r"(new_fptr) : "0"(old_fptr));
78469 + input = build_tree_list(NULL_TREE, build_string(2, "0"));
78470 + input = chainon(NULL_TREE, build_tree_list(input, old_fptr));
78471 + output = build_tree_list(NULL_TREE, build_string(3, "=r"));
78472 + output = chainon(NULL_TREE, build_tree_list(output, new_fptr));
78473 + VEC_safe_push(tree, gc, inputs, input);
78474 + VEC_safe_push(tree, gc, outputs, output);
78475 + asm_or_stmt = gimple_build_asm_vec("orq %%r10, %0\n\t", inputs, outputs, NULL, NULL);
78476 + gimple_asm_set_volatile(asm_or_stmt, true);
78477 + gsi_insert_before(gsi, asm_or_stmt, GSI_SAME_STMT);
78478 + update_stmt(asm_or_stmt);
78479 +
78480 + // replace call stmt fn with the new fptr
78481 + gimple_call_set_fn(call_stmt, new_fptr);
78482 + update_stmt(call_stmt);
78483 +}
78484 +
78485 +/*
78486 + * find all C level function pointer dereferences and forcibly set the highest bit of the pointer
78487 + */
78488 +static unsigned int execute_kernexec_fptr(void)
78489 +{
78490 + basic_block bb;
78491 +
78492 + // 1. loop through BBs and GIMPLE statements
78493 + FOR_EACH_BB(bb) {
78494 + gimple_stmt_iterator gsi;
78495 +
78496 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
78497 + // gimple match: h_1 = get_fptr (); D.2709_3 = h_1 (x_2(D));
78498 + tree fn;
78499 + gimple call_stmt;
78500 +
78501 + // is it a call ...
78502 + call_stmt = gsi_stmt(gsi);
78503 + if (!is_gimple_call(call_stmt))
78504 + continue;
78505 + fn = gimple_call_fn(call_stmt);
78506 + if (TREE_CODE(fn) == ADDR_EXPR)
78507 + continue;
78508 + if (TREE_CODE(fn) != SSA_NAME)
78509 + gcc_unreachable();
78510 +
78511 + // ... through a function pointer
78512 + fn = SSA_NAME_VAR(fn);
78513 + if (TREE_CODE(fn) != VAR_DECL && TREE_CODE(fn) != PARM_DECL)
78514 + continue;
78515 + fn = TREE_TYPE(fn);
78516 + if (TREE_CODE(fn) != POINTER_TYPE)
78517 + continue;
78518 + fn = TREE_TYPE(fn);
78519 + if (TREE_CODE(fn) != FUNCTION_TYPE)
78520 + continue;
78521 +
78522 + kernexec_instrument_fptr(&gsi);
78523 +
78524 +//debug_tree(gimple_call_fn(call_stmt));
78525 +//print_gimple_stmt(stderr, call_stmt, 0, TDF_LINENO);
78526 + }
78527 + }
78528 +
78529 + return 0;
78530 +}
78531 +
78532 +// add special KERNEXEC instrumentation: btsq $63,(%rsp) just before retn
78533 +static void kernexec_instrument_retaddr_bts(rtx insn)
78534 +{
78535 + rtx btsq;
78536 + rtvec argvec, constraintvec, labelvec;
78537 + int line;
78538 +
78539 + // create asm volatile("btsq $63,(%%rsp)":::)
78540 + argvec = rtvec_alloc(0);
78541 + constraintvec = rtvec_alloc(0);
78542 + labelvec = rtvec_alloc(0);
78543 + line = expand_location(RTL_LOCATION(insn)).line;
78544 + btsq = gen_rtx_ASM_OPERANDS(VOIDmode, "btsq $63,(%%rsp)", empty_string, 0, argvec, constraintvec, labelvec, line);
78545 + MEM_VOLATILE_P(btsq) = 1;
78546 +// RTX_FRAME_RELATED_P(btsq) = 1; // not for ASM_OPERANDS
78547 + emit_insn_before(btsq, insn);
78548 +}
78549 +
78550 +// add special KERNEXEC instrumentation: orq %r10,(%rsp) just before retn
78551 +static void kernexec_instrument_retaddr_or(rtx insn)
78552 +{
78553 + rtx orq;
78554 + rtvec argvec, constraintvec, labelvec;
78555 + int line;
78556 +
78557 + // create asm volatile("orq %%r10,(%%rsp)":::)
78558 + argvec = rtvec_alloc(0);
78559 + constraintvec = rtvec_alloc(0);
78560 + labelvec = rtvec_alloc(0);
78561 + line = expand_location(RTL_LOCATION(insn)).line;
78562 + orq = gen_rtx_ASM_OPERANDS(VOIDmode, "orq %%r10,(%%rsp)", empty_string, 0, argvec, constraintvec, labelvec, line);
78563 + MEM_VOLATILE_P(orq) = 1;
78564 +// RTX_FRAME_RELATED_P(orq) = 1; // not for ASM_OPERANDS
78565 + emit_insn_before(orq, insn);
78566 +}
78567 +
78568 +/*
78569 + * find all asm level function returns and forcibly set the highest bit of the return address
78570 + */
78571 +static unsigned int execute_kernexec_retaddr(void)
78572 +{
78573 + rtx insn;
78574 +
78575 + // 1. find function returns
78576 + for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) {
78577 + // rtl match: (jump_insn 41 40 42 2 (return) fptr.c:42 634 {return_internal} (nil))
78578 + // (jump_insn 12 9 11 2 (parallel [ (return) (unspec [ (0) ] UNSPEC_REP) ]) fptr.c:46 635 {return_internal_long} (nil))
78579 + rtx body;
78580 +
78581 + // is it a retn
78582 + if (!JUMP_P(insn))
78583 + continue;
78584 + body = PATTERN(insn);
78585 + if (GET_CODE(body) == PARALLEL)
78586 + body = XVECEXP(body, 0, 0);
78587 + if (GET_CODE(body) != RETURN)
78588 + continue;
78589 + kernexec_instrument_retaddr(insn);
78590 + }
78591 +
78592 +// print_simple_rtl(stderr, get_insns());
78593 +// print_rtl(stderr, get_insns());
78594 +
78595 + return 0;
78596 +}
78597 +
78598 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
78599 +{
78600 + const char * const plugin_name = plugin_info->base_name;
78601 + const int argc = plugin_info->argc;
78602 + const struct plugin_argument * const argv = plugin_info->argv;
78603 + int i;
78604 + struct register_pass_info kernexec_reload_pass_info = {
78605 + .pass = &kernexec_reload_pass.pass,
78606 + .reference_pass_name = "ssa",
78607 + .ref_pass_instance_number = 0,
78608 + .pos_op = PASS_POS_INSERT_AFTER
78609 + };
78610 + struct register_pass_info kernexec_fptr_pass_info = {
78611 + .pass = &kernexec_fptr_pass.pass,
78612 + .reference_pass_name = "ssa",
78613 + .ref_pass_instance_number = 0,
78614 + .pos_op = PASS_POS_INSERT_AFTER
78615 + };
78616 + struct register_pass_info kernexec_retaddr_pass_info = {
78617 + .pass = &kernexec_retaddr_pass.pass,
78618 + .reference_pass_name = "pro_and_epilogue",
78619 + .ref_pass_instance_number = 0,
78620 + .pos_op = PASS_POS_INSERT_AFTER
78621 + };
78622 +
78623 + if (!plugin_default_version_check(version, &gcc_version)) {
78624 + error(G_("incompatible gcc/plugin versions"));
78625 + return 1;
78626 + }
78627 +
78628 + register_callback(plugin_name, PLUGIN_INFO, NULL, &kernexec_plugin_info);
78629 +
78630 + if (TARGET_64BIT == 0)
78631 + return 0;
78632 +
78633 + for (i = 0; i < argc; ++i) {
78634 + if (!strcmp(argv[i].key, "method")) {
78635 + if (!argv[i].value) {
78636 + error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
78637 + continue;
78638 + }
78639 + if (!strcmp(argv[i].value, "bts")) {
78640 + kernexec_instrument_fptr = kernexec_instrument_fptr_bts;
78641 + kernexec_instrument_retaddr = kernexec_instrument_retaddr_bts;
78642 + } else if (!strcmp(argv[i].value, "or")) {
78643 + kernexec_instrument_fptr = kernexec_instrument_fptr_or;
78644 + kernexec_instrument_retaddr = kernexec_instrument_retaddr_or;
78645 + fix_register("r10", 1, 1);
78646 + } else
78647 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
78648 + continue;
78649 + }
78650 + error(G_("unknown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
78651 + }
78652 + if (!kernexec_instrument_fptr || !kernexec_instrument_retaddr)
78653 + error(G_("no instrumentation method was selected via '-fplugin-arg-%s-method'"), plugin_name);
78654 +
78655 + if (kernexec_instrument_fptr == kernexec_instrument_fptr_or)
78656 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_reload_pass_info);
78657 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_fptr_pass_info);
78658 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_retaddr_pass_info);
78659 +
78660 + return 0;
78661 +}
78662 diff --git a/tools/gcc/stackleak_plugin.c b/tools/gcc/stackleak_plugin.c
78663 new file mode 100644
78664 index 0000000..8b61031
78665 --- /dev/null
78666 +++ b/tools/gcc/stackleak_plugin.c
78667 @@ -0,0 +1,295 @@
78668 +/*
78669 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
78670 + * Licensed under the GPL v2
78671 + *
78672 + * Note: the choice of the license means that the compilation process is
78673 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
78674 + * but for the kernel it doesn't matter since it doesn't link against
78675 + * any of the gcc libraries
78676 + *
78677 + * gcc plugin to help implement various PaX features
78678 + *
78679 + * - track lowest stack pointer
78680 + *
78681 + * TODO:
78682 + * - initialize all local variables
78683 + *
78684 + * BUGS:
78685 + * - none known
78686 + */
78687 +#include "gcc-plugin.h"
78688 +#include "config.h"
78689 +#include "system.h"
78690 +#include "coretypes.h"
78691 +#include "tree.h"
78692 +#include "tree-pass.h"
78693 +#include "flags.h"
78694 +#include "intl.h"
78695 +#include "toplev.h"
78696 +#include "plugin.h"
78697 +//#include "expr.h" where are you...
78698 +#include "diagnostic.h"
78699 +#include "plugin-version.h"
78700 +#include "tm.h"
78701 +#include "function.h"
78702 +#include "basic-block.h"
78703 +#include "gimple.h"
78704 +#include "rtl.h"
78705 +#include "emit-rtl.h"
78706 +
78707 +extern void print_gimple_stmt(FILE *, gimple, int, int);
78708 +
78709 +int plugin_is_GPL_compatible;
78710 +
78711 +static int track_frame_size = -1;
78712 +static const char track_function[] = "pax_track_stack";
78713 +static const char check_function[] = "pax_check_alloca";
78714 +static bool init_locals;
78715 +
78716 +static struct plugin_info stackleak_plugin_info = {
78717 + .version = "201111150100",
78718 + .help = "track-lowest-sp=nn\ttrack sp in functions whose frame size is at least nn bytes\n"
78719 +// "initialize-locals\t\tforcibly initialize all stack frames\n"
78720 +};
78721 +
78722 +static bool gate_stackleak_track_stack(void);
78723 +static unsigned int execute_stackleak_tree_instrument(void);
78724 +static unsigned int execute_stackleak_final(void);
78725 +
78726 +static struct gimple_opt_pass stackleak_tree_instrument_pass = {
78727 + .pass = {
78728 + .type = GIMPLE_PASS,
78729 + .name = "stackleak_tree_instrument",
78730 + .gate = gate_stackleak_track_stack,
78731 + .execute = execute_stackleak_tree_instrument,
78732 + .sub = NULL,
78733 + .next = NULL,
78734 + .static_pass_number = 0,
78735 + .tv_id = TV_NONE,
78736 + .properties_required = PROP_gimple_leh | PROP_cfg,
78737 + .properties_provided = 0,
78738 + .properties_destroyed = 0,
78739 + .todo_flags_start = 0, //TODO_verify_ssa | TODO_verify_flow | TODO_verify_stmts,
78740 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_update_ssa
78741 + }
78742 +};
78743 +
78744 +static struct rtl_opt_pass stackleak_final_rtl_opt_pass = {
78745 + .pass = {
78746 + .type = RTL_PASS,
78747 + .name = "stackleak_final",
78748 + .gate = gate_stackleak_track_stack,
78749 + .execute = execute_stackleak_final,
78750 + .sub = NULL,
78751 + .next = NULL,
78752 + .static_pass_number = 0,
78753 + .tv_id = TV_NONE,
78754 + .properties_required = 0,
78755 + .properties_provided = 0,
78756 + .properties_destroyed = 0,
78757 + .todo_flags_start = 0,
78758 + .todo_flags_finish = TODO_dump_func
78759 + }
78760 +};
78761 +
78762 +static bool gate_stackleak_track_stack(void)
78763 +{
78764 + return track_frame_size >= 0;
78765 +}
78766 +
78767 +static void stackleak_check_alloca(gimple_stmt_iterator *gsi)
78768 +{
78769 + gimple check_alloca;
78770 + tree fndecl, fntype, alloca_size;
78771 +
78772 + // insert call to void pax_check_alloca(unsigned long size)
78773 + fntype = build_function_type_list(void_type_node, long_unsigned_type_node, NULL_TREE);
78774 + fndecl = build_fn_decl(check_function, fntype);
78775 + DECL_ASSEMBLER_NAME(fndecl); // for LTO
78776 + alloca_size = gimple_call_arg(gsi_stmt(*gsi), 0);
78777 + check_alloca = gimple_build_call(fndecl, 1, alloca_size);
78778 + gsi_insert_before(gsi, check_alloca, GSI_SAME_STMT);
78779 +}
78780 +
78781 +static void stackleak_add_instrumentation(gimple_stmt_iterator *gsi)
78782 +{
78783 + gimple track_stack;
78784 + tree fndecl, fntype;
78785 +
78786 + // insert call to void pax_track_stack(void)
78787 + fntype = build_function_type_list(void_type_node, NULL_TREE);
78788 + fndecl = build_fn_decl(track_function, fntype);
78789 + DECL_ASSEMBLER_NAME(fndecl); // for LTO
78790 + track_stack = gimple_build_call(fndecl, 0);
78791 + gsi_insert_after(gsi, track_stack, GSI_CONTINUE_LINKING);
78792 +}
78793 +
78794 +#if BUILDING_GCC_VERSION == 4005
78795 +static bool gimple_call_builtin_p(gimple stmt, enum built_in_function code)
78796 +{
78797 + tree fndecl;
78798 +
78799 + if (!is_gimple_call(stmt))
78800 + return false;
78801 + fndecl = gimple_call_fndecl(stmt);
78802 + if (!fndecl)
78803 + return false;
78804 + if (DECL_BUILT_IN_CLASS(fndecl) != BUILT_IN_NORMAL)
78805 + return false;
78806 +// print_node(stderr, "pax", fndecl, 4);
78807 + return DECL_FUNCTION_CODE(fndecl) == code;
78808 +}
78809 +#endif
78810 +
78811 +static bool is_alloca(gimple stmt)
78812 +{
78813 + if (gimple_call_builtin_p(stmt, BUILT_IN_ALLOCA))
78814 + return true;
78815 +
78816 +#if BUILDING_GCC_VERSION >= 4007
78817 + if (gimple_call_builtin_p(stmt, BUILT_IN_ALLOCA_WITH_ALIGN))
78818 + return true;
78819 +#endif
78820 +
78821 + return false;
78822 +}
78823 +
78824 +static unsigned int execute_stackleak_tree_instrument(void)
78825 +{
78826 + basic_block bb, entry_bb;
78827 + bool prologue_instrumented = false;
78828 +
78829 + entry_bb = ENTRY_BLOCK_PTR_FOR_FUNCTION(cfun)->next_bb;
78830 +
78831 + // 1. loop through BBs and GIMPLE statements
78832 + FOR_EACH_BB(bb) {
78833 + gimple_stmt_iterator gsi;
78834 +
78835 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
78836 + // gimple match: align 8 built-in BUILT_IN_NORMAL:BUILT_IN_ALLOCA attributes <tree_list 0xb7576450>
78837 + if (!is_alloca(gsi_stmt(gsi)))
78838 + continue;
78839 +
78840 + // 2. insert stack overflow check before each __builtin_alloca call
78841 + stackleak_check_alloca(&gsi);
78842 +
78843 + // 3. insert track call after each __builtin_alloca call
78844 + stackleak_add_instrumentation(&gsi);
78845 + if (bb == entry_bb)
78846 + prologue_instrumented = true;
78847 + }
78848 + }
78849 +
78850 + // 4. insert track call at the beginning
78851 + if (!prologue_instrumented) {
78852 + gimple_stmt_iterator gsi;
78853 +
78854 + bb = split_block_after_labels(ENTRY_BLOCK_PTR)->dest;
78855 + if (dom_info_available_p(CDI_DOMINATORS))
78856 + set_immediate_dominator(CDI_DOMINATORS, bb, ENTRY_BLOCK_PTR);
78857 + gsi = gsi_start_bb(bb);
78858 + stackleak_add_instrumentation(&gsi);
78859 + }
78860 +
78861 + return 0;
78862 +}
78863 +
78864 +static unsigned int execute_stackleak_final(void)
78865 +{
78866 + rtx insn;
78867 +
78868 + if (cfun->calls_alloca)
78869 + return 0;
78870 +
78871 + // keep calls only if function frame is big enough
78872 + if (get_frame_size() >= track_frame_size)
78873 + return 0;
78874 +
78875 + // 1. find pax_track_stack calls
78876 + for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) {
78877 + // rtl match: (call_insn 8 7 9 3 (call (mem (symbol_ref ("pax_track_stack") [flags 0x41] <function_decl 0xb7470e80 pax_track_stack>) [0 S1 A8]) (4)) -1 (nil) (nil))
78878 + rtx body;
78879 +
78880 + if (!CALL_P(insn))
78881 + continue;
78882 + body = PATTERN(insn);
78883 + if (GET_CODE(body) != CALL)
78884 + continue;
78885 + body = XEXP(body, 0);
78886 + if (GET_CODE(body) != MEM)
78887 + continue;
78888 + body = XEXP(body, 0);
78889 + if (GET_CODE(body) != SYMBOL_REF)
78890 + continue;
78891 + if (strcmp(XSTR(body, 0), track_function))
78892 + continue;
78893 +// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
78894 + // 2. delete call
78895 + insn = delete_insn_and_edges(insn);
78896 +#if BUILDING_GCC_VERSION >= 4007
78897 + if (GET_CODE(insn) == NOTE && NOTE_KIND(insn) == NOTE_INSN_CALL_ARG_LOCATION)
78898 + insn = delete_insn_and_edges(insn);
78899 +#endif
78900 + }
78901 +
78902 +// print_simple_rtl(stderr, get_insns());
78903 +// print_rtl(stderr, get_insns());
78904 +// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
78905 +
78906 + return 0;
78907 +}
78908 +
78909 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
78910 +{
78911 + const char * const plugin_name = plugin_info->base_name;
78912 + const int argc = plugin_info->argc;
78913 + const struct plugin_argument * const argv = plugin_info->argv;
78914 + int i;
78915 + struct register_pass_info stackleak_tree_instrument_pass_info = {
78916 + .pass = &stackleak_tree_instrument_pass.pass,
78917 +// .reference_pass_name = "tree_profile",
78918 + .reference_pass_name = "optimized",
78919 + .ref_pass_instance_number = 0,
78920 + .pos_op = PASS_POS_INSERT_AFTER
78921 + };
78922 + struct register_pass_info stackleak_final_pass_info = {
78923 + .pass = &stackleak_final_rtl_opt_pass.pass,
78924 + .reference_pass_name = "final",
78925 + .ref_pass_instance_number = 0,
78926 + .pos_op = PASS_POS_INSERT_BEFORE
78927 + };
78928 +
78929 + if (!plugin_default_version_check(version, &gcc_version)) {
78930 + error(G_("incompatible gcc/plugin versions"));
78931 + return 1;
78932 + }
78933 +
78934 + register_callback(plugin_name, PLUGIN_INFO, NULL, &stackleak_plugin_info);
78935 +
78936 + for (i = 0; i < argc; ++i) {
78937 + if (!strcmp(argv[i].key, "track-lowest-sp")) {
78938 + if (!argv[i].value) {
78939 + error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
78940 + continue;
78941 + }
78942 + track_frame_size = atoi(argv[i].value);
78943 + if (argv[i].value[0] < '0' || argv[i].value[0] > '9' || track_frame_size < 0)
78944 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
78945 + continue;
78946 + }
78947 + if (!strcmp(argv[i].key, "initialize-locals")) {
78948 + if (argv[i].value) {
78949 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
78950 + continue;
78951 + }
78952 + init_locals = true;
78953 + continue;
78954 + }
78955 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
78956 + }
78957 +
78958 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_tree_instrument_pass_info);
78959 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_final_pass_info);
78960 +
78961 + return 0;
78962 +}
78963 diff --git a/tools/perf/util/include/asm/alternative-asm.h b/tools/perf/util/include/asm/alternative-asm.h
78964 index 6789d78..4afd019 100644
78965 --- a/tools/perf/util/include/asm/alternative-asm.h
78966 +++ b/tools/perf/util/include/asm/alternative-asm.h
78967 @@ -5,4 +5,7 @@
78968
78969 #define altinstruction_entry #
78970
78971 + .macro pax_force_retaddr rip=0, reload=0
78972 + .endm
78973 +
78974 #endif
78975 diff --git a/usr/gen_init_cpio.c b/usr/gen_init_cpio.c
78976 index af0f22f..9a7d479 100644
78977 --- a/usr/gen_init_cpio.c
78978 +++ b/usr/gen_init_cpio.c
78979 @@ -303,7 +303,7 @@ static int cpio_mkfile(const char *name, const char *location,
78980 int retval;
78981 int rc = -1;
78982 int namesize;
78983 - int i;
78984 + unsigned int i;
78985
78986 mode |= S_IFREG;
78987
78988 @@ -392,9 +392,10 @@ static char *cpio_replace_env(char *new_location)
78989 *env_var = *expanded = '\0';
78990 strncat(env_var, start + 2, end - start - 2);
78991 strncat(expanded, new_location, start - new_location);
78992 - strncat(expanded, getenv(env_var), PATH_MAX);
78993 - strncat(expanded, end + 1, PATH_MAX);
78994 + strncat(expanded, getenv(env_var), PATH_MAX - strlen(expanded));
78995 + strncat(expanded, end + 1, PATH_MAX - strlen(expanded));
78996 strncpy(new_location, expanded, PATH_MAX);
78997 + new_location[PATH_MAX] = 0;
78998 } else
78999 break;
79000 }
79001 diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
79002 index d9cfb78..4f27c10 100644
79003 --- a/virt/kvm/kvm_main.c
79004 +++ b/virt/kvm/kvm_main.c
79005 @@ -75,7 +75,7 @@ LIST_HEAD(vm_list);
79006
79007 static cpumask_var_t cpus_hardware_enabled;
79008 static int kvm_usage_count = 0;
79009 -static atomic_t hardware_enable_failed;
79010 +static atomic_unchecked_t hardware_enable_failed;
79011
79012 struct kmem_cache *kvm_vcpu_cache;
79013 EXPORT_SYMBOL_GPL(kvm_vcpu_cache);
79014 @@ -2268,7 +2268,7 @@ static void hardware_enable_nolock(void *junk)
79015
79016 if (r) {
79017 cpumask_clear_cpu(cpu, cpus_hardware_enabled);
79018 - atomic_inc(&hardware_enable_failed);
79019 + atomic_inc_unchecked(&hardware_enable_failed);
79020 printk(KERN_INFO "kvm: enabling virtualization on "
79021 "CPU%d failed\n", cpu);
79022 }
79023 @@ -2322,10 +2322,10 @@ static int hardware_enable_all(void)
79024
79025 kvm_usage_count++;
79026 if (kvm_usage_count == 1) {
79027 - atomic_set(&hardware_enable_failed, 0);
79028 + atomic_set_unchecked(&hardware_enable_failed, 0);
79029 on_each_cpu(hardware_enable_nolock, NULL, 1);
79030
79031 - if (atomic_read(&hardware_enable_failed)) {
79032 + if (atomic_read_unchecked(&hardware_enable_failed)) {
79033 hardware_disable_all_nolock();
79034 r = -EBUSY;
79035 }
79036 @@ -2676,7 +2676,7 @@ static void kvm_sched_out(struct preempt_notifier *pn,
79037 kvm_arch_vcpu_put(vcpu);
79038 }
79039
79040 -int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
79041 +int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
79042 struct module *module)
79043 {
79044 int r;
79045 @@ -2739,7 +2739,7 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
79046 if (!vcpu_align)
79047 vcpu_align = __alignof__(struct kvm_vcpu);
79048 kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size, vcpu_align,
79049 - 0, NULL);
79050 + SLAB_USERCOPY, NULL);
79051 if (!kvm_vcpu_cache) {
79052 r = -ENOMEM;
79053 goto out_free_3;
79054 @@ -2749,9 +2749,11 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
79055 if (r)
79056 goto out_free;
79057
79058 - kvm_chardev_ops.owner = module;
79059 - kvm_vm_fops.owner = module;
79060 - kvm_vcpu_fops.owner = module;
79061 + pax_open_kernel();
79062 + *(void **)&kvm_chardev_ops.owner = module;
79063 + *(void **)&kvm_vm_fops.owner = module;
79064 + *(void **)&kvm_vcpu_fops.owner = module;
79065 + pax_close_kernel();
79066
79067 r = misc_register(&kvm_dev);
79068 if (r) {